pax_global_header00006660000000000000000000000064151557403010014513gustar00rootroot0000000000000052 comment=fe997ecba52de839aaac8e6429f31fa684c0830d zxc-0.9.1/000077500000000000000000000000001515574030100123265ustar00rootroot00000000000000zxc-0.9.1/.clang-format000066400000000000000000000003111515574030100146740ustar00rootroot00000000000000--- Language: Cpp BasedOnStyle: Google IndentWidth: 4 TabWidth: 4 UseTab: Never ColumnLimit: 100 AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false BreakBeforeBraces: Attach ---zxc-0.9.1/.clusterfuzzlite/000077500000000000000000000000001515574030100156625ustar00rootroot00000000000000zxc-0.9.1/.clusterfuzzlite/Dockerfile000066400000000000000000000005741515574030100176620ustar00rootroot00000000000000# Copyright (c) 2025-2026, Bertrand Lebonnois # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. FROM gcr.io/oss-fuzz-base/base-builder:v1 RUN apt-get update && apt-get install -y make cmake COPY . $SRC/zxc WORKDIR $SRC/zxc COPY .clusterfuzzlite/build.sh $SRC/build.shzxc-0.9.1/.clusterfuzzlite/build.sh000066400000000000000000000014611515574030100173170ustar00rootroot00000000000000#!/bin/bash -eu # Copyright (c) 2025-2026, Bertrand Lebonnois # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
AVAILABLE_FUZZERS="decompress roundtrip" LIB_SOURCES="src/lib/zxc_common.c src/lib/zxc_compress.c src/lib/zxc_decompress.c src/lib/zxc_driver.c src/lib/zxc_dispatch.c" for fuzzer in $AVAILABLE_FUZZERS; do if [ -z "${FUZZER_TARGET:-}" ] || [ "${FUZZER_TARGET}" == "$fuzzer" ]; then $CC $CFLAGS -I include \ -I src/lib/vendors \ -DZXC_FUNCTION_SUFFIX=_default -DZXC_ONLY_DEFAULT \ $LIB_SOURCES \ tests/fuzz_${fuzzer}.c \ -o $OUT/zxc_fuzzer_${fuzzer} \ $LIB_FUZZING_ENGINE \ -lm -pthread fi donezxc-0.9.1/.github/000077500000000000000000000000001515574030100136665ustar00rootroot00000000000000zxc-0.9.1/.github/CODEOWNERS000066400000000000000000000000751515574030100152630ustar00rootroot00000000000000# Default owners for everything in the repo * @hellobertrand zxc-0.9.1/.github/CODE_OF_CONDUCT.md000066400000000000000000000054601515574030100164720ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex, gender characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at **zxc.codec@gmail.com**. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.htmlzxc-0.9.1/.github/CONTRIBUTING.md000066400000000000000000000027741515574030100161310ustar00rootroot00000000000000# Contributing to ZXC Thank you for your interest in contributing to ZXC! This guide will help you get started. ## Developer Certificate of Origin (DCO) By contributing, you certify that: - You have the right to submit the contribution - You agree to license your contribution under the BSD-3-Clause license Add this to your commits: ```bash git commit -s -m "Your commit message" ``` ## License Headers To maintain legal clarity and recognize all contributors, every new source file (.c, .h, .rs, .py, etc.) must include the following header at the very top: ```C /* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ ``` ## Quick Start ### Build and Test ```bash git clone https://github.com/hellobertrand/zxc.git cd zxc mkdir build && cd build cmake .. -DCMAKE_BUILD_TYPE=Debug make -j ctest --output-on-failure ``` ### Format Code ```bash clang-format -i src/lib/*.c include/*.h ``` ## Requirements - **C17** compiler (GCC, Clang, or MSVC) - **CMake** 3.10+ - Follow `.clang-format` style (Google) - All code must be ASCII-only - Pass `ctest` and static analysis ## Submitting Changes 1. Fork and create a feature branch 2. Add tests for new functionality 3. Ensure CI passes (build, tests, benchmarks) 4. Sign your commits with `-s` 5. Open a PR to `main` ## Reporting Issues Include: - ZXC version (`zxc --version`) - OS and architecture - Minimal reproduction steps Thank you for making ZXC better! 
zxc-0.9.1/.github/FUNDING.yml000066400000000000000000000000261515574030100155010ustar00rootroot00000000000000github: hellobertrand zxc-0.9.1/.github/SECURITY.md000066400000000000000000000004111515574030100154530ustar00rootroot00000000000000# Security Policy ## Reporting a Vulnerability **Please do not report security vulnerabilities through public GitHub issues.** If you have discovered a security issue in ZXC, please email us privately at **zxc.codec@gmail.com**. We will respond within 48 hours.zxc-0.9.1/.github/codeql/000077500000000000000000000000001515574030100151355ustar00rootroot00000000000000zxc-0.9.1/.github/codeql/codeql-config.yml000066400000000000000000000001601515574030100203670ustar00rootroot00000000000000name: "CodeQL Config" paths-ignore: - 'src/lib/vendors/rapidhash.h' queries: - uses: security-and-quality zxc-0.9.1/.github/dependabot.yml000066400000000000000000000001611515574030100165140ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: github-actions directory: / schedule: interval: monthlyzxc-0.9.1/.github/workflows/000077500000000000000000000000001515574030100157235ustar00rootroot00000000000000zxc-0.9.1/.github/workflows/README.md000066400000000000000000000051601515574030100172040ustar00rootroot00000000000000# GitHub Actions Workflows This directory contains CI/CD workflows for the ZXC compression library. ## Workflows ### build.yml - Build & Release **Triggers:** Push to main, tags, pull requests, manual dispatch Builds and tests ZXC across multiple platforms (Linux x86_64/ARM64, macOS ARM64, Windows x64). Generates release artifacts and uploads binaries when tags are pushed. ### build_all.yml - Multi-Architecture Build **Triggers:** Push to main, pull requests, manual dispatch Comprehensive build matrix testing across multiple architectures including 32-bit and 64-bit variants for Linux (x64, x86, ARM64, ARM) and Windows (x64, x86). Validates compilation compatibility across different platforms. 
### benchmark.yml - Performance Benchmark **Triggers:** Push to main (src changes), pull requests, manual dispatch Runs performance benchmarks using LZbench on Ubuntu and macOS. Integrates ZXC into the LZbench framework and tests compression/decompression performance against the Silesia corpus. ### coverage.yml - Code Coverage **Triggers:** Push to main, pull requests, manual dispatch Builds the project with coverage instrumentation (`-DZXC_ENABLE_COVERAGE=ON`), runs unit and CLI tests, and generates a coverage report using `lcov`. The report is then uploaded to Codecov for analysis. ### fuzzing.yml - Fuzz Testing **Triggers:** Pull requests, scheduled (every 3 days), manual dispatch Executes fuzz testing using ClusterFuzzLite with multiple sanitizers (address, undefined) on decompression and roundtrip fuzzers. Helps identify memory safety issues and edge cases. ### quality.yml - Code Quality **Triggers:** Push to main, pull requests, manual dispatch Performs static analysis using Cppcheck and Clang Static Analyzer. Runs memory leak detection with Valgrind to ensure code quality and identify potential bugs. ### wrapper-rust-publish.yml - Publish Rust Crates **Triggers:** Release published, manual dispatch Tests and publishes Rust crates to crates.io. Verifies the version matches the release tag, runs tests across platforms, and publishes `zxc-compress-sys` (FFI bindings) followed by `zxc-compress` (safe wrapper). ### wrapper-python-publish.yml - Publish Python Package **Triggers:** Release published, manual dispatch Builds platform-specific wheels using `cibuildwheel` for Linux (x86_64, ARM64), macOS (ARM64, Intel), and Windows (AMD64, ARM64). Tests wheels against Python 3.12-3.13, then publishes to PyPI via trusted publishing. ### security.yml - Code Security **Triggers:** Push to main, pull requests Runs CodeQL security analysis to detect potential security vulnerabilities and coding errors in the C/C++ codebase. 
zxc-0.9.1/.github/workflows/benchmark.yml000066400000000000000000000050761515574030100204100ustar00rootroot00000000000000name: Benchmark on: workflow_dispatch: push: branches: [ main ] paths: - 'src/**' - '.github/workflows/benchmark.yml' pull_request: branches: [ main ] permissions: contents: read concurrency: cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: lzbench-benchmark: name: Run Benchmark if: github.event.pull_request.draft == false runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macos-latest-xlarge] env: LZBENCH_DIR: lzbench SILESIA_DIR: silesia steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Clone LZbench run: | git clone -b zxc-0.9.x https://github.com/hellobertrand/lzbench "${LZBENCH_DIR}" - name: Copy Lib ZXC run: | mkdir -p ${LZBENCH_DIR}/lz/zxc/include/ mkdir -p ${LZBENCH_DIR}/lz/zxc/src/lib/ cp -r include ${LZBENCH_DIR}/lz/zxc/ cp -r src/lib ${LZBENCH_DIR}/lz/zxc/src/ - name: Build Lzbench working-directory: ${{ env.LZBENCH_DIR }} run: | make -j MOREFLAGS="-march=native" - name: Cache Silesia Corpus id: cache-silesia uses: actions/cache@v5 with: path: ${{ env.SILESIA_DIR }} key: silesia-corpus-${{ runner.os }}-v1 - name: Download Silesia Corpus (${{ runner.os }}) if: steps.cache-silesia.outputs.cache-hit != 'true' run: | wget --no-check-certificate -q https://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip -O silesia.zip mkdir -p "${SILESIA_DIR}" unzip -q silesia.zip -d "${SILESIA_DIR}" - name: Run Lzbench on Silesia Corpus working-directory: ${{ env.LZBENCH_DIR }} run: | ./lzbench \ -e"memcpy/zxc/lz4/lz4fast,17/lz4hc,12/zstd,1/zstd_fast,-1/brotli,0/snappy" \ -j -t8,8 -o1 -r "../${SILESIA_DIR}"/ \ > ../benchmark.md echo "Benchmark finished. 
First lines:" head -n 40 ../benchmark.md - name: Add Benchmark to Job Summary if: always() run: | echo "### Résultats Benchmark (${{ matrix.os }})" >> $GITHUB_STEP_SUMMARY cat benchmark.md >> $GITHUB_STEP_SUMMARY - name: Upload Benchmark Result as Artifact uses: actions/upload-artifact@v7 with: name: benchmark-${{ matrix.os }}-${{ github.sha }} path: benchmark.md if-no-files-found: error retention-days: 10zxc-0.9.1/.github/workflows/build.yml000066400000000000000000000076471515574030100175630ustar00rootroot00000000000000name: Build & Release on: workflow_dispatch: push: branches: [ main ] tags: - 'v*' pull_request: branches: [ main ] permissions: contents: read concurrency: cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: build-and-test: name: Build ${{ matrix.name }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - name: Linux x86_64 os: ubuntu-latest artifact_name: zxc asset_name: zxc-linux-x86_64 cmake_flags: "-DZXC_NATIVE_ARCH=OFF" - name: Linux ARM64 os: ubuntu-24.04-arm artifact_name: zxc asset_name: zxc-linux-aarch64 cmake_flags: "-DZXC_NATIVE_ARCH=OFF" - name: macOS ARM64 os: macos-latest artifact_name: zxc asset_name: zxc-macos-arm64 cmake_flags: "-DZXC_NATIVE_ARCH=OFF" - name: Windows x64 os: windows-latest artifact_name: zxc.exe asset_name: zxc-windows-x64.exe cmake_flags: "-DZXC_NATIVE_ARCH=OFF" steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Configure CMake run: cmake -S . 
-B build -DCMAKE_BUILD_TYPE=Release ${{ matrix.cmake_flags }} - name: Build run: cmake --build build --config Release --parallel - name: Test (native) working-directory: build run: ctest -C Release --output-on-failure - name: Test CLI shell: bash run: | BUILD_DIR="build" # Find binary (handle both Ninja and Visual Studio generators) if [ -f "$BUILD_DIR/Release/${{ matrix.artifact_name }}" ]; then ZXC_BIN="$BUILD_DIR/Release/${{ matrix.artifact_name }}" elif [ -f "$BUILD_DIR/${{ matrix.artifact_name }}" ]; then ZXC_BIN="$BUILD_DIR/${{ matrix.artifact_name }}" else echo "Binary not found for CLI test!" find "$BUILD_DIR" -name "${{ matrix.artifact_name }}" exit 1 fi echo "Testing with binary: $ZXC_BIN" chmod +x tests/test_cli.sh ./tests/test_cli.sh "$ZXC_BIN" - name: Package Release (Archive) if: startsWith(github.ref, 'refs/tags/') shell: bash run: | # Install to staging directory using CMake (Standard way for all OS) cmake --install build --prefix release_staging --config Release # Strip binary on Unix-like systems only (installed to bin/) if [[ "${{ matrix.os }}" != "windows-latest" ]]; then strip release_staging/bin/${{ matrix.artifact_name }} fi # Copy documentation to root cp LICENSE release_staging/LICENSE.txt cp README.md release_staging/README.md # Create archive cd release_staging if [[ "${{ matrix.os }}" == "windows-latest" ]]; then 7z a ../${{ matrix.asset_name }}.zip * else tar -czvf ../${{ matrix.asset_name }}.tar.gz * fi - name: Upload Artifacts if: startsWith(github.ref, 'refs/tags/') uses: actions/upload-artifact@v7 with: name: ${{ matrix.asset_name }} path: | ${{ matrix.asset_name }}.tar.gz ${{ matrix.asset_name }}.zip release: name: Create GitHub Release needs: build-and-test runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags/') permissions: contents: write steps: - name: Download all artifacts uses: actions/download-artifact@v8 - name: Create Release uses: softprops/action-gh-release@v2 with: files: | **/*.tar.gz **/*.zip env: 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}zxc-0.9.1/.github/workflows/coverage.yml000066400000000000000000000036631515574030100202510ustar00rootroot00000000000000name: Code Coverage on: workflow_dispatch: push: branches: [ main ] pull_request: branches: [ main ] jobs: coverage: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Install dependencies run: | sudo apt-get update sudo apt-get install -y lcov - name: Configure CMake run: cmake -S . -B build -DCMAKE_BUILD_TYPE=Debug -DZXC_ENABLE_COVERAGE=ON -DZXC_NATIVE_ARCH=OFF - name: Build run: cmake --build build --parallel - name: Run Unit Tests working-directory: build run: ctest --output-on-failure - name: Run CLI Tests run: | chmod +x tests/test_cli.sh ./tests/test_cli.sh build/zxc - name: Generate Coverage Report run: | # Capture coverage data lcov --capture --directory . --output-file coverage.info --ignore-errors gcov,negative # Filter out system headers, external libraries, CLI frontend, and test files lcov --remove coverage.info '/usr/*' '*/tests/*' '*/cli/*' '*/src/lib/vendors/rapidhash.h' --output-file coverage.info --ignore-errors unused # Output file list lcov --list coverage.info - name: Generate HTML Report run: genhtml coverage.info --output-directory coverage-report - name: Publish Coverage Summary run: | echo "## Code Coverage Summary" >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY lcov --summary coverage.info 2>&1 | tail -n +2 >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - name: Upload Coverage Artifact uses: actions/upload-artifact@v7 with: name: coverage-report path: coverage-report retention-days: 10 - name: Upload coverage to Codecov uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} files: ./coverage.info fail_ci_if_error: true zxc-0.9.1/.github/workflows/fuzzing.yml000066400000000000000000000071771515574030100201560ustar00rootroot00000000000000name: Fuzzing on: workflow_dispatch: inputs: mode: description: 'Fuzzing mode' required: 
true default: 'batch' type: choice options: - code-change - batch fuzz_seconds: description: 'Duration (seconds)' required: true default: '3600' pull_request: branches: [ main ] schedule: - cron: '0 1 * * 1' permissions: contents: read security-events: write jobs: fuzzing: name: Run Fuzzing (${{ matrix.fuzzer }} - ${{ matrix.sanitizer }}) runs-on: ubuntu-latest concurrency: group: ${{ github.workflow }}-${{ matrix.fuzzer }}-${{ matrix.sanitizer }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true strategy: fail-fast: false matrix: sanitizer: [address, undefined] fuzzer: [decompress, roundtrip] steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Configure Fuzzer Target run: | sed -i '2i export FUZZER_TARGET="${{ matrix.fuzzer }}"' .clusterfuzzlite/build.sh # TODO: Remove this step once ClusterFuzzLite updates to support Docker 29+ - name: Downgrade Docker (Temporary Workaround) run: | # ClusterFuzzLite v1 uses Docker API 1.41 which is incompatible with Docker 29.0+ # Downgrade to Docker 28 until the action is updated sudo apt-get update sudo apt-get install -y apt-transport-https ca-certificates curl curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get update # Install Docker 28.0.4 specifically sudo apt-get install -y --allow-downgrades docker-ce=5:28.0.4-1~ubuntu.$(lsb_release -rs)~$(lsb_release -cs) docker-ce-cli=5:28.0.4-1~ubuntu.$(lsb_release -rs)~$(lsb_release -cs) containerd.io sudo systemctl restart docker docker version - name: Build Fuzzers (${{ matrix.fuzzer }} - ${{ matrix.sanitizer }}) id: build uses: google/clusterfuzzlite/actions/build_fuzzers@v1 with: language: c github-token: ${{ secrets.GITHUB_TOKEN }} sanitizer: 
${{ matrix.sanitizer }} - name: Run Fuzzers (${{ matrix.fuzzer }} - ${{ matrix.sanitizer }}) id: run uses: google/clusterfuzzlite/actions/run_fuzzers@v1 with: github-token: ${{ secrets.GITHUB_TOKEN }} mode: ${{ github.event_name == 'pull_request' && 'code-change' || inputs.mode || 'batch' }} fuzz-seconds: ${{ github.event_name == 'pull_request' && 120 || inputs.fuzz_seconds || 3600 }} sanitizer: ${{ matrix.sanitizer }} output-sarif: true storage-repo: https://${{ secrets.CFLITE_CORPUS_TOKEN }}@github.com/hellobertrand/zxc-fuzz-corpus.git storage-repo-branch: main - name: Upload SARIF to GitHub Security if: success() || failure() uses: github/codeql-action/upload-sarif@v4 with: sarif_file: . category: clusterfuzzlite-${{ matrix.fuzzer }}-${{ matrix.sanitizer }} tsan: name: Thread Sanitizer runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Build with TSan run: | cmake -B build -DCMAKE_C_FLAGS="-fsanitize=thread -g -fno-omit-frame-pointer" -DCMAKE_BUILD_TYPE=Debug cmake --build build - name: Run Tests run: ctest --test-dir build --output-on-failurezxc-0.9.1/.github/workflows/multiarch.yml000066400000000000000000000301031515574030100204330ustar00rootroot00000000000000name: Multi-Architecture Build on: workflow_dispatch: push: branches: [ main ] pull_request: branches: [ main ] permissions: contents: read concurrency: cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: # =========================================================================== # Tier 1: Main architectures # =========================================================================== build-tier1: name: "Tier 1: ${{ matrix.name }}" runs-on: ${{ matrix.runner }} strategy: fail-fast: false matrix: include: - name: Linux x64 runner: ubuntu-latest os: linux arch_flags: "" - name: Linux x86 (32-bit) runner: ubuntu-latest os: linux install_32bit_x86: true arch_flags: "-m32" - name: Linux ARM64 runner: ubuntu-24.04-arm os: linux 
arch_flags: "" - name: Linux ARM (32-bit) runner: ubuntu-latest os: linux install_32bit_arm: true cross_compiler_c: arm-linux-gnueabihf-gcc cross_compiler_cxx: arm-linux-gnueabihf-g++ cross_strip: arm-linux-gnueabihf-strip cmake_system_name: "Linux" cmake_system_processor: "arm" - name: Windows x64 runner: windows-latest os: windows cmake_arch: "x64" - name: Windows x86 (32-bit) runner: windows-latest os: windows cmake_arch: "Win32" - name: Windows ARM64 runner: windows-11-arm os: windows cmake_arch: "ARM64" # Note: Windows ARM 32-bit removed - deprecated and unsupported by Windows SDK 10.0.26100+ - name: macOS Intel x64 runner: macos-15-intel os: macos - name: macOS ARM64 runner: macos-latest os: macos steps: - name: Checkout Repository uses: actions/checkout@v6 # Case: Linux x86 (32-bit) - name: Install x86 32-bit libs if: matrix.install_32bit_x86 run: | sudo apt-get update sudo apt-get install -y gcc-multilib g++-multilib # Case: Linux ARM (32-bit) - name: Install ARM 32-bit toolchain if: matrix.install_32bit_arm run: | sudo apt-get update sudo apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf - name: Configure, Build & Test (Static & Shared) shell: bash run: | # Initialize variables EXTRA_ARGS="-DZXC_NATIVE_ARCH=OFF" # Disable native arch detection (Runtime Dispatch enabled) # Setup Environment for Linux/Mac (Flags & Cross-compilation) if [ "${{ matrix.os }}" != "windows" ]; then # Handle C flags (e.g., -m32) if [ ! -z "${{ matrix.arch_flags }}" ]; then export CFLAGS="${{ matrix.arch_flags }}" export CXXFLAGS="${{ matrix.arch_flags }}" fi # Handle cross-compiler (e.g., ARM 32-bit on Linux) if [ ! -z "${{ matrix.cross_compiler_c }}" ]; then EXTRA_ARGS="$EXTRA_ARGS -DCMAKE_C_COMPILER=${{ matrix.cross_compiler_c }} -DCMAKE_CXX_COMPILER=${{ matrix.cross_compiler_cxx }}" fi # Handle explicit System Name/Processor (crucial for cross-compilation) if [ ! 
-z "${{ matrix.cmake_system_name }}" ]; then EXTRA_ARGS="$EXTRA_ARGS -DCMAKE_SYSTEM_NAME=${{ matrix.cmake_system_name }}" fi if [ ! -z "${{ matrix.cmake_system_processor }}" ]; then EXTRA_ARGS="$EXTRA_ARGS -DCMAKE_SYSTEM_PROCESSOR=${{ matrix.cmake_system_processor }}" fi # Handle cross-strip tool (e.g., ARM 32-bit on Linux) if [ ! -z "${{ matrix.cross_strip }}" ]; then EXTRA_ARGS="$EXTRA_ARGS -DCMAKE_STRIP=/usr/bin/${{ matrix.cross_strip }}" fi fi # Iterate over Library Types for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi CURRENT_ARGS="$EXTRA_ARGS -DBUILD_SHARED_LIBS=$SHARED_FLAG" # 1. Configure if [ "${{ matrix.os }}" = "windows" ]; then # Visual Studio Generator cmake -S . -B $BUILD_DIR -A ${{ matrix.cmake_arch }} $CURRENT_ARGS else # Ninja/Makefiles cmake -S . -B $BUILD_DIR -DCMAKE_BUILD_TYPE=Release $CURRENT_ARGS fi # 2. Build cmake --build $BUILD_DIR --config Release --parallel # 3. Test (Skip if cross-compiling) if [ -z "${{ matrix.cross_compiler_c }}" ]; then echo "Running Tests for $LIB_TYPE..." 
ctest --test-dir $BUILD_DIR -C Release --output-on-failure else echo "Skipping tests for $LIB_TYPE (Cross-compilation)" fi echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # Tier 2: Extended Linux architectures (cross-compiled, tested via QEMU) # =========================================================================== build-tier2: name: "Tier 2: ${{ matrix.name }}" runs-on: ubuntu-latest container: debian:trixie strategy: fail-fast: false matrix: include: - name: Linux amd64 cross_pkg: gcc cross_prefix: x86_64-linux-gnu cmake_processor: x86_64 qemu_binary: qemu-x86_64 - name: Linux arm64 cross_pkg: gcc-aarch64-linux-gnu cross_prefix: aarch64-linux-gnu cmake_processor: aarch64 qemu_binary: qemu-aarch64 - name: Linux i386 cross_pkg: gcc-i686-linux-gnu cross_prefix: i686-linux-gnu cmake_processor: i686 qemu_binary: qemu-i386 - name: Linux RISC-V 64 cross_pkg: gcc-riscv64-linux-gnu cross_prefix: riscv64-linux-gnu cmake_processor: riscv64 qemu_binary: qemu-riscv64 - name: Linux s390x cross_pkg: gcc-s390x-linux-gnu cross_prefix: s390x-linux-gnu cmake_processor: s390x qemu_binary: qemu-s390x - name: Linux armhf cross_pkg: gcc-arm-linux-gnueabihf cross_prefix: arm-linux-gnueabihf cmake_processor: arm qemu_binary: qemu-arm - name: Linux ppc64el cross_pkg: gcc-powerpc64le-linux-gnu cross_prefix: powerpc64le-linux-gnu cmake_processor: ppc64le qemu_binary: qemu-ppc64le - name: Linux mipsel cross_pkg: gcc-mipsel-linux-gnu cross_prefix: mipsel-linux-gnu cmake_processor: mipsel qemu_binary: qemu-mipsel - name: Linux powerpc cross_pkg: gcc-powerpc-linux-gnu cross_prefix: powerpc-linux-gnu cmake_processor: powerpc qemu_binary: qemu-ppc - name: Linux ppc64 cross_pkg: gcc-powerpc64-linux-gnu cross_prefix: powerpc64-linux-gnu cmake_processor: ppc64 qemu_binary: qemu-ppc64 - name: Linux sparc64 cross_pkg: gcc-sparc64-linux-gnu cross_prefix: sparc64-linux-gnu cmake_processor: sparc64 qemu_binary: 
qemu-sparc64 - name: Linux armel cross_pkg: gcc-arm-linux-gnueabi cross_prefix: arm-linux-gnueabi cmake_processor: arm qemu_binary: qemu-arm steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install cross-compiler run: | apt-get update apt-get install -y cmake build-essential ninja-build ${{ matrix.cross_pkg }} qemu-user - name: Configure, Build & Test (Static & Shared) shell: bash run: | CROSS_C=${{ matrix.cross_prefix }}-gcc CROSS_CXX=${{ matrix.cross_prefix }}-g++ CROSS_STRIP=/usr/bin/${{ matrix.cross_prefix }}-strip SYSROOT=/usr/${{ matrix.cross_prefix }} for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=$CROSS_C \ -DCMAKE_CXX_COMPILER=$CROSS_CXX \ -DCMAKE_STRIP=$CROSS_STRIP \ -DCMAKE_SYSTEM_NAME=Linux \ -DCMAKE_SYSTEM_PROCESSOR=${{ matrix.cmake_processor }} \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE via QEMU..." 
${{ matrix.qemu_binary }} -L $SYSROOT ./$BUILD_DIR/zxc_test echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # Tier 3: Experimental Linux architectures (cross-compiled, tested via QEMU) # =========================================================================== build-tier3: name: "Tier 3: ${{ matrix.name }}" runs-on: ubuntu-latest container: debian:trixie strategy: fail-fast: false matrix: include: - name: Linux mips64el cross_pkg: gcc-mips64el-linux-gnuabi64 cross_prefix: mips64el-linux-gnuabi64 cmake_processor: mips64 qemu_binary: qemu-mips64el - name: Linux loong64 cross_pkg: gcc-loongarch64-linux-gnu cross_prefix: loongarch64-linux-gnu cmake_processor: loongarch64 qemu_binary: qemu-loongarch64 - name: Linux alpha cross_pkg: gcc-alpha-linux-gnu cross_prefix: alpha-linux-gnu cmake_processor: alpha qemu_binary: qemu-alpha steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install cross-compiler run: | apt-get update apt-get install -y cmake build-essential ninja-build ${{ matrix.cross_pkg }} qemu-user - name: Configure, Build & Test (Static & Shared) shell: bash run: | CROSS_C=${{ matrix.cross_prefix }}-gcc CROSS_CXX=${{ matrix.cross_prefix }}-g++ CROSS_STRIP=/usr/bin/${{ matrix.cross_prefix }}-strip SYSROOT=/usr/${{ matrix.cross_prefix }} for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=$CROSS_C \ -DCMAKE_CXX_COMPILER=$CROSS_CXX \ -DCMAKE_STRIP=$CROSS_STRIP \ -DCMAKE_SYSTEM_NAME=Linux \ -DCMAKE_SYSTEM_PROCESSOR=${{ matrix.cmake_processor }} \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE via QEMU..." 
${{ matrix.qemu_binary }} -L $SYSROOT ./$BUILD_DIR/zxc_test echo ">>> Completed Build: $LIB_TYPE" echo "" donezxc-0.9.1/.github/workflows/multicomp.yml000066400000000000000000000250751515574030100204700ustar00rootroot00000000000000name: Compiler Compatibility on: workflow_dispatch: push: branches: [ main ] pull_request: branches: [ main ] permissions: contents: read concurrency: cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: # =========================================================================== # Clang 12–20 on Ubuntu # =========================================================================== clang: name: "Clang ${{ matrix.version }} (Ubuntu 22.04)" runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: version: [12, 13, 14, 15, 16, 17, 18, 19, 20] steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install Clang ${{ matrix.version }} run: | if [ ${{ matrix.version }} -ge 15 ]; then wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-${{ matrix.version }} main" | sudo tee /etc/apt/sources.list.d/llvm.list fi sudo apt-get update sudo apt-get install -y clang-${{ matrix.version }} - name: Configure, Build & Test (Static & Shared) shell: bash run: | for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: Clang ${{ matrix.version }} – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=clang-${{ matrix.version }} \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DZXC_ENABLE_LTO=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE..." 
ctest --test-dir $BUILD_DIR -C Release --output-on-failure echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # Apple Clang on macOS 14 # =========================================================================== clang-macos: name: "Apple Clang (macOS 14)" runs-on: macos-14 strategy: fail-fast: false steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Configure, Build & Test (Static & Shared) shell: bash run: | for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: Apple Clang – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE..." ctest --test-dir $BUILD_DIR -C Release --output-on-failure echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # GCC 9–14 on Ubuntu # =========================================================================== gcc-jammy: name: "GCC ${{ matrix.version }} (Ubuntu 22.04)" runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: version: [9, 10, 11, 12, 13] steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install GCC ${{ matrix.version }} run: | sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test sudo apt-get update sudo apt-get install -y gcc-${{ matrix.version }} g++-${{ matrix.version }} - name: Configure, Build & Test (Static & Shared) shell: bash run: | for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: GCC ${{ matrix.version }} – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . 
-B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc-${{ matrix.version }} \ -DCMAKE_CXX_COMPILER=g++-${{ matrix.version }} \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE..." ctest --test-dir $BUILD_DIR -C Release --output-on-failure echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # GCC 14 on Ubuntu 24.04 # =========================================================================== gcc-latest: name: "GCC 14 (Ubuntu 24.04)" runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install GCC 14 run: | sudo apt-get update sudo apt-get install -y gcc-14 g++-14 - name: Configure, Build & Test (Static & Shared) shell: bash run: | for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: GCC 14 – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc-14 \ -DCMAKE_CXX_COMPILER=g++-14 \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE..." 
ctest --test-dir $BUILD_DIR -C Release --output-on-failure echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # MinGW on Windows (32-bit & 64-bit) # =========================================================================== mingw: name: "MinGW ${{ matrix.name }}" runs-on: windows-latest strategy: fail-fast: false matrix: include: - name: "x86 32-bit" msystem: MINGW32 pkg_prefix: mingw-w64-i686 - name: "x86_64 64-bit" msystem: MINGW64 pkg_prefix: mingw-w64-x86_64 defaults: run: shell: msys2 {0} steps: - name: Setup MSYS2 uses: msys2/setup-msys2@v2 with: msystem: ${{ matrix.msystem }} update: true install: >- ${{ matrix.pkg_prefix }}-gcc ${{ matrix.pkg_prefix }}-cmake make - name: Checkout Repository uses: actions/checkout@v6 - name: Configure, Build & Test (Static & Shared) run: | for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: MinGW ${{ matrix.name }} – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -G "MSYS Makefiles" \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE..." 
ctest --test-dir $BUILD_DIR -C Release --output-on-failure echo ">>> Completed Build: $LIB_TYPE" echo "" done # =========================================================================== # GCC ARM cross-compilation on Ubuntu (32-bit & 64-bit) # =========================================================================== gcc-arm: name: "GCC ARM ${{ matrix.name }}" runs-on: ubuntu-latest strategy: fail-fast: false matrix: include: - name: "32-bit (armhf)" cross_pkg: gcc-arm-linux-gnueabihf cross_prefix: arm-linux-gnueabihf cmake_processor: arm qemu_binary: qemu-arm - name: "64-bit (aarch64)" cross_pkg: gcc-aarch64-linux-gnu cross_prefix: aarch64-linux-gnu cmake_processor: aarch64 qemu_binary: qemu-aarch64 steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install cross-compiler & QEMU run: | sudo apt-get update sudo apt-get install -y cmake build-essential ${{ matrix.cross_pkg }} qemu-user - name: Configure, Build & Test (Static & Shared) shell: bash run: | CROSS_C=${{ matrix.cross_prefix }}-gcc CROSS_CXX=${{ matrix.cross_prefix }}-g++ CROSS_STRIP=/usr/bin/${{ matrix.cross_prefix }}-strip SYSROOT=/usr/${{ matrix.cross_prefix }} for LIB_TYPE in "STATIC" "SHARED"; do echo ">>> Starting Build: GCC ARM ${{ matrix.name }} – $LIB_TYPE" BUILD_DIR="build_${LIB_TYPE}" SHARED_FLAG="OFF" if [ "$LIB_TYPE" == "SHARED" ]; then SHARED_FLAG="ON"; fi cmake -S . -B $BUILD_DIR \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=$CROSS_C \ -DCMAKE_CXX_COMPILER=$CROSS_CXX \ -DCMAKE_STRIP=$CROSS_STRIP \ -DCMAKE_SYSTEM_NAME=Linux \ -DCMAKE_SYSTEM_PROCESSOR=${{ matrix.cmake_processor }} \ -DCMAKE_C_STANDARD=17 \ -DZXC_NATIVE_ARCH=OFF \ -DBUILD_SHARED_LIBS=$SHARED_FLAG cmake --build $BUILD_DIR --config Release --parallel echo "Running Tests for $LIB_TYPE via QEMU..." 
${{ matrix.qemu_binary }} -L $SYSROOT ./$BUILD_DIR/zxc_test echo ">>> Completed Build: $LIB_TYPE" echo "" done zxc-0.9.1/.github/workflows/quality.yml000066400000000000000000000061641515574030100201450ustar00rootroot00000000000000name: Code Quality on: workflow_dispatch: push: branches: [ main ] pull_request: branches: [ main ] permissions: contents: read concurrency: cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: cppcheck: name: Code Quality - Cppcheck runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install Cppcheck and Cross-Compiler run: | sudo apt-get update sudo apt-get install -y cppcheck gcc-aarch64-linux-gnu libc6-dev-arm64-cross - name: Configure CMake (x86 Compile Database) run: mkdir build_x86 && cd build_x86 && cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .. - name: Configure CMake (ARM64 Compile Database) run: | mkdir build_arm && cd build_arm cmake -DCMAKE_SYSTEM_NAME=Linux \ -DCMAKE_SYSTEM_PROCESSOR=aarch64 \ -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .. - name: Run Cppcheck (x86 Variants) run: | cppcheck --project=build_x86/compile_commands.json \ --enable=all \ --suppress=missingIncludeSystem --suppress=*:src/lib/vendors/rapidhash.h \ --check-level=exhaustive \ --inline-suppr \ --error-exitcode=1 \ -j4 - name: Run Cppcheck (ARM64 Variants) run: | cppcheck --project=build_arm/compile_commands.json \ --enable=all \ --suppress=missingIncludeSystem --suppress=*:src/lib/vendors/rapidhash.h \ --check-level=exhaustive \ --inline-suppr \ --error-exitcode=1 \ -j4 quality-checks: name: Code Quality - Advanced Checks runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install Analysis Tools run: | sudo apt-get update sudo apt-get install -y clang-tools valgrind cmake - name: Clang Static Analyzer (Scan-Build) run: | mkdir -p build_clang scan-build cmake -S . 
-B build_clang -DCMAKE_BUILD_TYPE=Debug scan-build --status-bugs cmake --build build_clang - name: Build for Valgrind (Debug) run: | cmake -S . -B build_valgrind -DCMAKE_BUILD_TYPE=Debug -DZXC_NATIVE_ARCH=OFF cmake --build build_valgrind - name: Valgrind Memory Check working-directory: build_valgrind run: | valgrind --leak-check=full \ --show-leak-kinds=all \ --track-origins=yes \ --error-exitcode=1 \ ./zxc_test - name: Unicode Linting shell: bash run: | echo "Scanning for non-ASCII characters in source files..." if grep -rP --include="*.c" --include="*.h" "[^\x00-\x7F]" .; then echo "::error::Non-ASCII characters found in source files." exit 1 else echo "No non-ASCII characters found." fizxc-0.9.1/.github/workflows/security.yml000066400000000000000000000025721515574030100203230ustar00rootroot00000000000000name: Code Security on: workflow_dispatch: push: branches: [ main ] pull_request: branches: [ main ] permissions: contents: read security-events: write jobs: analyze: name: Analyze (${{ matrix.language }}) runs-on: ubuntu-latest timeout-minutes: 10 strategy: fail-fast: false matrix: language: [ 'c-cpp', 'rust', 'python' ] steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Initialize CodeQL uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} config-file: ./.github/codeql/codeql-config.yml - name: Install Dependencies run: | sudo apt-get update sudo apt-get install -y cmake build-essential - name: Build Project run: | if [ "${{ matrix.language }}" = "c-cpp" ]; then mkdir -p build cd build cmake -DCMAKE_BUILD_TYPE=Release .. make -j$(nproc) elif [ "${{ matrix.language }}" = "rust" ]; then cd wrappers/rust cargo build --workspace --all-targets elif [ "${{ matrix.language }}" = "python" ]; then cd wrappers/python python3 -m pip install -e . 
fi - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v4 with: category: "/language:${{ matrix.language }}" zxc-0.9.1/.github/workflows/vendors.yml000066400000000000000000000017611515574030100201330ustar00rootroot00000000000000name: Update Vendors on: workflow_dispatch: schedule: - cron: '0 4 * * 1' jobs: update-rapidhash-header: runs-on: ubuntu-latest permissions: contents: write pull-requests: write steps: - name: Checkout repo uses: actions/checkout@v6 - name: Download latest rapidhash.h run: | curl -o src/lib/vendors/rapidhash.h https://raw.githubusercontent.com/Nicoshev/rapidhash/refs/heads/master/rapidhash.h - name: Create Pull Request uses: peter-evans/create-pull-request@v8 with: token: ${{ secrets.GITHUB_TOKEN }} commit-message: "chore(deps): update rapidhash.h from upstream" title: "Automatic update of rapidhash.h" body: | This PR updates `rapidhash.h` with the latest version from the official repository. Source : [Nicoshev/rapidhash](https://github.com/Nicoshev/rapidhash) branch: auto-update/rapidhash delete-branch: truezxc-0.9.1/.github/workflows/wrapper-nodejs-publish.yml000066400000000000000000000071431515574030100230570ustar00rootroot00000000000000name: Publish Node.js Package on: workflow_dispatch: release: types: [ published ] permissions: contents: read jobs: build: name: Build on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - os: ubuntu-latest arch: x86_64 - os: ubuntu-24.04-arm arch: aarch64 - os: macos-latest arch: arm64 - os: macos-15-intel arch: x86_64 - os: windows-latest arch: AMD64 - os: windows-11-arm arch: ARM64 defaults: run: working-directory: ./wrappers/nodejs steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Set up Node.js uses: actions/setup-node@v6 with: node-version: "22" - name: Install dependencies and build run: npm install - name: Run tests run: npx vitest run --globals - name: Package prebuilt addon run: | mkdir -p prebuilds/${{ matrix.os }}-${{ 
matrix.arch }} cp build/Release/zxc_nodejs.node prebuilds/${{ matrix.os }}-${{ matrix.arch }}/ - name: Upload prebuilt addon uses: actions/upload-artifact@v7 with: name: prebuilt-${{ matrix.os }}-${{ matrix.arch }} path: wrappers/nodejs/prebuilds/ test: name: Test on ${{ matrix.os }} Node ${{ matrix.node }} needs: [build] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, ubuntu-24.04-arm, macos-latest, macos-15-intel, windows-latest, windows-11-arm] node: ["18", "20", "22"] exclude: # Node.js 18 and 20 don't have official Windows ARM64 builds - os: windows-11-arm node: "18" - os: windows-11-arm node: "20" defaults: run: working-directory: ./wrappers/nodejs steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Set up Node.js uses: actions/setup-node@v6 with: node-version: ${{ matrix.node }} - name: Install dependencies and build run: npm install - name: Run tests run: npx vitest run --globals publish: name: Publish to npm needs: [build, test] runs-on: ubuntu-latest if: github.event_name == 'release' permissions: id-token: write defaults: run: working-directory: ./wrappers/nodejs steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Set up Node.js uses: actions/setup-node@v6 with: node-version: "22" registry-url: "https://registry.npmjs.org" - name: Verify Version Matches Release Tag run: | RELEASE_VERSION="${{ github.event.release.tag_name }}" # Remove 'v' prefix if present RELEASE_VERSION="${RELEASE_VERSION#v}" PACKAGE_VERSION=$(node -p "require('./package.json').version") if [ "$RELEASE_VERSION" != "$PACKAGE_VERSION" ]; then echo "Version mismatch: Release tag is $RELEASE_VERSION but package.json has $PACKAGE_VERSION" exit 1 fi echo "Version matches: $RELEASE_VERSION" - name: Publish to npm run: npm publish --provenance --access public env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - name: Summary if: success() run: | echo "Successfully published zxc-compress to npm!" 
echo "Package: https://www.npmjs.com/package/zxc-compress" zxc-0.9.1/.github/workflows/wrapper-python-publish.yml000066400000000000000000000076251515574030100231230ustar00rootroot00000000000000name: Publish Python Package on: workflow_dispatch: release: types: [ published ] permissions: contents: read jobs: build_wheels: name: Build wheels on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - os: ubuntu-latest arch: x86_64 - os: ubuntu-24.04-arm arch: aarch64 - os: macos-latest arch: arm64 - os: macos-15-intel arch: x86_64 - os: windows-latest arch: AMD64 - os: windows-11-arm arch: ARM64 steps: - name: Checkout Repository uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install build tools run: python -m pip install cibuildwheel==3.3.1 setuptools-scm - name: Get version from git id: version run: echo "version=$(python -m setuptools_scm)" >> $GITHUB_OUTPUT working-directory: ./wrappers/python - name: Build wheels run: python -m cibuildwheel wrappers/python --output-dir wheelhouse env: # Build for Python 3.10-3.13 CIBW_BUILD: "cp310-* cp311-* cp312-* cp313-*" # Skip 32-bit, musllinux, and Python 3.10-3.11 on Windows (missing Development.Module) CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux_* cp310-win* cp311-win*" CIBW_ARCHS: "${{ matrix.arch }}" CIBW_ENVIRONMENT: "CMAKE_ARGS='-DZXC_NATIVE_ARCH=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON' SETUPTOOLS_SCM_PRETEND_VERSION=${{ steps.version.outputs.version }}" - name: Upload wheels uses: actions/upload-artifact@v7 with: name: wheels-${{ matrix.os }}-${{ matrix.arch }} path: ./wheelhouse/*.whl build_sdist: name: Build source distribution runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install build run: python -m pip install build - name: Build sdist run: 
python -m build --sdist working-directory: ./wrappers/python - name: Upload sdist uses: actions/upload-artifact@v7 with: name: sdist path: ./wrappers/python/dist/*.tar.gz test_wheels: name: Test wheels on ${{ matrix.os }} ${{ matrix.python }} needs: [build_wheels] runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, ubuntu-24.04-arm, macos-latest, macos-15-intel, windows-latest, windows-11-arm] python: ["3.12", "3.13"] steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} - name: Download wheels uses: actions/download-artifact@v8 with: path: wheelhouse pattern: wheels-* merge-multiple: true - name: Install wheel and test dependencies run: | pip install --no-index --find-links wheelhouse zxc-compress pip install pytest - name: Run test suite run: pytest wrappers/python/tests/ -v publish: name: Publish to PyPI needs: [build_wheels, build_sdist, test_wheels] runs-on: ubuntu-latest if: github.event_name == 'release' permissions: id-token: write steps: - name: Download all artifacts uses: actions/download-artifact@v8 with: path: dist merge-multiple: true - name: Publish to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: packages-dir: dist/ zxc-0.9.1/.github/workflows/wrapper-rust-publish.yml000066400000000000000000000052341515574030100225710ustar00rootroot00000000000000name: Publish Rust Crates on: workflow_dispatch: release: types: [ published ] permissions: contents: read jobs: test: name: Test on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - os: ubuntu-latest arch: x86_64 - os: ubuntu-24.04-arm arch: aarch64 - os: macos-latest arch: x86_64 arm64 - os: windows-latest arch: AMD64 - os: windows-11-arm arch: ARM64 defaults: run: working-directory: ./wrappers/rust steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install Rust Toolchain uses: actions-rust-lang/setup-rust-toolchain@v1 with: 
toolchain: stable - name: Run Tests run: cargo test --workspace publish: name: Publish to crates.io needs: [test] runs-on: ubuntu-latest if: github.event_name == 'release' defaults: run: working-directory: ./wrappers/rust steps: - name: Checkout Repository uses: actions/checkout@v6 - name: Install Rust Toolchain uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: stable - name: Verify Version Matches Release Tag run: | RELEASE_VERSION="${{ github.event.release.tag_name }}" # Remove 'v' prefix if present RELEASE_VERSION="${RELEASE_VERSION#v}" CARGO_VERSION=$(grep '^version' Cargo.toml | head -1 | sed 's/.*"\(.*\)"/\1/') if [ "$RELEASE_VERSION" != "$CARGO_VERSION" ]; then echo "Version mismatch: Release tag is $RELEASE_VERSION but Cargo.toml has $CARGO_VERSION" exit 1 fi echo "Version matches: $RELEASE_VERSION" - name: Publish zxc-compress-sys (FFI bindings) run: | cd zxc-sys cargo publish --token ${{ secrets.CARGO_REGISTRY_TOKEN }} continue-on-error: false # Wait for zxc-compress-sys to be available on crates.io before publishing zxc-compress - name: Wait for zxc-compress-sys to propagate run: sleep 30 - name: Publish zxc-compress (safe wrapper) run: | cd zxc cargo publish --token ${{ secrets.CARGO_REGISTRY_TOKEN }} continue-on-error: false - name: Summary if: success() run: | echo "Successfully published:" echo "- zxc-compress-sys (FFI bindings)" echo "- zxc-compress (safe wrapper)" echo "" echo "Crates are now available on crates.io!" zxc-0.9.1/.gitignore000066400000000000000000000022041515574030100143140ustar00rootroot00000000000000# Prerequisites *.d # Object files *.o *.ko *.obj *.elf # Linker output *.ilk *.map *.exp # Precompiled Headers *.gch *.pch # Libraries *.lib *.a *.la *.lo # Shared objects (inc. 
Windows DLLs) *.dll *.so *.so.* *.dylib # Executables *.exe *.out *.app *.i*86 *.x86_64 *.hex # Debug files *.dSYM/ *.su *.idb *.pdb # Kernel Module Compile Results *.mod* *.cmd .tmp_versions/ modules.order Module.symvers Mkfile.old dkms.conf # debug information files *.dwo CMakeLists.txt.user CMakeCache.txt CMakeFiles CMakeScripts Testing Makefile cmake_install.cmake install_manifest.txt compile_commands.json CTestTestfile.cmake _deps CMakeUserPresets.json # CLion # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #cmake-build-* build/* # Doxygen docs/html/ docs/latex/ docs/rtf/ docs/man/*.1 docs/xml/ *.doxygen_sqlite3 doxyfile.inc searchdata.xmlzxc-0.9.1/.snyk000066400000000000000000000001051515574030100133070ustar00rootroot00000000000000# Snyk (https://snyk.io) policy file exclude: code: - tests/** zxc-0.9.1/CMakeLists.txt000066400000000000000000000454101515574030100150720ustar00rootroot00000000000000# ZXC - High-performance lossless compression # # Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
# SPDX-License-Identifier: BSD-3-Clause cmake_minimum_required(VERSION 3.14) # ============================================================================= # Version extraction from header # ============================================================================= file(READ "include/zxc_constants.h" version_header) string(REGEX MATCH "#define ZXC_VERSION_MAJOR ([0-9]+)" _ "${version_header}") set(MAJOR_VER ${CMAKE_MATCH_1}) string(REGEX MATCH "#define ZXC_VERSION_MINOR ([0-9]+)" _ "${version_header}") set(MINOR_VER ${CMAKE_MATCH_1}) string(REGEX MATCH "#define ZXC_VERSION_PATCH ([0-9]+)" _ "${version_header}") set(PATCH_VER ${CMAKE_MATCH_1}) project(zxc VERSION ${MAJOR_VER}.${MINOR_VER}.${PATCH_VER} LANGUAGES C DESCRIPTION "High-performance asymmetric lossless compression library" ) # ============================================================================= # Build Options # ============================================================================= option(BUILD_SHARED_LIBS "Build shared libraries instead of static" OFF) option(ZXC_NATIVE_ARCH "Enable -march=native for maximum performance" ON) option(ZXC_ENABLE_LTO "Enable Interprocedural Optimization (LTO)" ON) set(ZXC_PGO_MODE "OFF" CACHE STRING "Profile-Guided Optimization mode (OFF/GENERATE/USE)") set_property(CACHE ZXC_PGO_MODE PROPERTY STRINGS OFF GENERATE USE) option(ZXC_BUILD_CLI "Build the command-line interface" ON) option(ZXC_BUILD_TESTS "Build unit tests" ON) option(ZXC_ENABLE_COVERAGE "Enable code coverage generation" OFF) # ============================================================================= # C Standard # ============================================================================= set(CMAKE_C_STANDARD 17) set(CMAKE_C_STANDARD_REQUIRED ON) set(CMAKE_C_EXTENSIONS OFF) # Enable _GNU_SOURCE for ftello/fseeko on Linux if(UNIX) add_compile_definitions(_GNU_SOURCE) endif() # Check for LTO support if(ZXC_ENABLE_LTO AND NOT ZXC_ENABLE_COVERAGE) include(CheckIPOSupported) 
check_ipo_supported(RESULT result OUTPUT output) if(result) message(STATUS "LTO/IPO is supported and enabled.") else() message(WARNING "LTO/IPO is not supported: ${output}") set(ZXC_ENABLE_LTO OFF) endif() elseif(ZXC_ENABLE_COVERAGE) message(STATUS "Code coverage enabled: Disabling LTO and PGO.") set(ZXC_ENABLE_LTO OFF) set(ZXC_PGO_MODE "OFF") endif() # ============================================================================= # Rapidhash: system-installed (e.g. vcpkg) or vendored fallback # ============================================================================= find_path(RAPIDHASH_INCLUDE_DIR rapidhash.h) if(NOT RAPIDHASH_INCLUDE_DIR) set(RAPIDHASH_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src/lib/vendors") message(STATUS "Using vendored rapidhash from ${RAPIDHASH_INCLUDE_DIR}") else() message(STATUS "Found system rapidhash in ${RAPIDHASH_INCLUDE_DIR}") endif() # ============================================================================= # Core Library & Runtime Dispatch # ============================================================================= # Function Multi-Versioning Helper # Compiles src/lib/zxc_compress.c and src/lib/zxc_decompress.c with specific flags and suffix. 
macro(zxc_add_variant suffix flags) add_library(zxc_compress${suffix} OBJECT src/lib/zxc_compress.c) target_compile_options(zxc_compress${suffix} PRIVATE ${flags}) target_compile_definitions(zxc_compress${suffix} PRIVATE ZXC_FUNCTION_SUFFIX=${suffix}) # For static builds, define ZXC_STATIC_DEFINE if(NOT BUILD_SHARED_LIBS) target_compile_definitions(zxc_compress${suffix} PRIVATE ZXC_STATIC_DEFINE) else() # Mark as part of the DLL being built (avoids dllimport on internal symbols) target_compile_definitions(zxc_compress${suffix} PRIVATE zxc_lib_EXPORTS) set_target_properties(zxc_compress${suffix} PROPERTIES POSITION_INDEPENDENT_CODE ON) # Hide variant symbols from shared library public ABI if(NOT MSVC) target_compile_options(zxc_compress${suffix} PRIVATE -fvisibility=hidden) endif() endif() # Inherit include directories target_include_directories(zxc_compress${suffix} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/lib ${RAPIDHASH_INCLUDE_DIR} PUBLIC $) add_library(zxc_decompress${suffix} OBJECT src/lib/zxc_decompress.c) target_compile_options(zxc_decompress${suffix} PRIVATE ${flags}) target_compile_definitions(zxc_decompress${suffix} PRIVATE ZXC_FUNCTION_SUFFIX=${suffix}) # For static builds, define ZXC_STATIC_DEFINE if(NOT BUILD_SHARED_LIBS) target_compile_definitions(zxc_decompress${suffix} PRIVATE ZXC_STATIC_DEFINE) else() # Mark as part of the DLL being built (avoids dllimport on internal symbols) target_compile_definitions(zxc_decompress${suffix} PRIVATE zxc_lib_EXPORTS) set_target_properties(zxc_decompress${suffix} PROPERTIES POSITION_INDEPENDENT_CODE ON) # Hide variant symbols from shared library public ABI if(NOT MSVC) target_compile_options(zxc_decompress${suffix} PRIVATE -fvisibility=hidden) endif() endif() target_include_directories(zxc_decompress${suffix} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/lib ${RAPIDHASH_INCLUDE_DIR} PUBLIC $) list(APPEND ZXC_VARIANT_OBJECTS $ $) endmacro() set(ZXC_VARIANT_OBJECTS "") # --- 1. 
Default Variant (Scalar/Baseline) --- zxc_add_variant(_default "") # --- 2. Architecture Specific Variants --- if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64") message(STATUS "Building x86_64 AVX2 and AVX512 variants...") if(MSVC) # AVX2 for MSVC (Enables AVX2/BMI2 sets) zxc_add_variant(_avx2 "/arch:AVX2;/D__BMI2__") # AVX512 for MSVC (VS2019 16.10+ supports /arch:AVX512) zxc_add_variant(_avx512 "/arch:AVX512;/D__BMI2__") else() # AVX2 for GCC/Clang zxc_add_variant(_avx2 "-mavx2;-mfma;-mbmi2") # AVX512 for GCC/Clang zxc_add_variant(_avx512 "-mavx512f;-mavx512bw;-mbmi2") endif() elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64|ARM64") message(STATUS "Building AArch64 NEON variant...") if(MSVC) # MSVC for ARM64 implies NEON support by default. zxc_add_variant(_neon "") else() # NEON is usually default on AArch64, but we add a specific variant for structure zxc_add_variant(_neon "-march=armv8-a+simd") endif() elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") message(STATUS "Building ARM NEON variant...") zxc_add_variant(_neon "-march=armv7-a;-mfpu=neon") endif() add_library(zxc_lib src/lib/zxc_common.c src/lib/zxc_driver.c src/lib/zxc_dispatch.c ${ZXC_VARIANT_OBJECTS} ) # ============================================================================= # ABI Versioning (Debian/Linux shared libraries) # ============================================================================= # Increment this number ONLY when breaking the ABI. 
set(ZXC_SOVERSION 2) # Set library output name and version set_target_properties(zxc_lib PROPERTIES OUTPUT_NAME zxc VERSION ${PROJECT_VERSION} SOVERSION ${ZXC_SOVERSION} ) # Target-based include directories for the main lib target_include_directories(zxc_lib PUBLIC $ $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/lib ${RAPIDHASH_INCLUDE_DIR} ) # Symbol visibility for shared libraries if(BUILD_SHARED_LIBS) # Set visibility for GCC/Clang if(NOT MSVC) target_compile_options(zxc_lib PRIVATE -fvisibility=hidden) set_target_properties(zxc_lib PROPERTIES C_VISIBILITY_PRESET hidden VISIBILITY_INLINES_HIDDEN YES ) endif() else() # For static libraries, define ZXC_STATIC_DEFINE to avoid dllimport/dllexport target_compile_definitions(zxc_lib PUBLIC ZXC_STATIC_DEFINE) endif() # ============================================================================= # Compiler-specific options (using generator expressions) # ============================================================================= if(NOT MSVC) target_compile_options(zxc_lib PRIVATE $<$>:-O3> -Wall -Wextra -fomit-frame-pointer -fstrict-aliasing # Native Arch $<$:-march=native> # Dead code elimination -ffunction-sections -fdata-sections ) # Profile-Guided Optimization if(ZXC_PGO_MODE STREQUAL "GENERATE") target_compile_options(zxc_lib PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) target_link_options(zxc_lib PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) message(STATUS "PGO: Generating profile data to ${CMAKE_BINARY_DIR}/pgo") elseif(ZXC_PGO_MODE STREQUAL "USE") if(EXISTS "${CMAKE_BINARY_DIR}/pgo") target_compile_options(zxc_lib PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo -fprofile-correction) target_link_options(zxc_lib PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo) message(STATUS "PGO: Using profile data from ${CMAKE_BINARY_DIR}/pgo") else() message(FATAL_ERROR "PGO: Profile data not found. 
Run with ZXC_PGO_MODE=GENERATE first.") endif() endif() # Linker options for Dead Code Stripping if(APPLE) target_link_options(zxc_lib PRIVATE -Wl,-dead_strip) else() target_link_options(zxc_lib PRIVATE -Wl,--gc-sections) endif() else() target_compile_options(zxc_lib PRIVATE $<$:/O2> /W3) target_compile_definitions(zxc_lib PRIVATE _CRT_SECURE_NO_WARNINGS) endif() target_compile_definitions(zxc_lib PRIVATE $<$>:_GNU_SOURCE> ) # Coverage flags if(ZXC_ENABLE_COVERAGE) if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") target_compile_options(zxc_lib PRIVATE --coverage -fprofile-update=atomic) target_link_options(zxc_lib PRIVATE --coverage) endif() endif() # Enable LTO cleanly if(ZXC_ENABLE_LTO) set_property(TARGET zxc_lib PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) if(NOT MSVC) target_compile_options(zxc_lib PRIVATE -flto) target_link_options(zxc_lib PRIVATE -flto) endif() endif() # Threading support find_package(Threads REQUIRED) target_link_libraries(zxc_lib PRIVATE Threads::Threads) # ============================================================================= # CLI Executable # ============================================================================= if(ZXC_BUILD_CLI) add_executable(zxc src/cli/main.c) target_link_libraries(zxc PRIVATE zxc_lib) target_include_directories(zxc PRIVATE ${RAPIDHASH_INCLUDE_DIR}) # Math library on Unix if(UNIX) target_link_libraries(zxc PRIVATE m) endif() # Propagate compile options and definitions target_compile_options(zxc PRIVATE $<$>,$>:-march=native> ) target_compile_definitions(zxc PRIVATE $<$:_CRT_SECURE_NO_WARNINGS> $<$>:_GNU_SOURCE> ) # Coverage flags for CLI if(ZXC_ENABLE_COVERAGE) if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") target_compile_options(zxc PRIVATE --coverage) target_link_options(zxc PRIVATE --coverage) endif() endif() # Enable LTO cleanly if(ZXC_ENABLE_LTO) set_property(TARGET zxc PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE) if(NOT MSVC) target_compile_options(zxc PRIVATE -flto) target_link_options(zxc PRIVATE -flto) 
endif() endif() # Profile-Guided Optimization for CLI if(NOT MSVC) if(ZXC_PGO_MODE STREQUAL "GENERATE") target_compile_options(zxc PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) target_link_options(zxc PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) elseif(ZXC_PGO_MODE STREQUAL "USE") target_compile_options(zxc PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo -fprofile-correction) target_link_options(zxc PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo) endif() endif() # Linker options for Dead Code Stripping if(NOT MSVC) if(APPLE) target_link_options(zxc PRIVATE -Wl,-dead_strip) else() target_link_options(zxc PRIVATE -Wl,--gc-sections) endif() endif() # Strip symbols in Release mode for smaller binary if(NOT MSVC AND CMAKE_BUILD_TYPE STREQUAL "Release") # Set default strip command if not already set (e.g., for cross-compilation) if(NOT CMAKE_STRIP) set(CMAKE_STRIP strip) endif() add_custom_command(TARGET zxc POST_BUILD COMMAND ${CMAKE_STRIP} $ COMMENT "Stripping symbols from zxc" ) endif() endif() # ============================================================================= # Tests # ============================================================================= if(ZXC_BUILD_TESTS) enable_testing() add_executable(zxc_test tests/test.c) # When building shared libraries, create a static version for tests # This allows tests to access internal functions for unit testing if(BUILD_SHARED_LIBS) # Create a static library specifically for tests add_library(zxc_lib_static STATIC src/lib/zxc_common.c src/lib/zxc_driver.c src/lib/zxc_dispatch.c ${ZXC_VARIANT_OBJECTS} ) # Copy all properties from the shared library target_include_directories(zxc_lib_static PUBLIC $ $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/lib ${RAPIDHASH_INCLUDE_DIR} ) # Apply same compiler settings as main library target_compile_options(zxc_lib_static PRIVATE $ ) target_compile_definitions(zxc_lib_static PUBLIC ZXC_STATIC_DEFINE) target_link_libraries(zxc_lib_static PRIVATE Threads::Threads) # Link tests 
against static library target_link_libraries(zxc_test PRIVATE zxc_lib_static) else() # For static builds, use the main library target_link_libraries(zxc_test PRIVATE zxc_lib) endif() # Propagate compile options target_compile_options(zxc_test PRIVATE $<$>,$>:-march=native> ) # Coverage flags for Tests if(ZXC_ENABLE_COVERAGE) if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") target_link_options(zxc_test PRIVATE --coverage) endif() endif() # Profile-Guided Optimization for tests if(NOT MSVC) if(ZXC_PGO_MODE STREQUAL "GENERATE") target_compile_options(zxc_test PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) target_link_options(zxc_test PRIVATE -fprofile-generate=${CMAKE_BINARY_DIR}/pgo) elseif(ZXC_PGO_MODE STREQUAL "USE") target_compile_options(zxc_test PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo -fprofile-correction) target_link_options(zxc_test PRIVATE -fprofile-use=${CMAKE_BINARY_DIR}/pgo) endif() endif() target_include_directories(zxc_test PRIVATE src/lib ${RAPIDHASH_INCLUDE_DIR}) add_test(NAME UnitTests COMMAND zxc_test) endif() # ============================================================================= # Installation # ============================================================================= include(GNUInstallDirs) configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zxc.pc.in ${CMAKE_CURRENT_BINARY_DIR}/zxc.pc @ONLY ) install(TARGETS zxc_lib EXPORT zxc-targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} ) install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.h" ) if(ZXC_BUILD_CLI) install(TARGETS zxc RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} ) endif() install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zxc.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig ) # CMake package config files for find_package(zxc) include(CMakePackageConfigHelpers) install(EXPORT zxc-targets FILE zxc-targets.cmake 
NAMESPACE zxc:: DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/zxc ) configure_package_config_file( ${CMAKE_CURRENT_SOURCE_DIR}/zxcConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/zxcConfig.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/zxc ) write_basic_package_version_file( ${CMAKE_CURRENT_BINARY_DIR}/zxcConfigVersion.cmake VERSION ${PROJECT_VERSION} COMPATIBILITY SameMajorVersion ) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zxcConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/zxcConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/zxc ) # ============================================================================= # Summary # ============================================================================= message(STATUS "") message(STATUS "ZXC Configuration Summary:") message(STATUS " Version: ${PROJECT_VERSION}") if(BUILD_SHARED_LIBS) message(STATUS " Library Type: Shared") else() message(STATUS " Library Type: Static") endif() message(STATUS " Native Arch: ${ZXC_NATIVE_ARCH}") message(STATUS " LTO Enabled: ${ZXC_ENABLE_LTO}") message(STATUS " PGO Mode: ${ZXC_PGO_MODE}") message(STATUS " Build CLI: ${ZXC_BUILD_CLI}") message(STATUS " Build Tests: ${ZXC_BUILD_TESTS}") message(STATUS "") # ============================================================================= # Documentation (Doxygen) # ============================================================================= find_package(Doxygen) if(DOXYGEN_FOUND) # Generate the Doxyfile with the current project version configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) # Add a 'doc' target to generate documentation (e.g. 'make doc' or 'cmake --build . 
--target doc') add_custom_target(doc COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "Generating API documentation with Doxygen" VERBATIM ) endif() zxc-0.9.1/Doxyfile.in000066400000000000000000000251371515574030100144510ustar00rootroot00000000000000# Doxyfile 1.9.1 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- PROJECT_NAME = "@PROJECT_NAME@" PROJECT_NUMBER = "@PROJECT_VERSION@" PROJECT_BRIEF = "@PROJECT_DESCRIPTION@" OUTPUT_DIRECTORY = docs CREATE_SUBDIRS = NO ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = YES STRIP_FROM_PATH = STRIP_FROM_INC_PATH = SHORT_NAMES = NO JAVADOC_AUTOBRIEF = YES JAVADOC_BANNER = NO QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO PYTHON_DOCSTRING = YES INHERIT_DOCS = YES SEPARATE_MEMBER_PAGES = NO TAB_SIZE = 4 ALIASES = OPTIMIZE_OUTPUT_FOR_C = YES OPTIMIZE_OUTPUT_JAVA = NO OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO OPTIMIZE_OUTPUT_SLICE = NO EXTENSION_MAPPING = MARKDOWN_SUPPORT = YES TOC_INCLUDE_HEADINGS = 5 AUTOLINK_SUPPORT = YES BUILTIN_STL_SUPPORT = NO CPP_CLI_SUPPORT = NO SIP_SUPPORT = NO IDL_PROPERTY_SUPPORT = YES DISTRIBUTE_GROUP_DOC = NO GROUP_NESTED_COMPOUNDS = NO SUBGROUPING = YES INLINE_GROUPED_CLASSES = NO INLINE_SIMPLE_STRUCTS = NO TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 0 NUM_PROC_THREADS = 1 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- EXTRACT_ALL = NO EXTRACT_PRIVATE = NO EXTRACT_PRIV_VIRTUAL = NO EXTRACT_PACKAGE = NO EXTRACT_STATIC = YES EXTRACT_LOCAL_CLASSES = NO EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO 
RESOLVE_UNNAMED_PARAMS = YES HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO HIDE_FRIEND_COMPOUNDS = NO HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO CASE_SENSE_NAMES = YES HIDE_SCOPE_NAMES = NO HIDE_COMPOUND_REFERENCE= NO SHOW_HEADERFILE = YES SHOW_INCLUDE_FILES = YES SHOW_GROUPED_MEMB_INC = NO FORCE_LOCAL_INCLUDES = NO INLINE_INFO = YES SORT_MEMBER_DOCS = YES SORT_BRIEF_DOCS = NO SORT_MEMBERS_CTORS_1ST = NO SORT_GROUP_NAMES = NO SORT_BY_SCOPE_NAME = NO STRICT_PROTO_MATCHING = NO GENERATE_TODOLIST = YES GENERATE_TESTLIST = YES GENERATE_BUGLIST = YES GENERATE_DEPRECATEDLIST= YES ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = YES SHOW_FILES = YES SHOW_NAMESPACES = YES FILE_VERSION_FILTER = LAYOUT_FILE = CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- QUIET = NO WARNINGS = YES WARN_IF_UNDOCUMENTED = NO WARN_IF_DOC_ERROR = YES WARN_IF_INCOMPLETE_DOC = YES WARN_NO_PARAMDOC = YES WARN_AS_ERROR = NO WARN_FORMAT = "$file:$line: $text" WARN_LINE_FORMAT = "at line $line of file $file" WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- INPUT = include \ src/lib INPUT_ENCODING = UTF-8 INPUT_FILE_ENCODING = FILE_PATTERNS = *.h \ *.c RECURSIVE = YES EXCLUDE = EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = */test/* \ */tests/* \ */build/* \ */target/* \ */src/lib/*.c EXCLUDE_SYMBOLS = EXAMPLE_PATH = EXAMPLE_PATTERNS = * EXAMPLE_RECURSIVE = NO IMAGE_PATH = INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO FILTER_SOURCE_PATTERNS = USE_MDFILE_AS_MAINPAGE = README.md #--------------------------------------------------------------------------- # Configuration options related to source browsing 
#--------------------------------------------------------------------------- SOURCE_BROWSER = YES INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES REFERENCED_BY_RELATION = YES REFERENCES_RELATION = YES REFERENCES_LINK_SOURCE = YES SOURCE_TOOLTIPS = YES USE_HTAGS = NO VERBATIM_HEADERS = YES CLANG_ASSISTED_PARSING = NO CLANG_ADD_INC_PATHS = YES CLANG_OPTIONS = CLANG_DATABASE_PATH = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = YES IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- GENERATE_HTML = YES HTML_OUTPUT = html HTML_FILE_EXTENSION = .html HTML_HEADER = HTML_FOOTER = HTML_STYLESHEET = HTML_EXTRA_STYLESHEET = HTML_EXTRA_FILES = HTML_COLORSTYLE = AUTO_LIGHT HTML_COLORSTYLE_HUE = 220 HTML_COLORSTYLE_SAT = 100 HTML_COLORSTYLE_GAMMA = 80 HTML_TIMESTAMP = NO HTML_DYNAMIC_MENUS = YES HTML_DYNAMIC_SECTIONS = NO HTML_INDEX_NUM_ENTRIES = 100 GENERATE_DOCSET = NO GENERATE_HTMLHELP = NO GENERATE_QHP = NO GENERATE_ECLIPSEHELP = NO DISABLE_INDEX = NO GENERATE_TREEVIEW = YES FULL_SIDEBAR = NO ENUM_VALUES_PER_LINE = 4 TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO OBFUSCATE_EMAILS = YES HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 FORMULA_MACROFILE = USE_MATHJAX = NO MATHJAX_VERSION = MathJax_2 MATHJAX_FORMAT = HTML-CSS MATHJAX_RELPATH = MATHJAX_EXTENSIONS = MATHJAX_CODEFILE = SEARCHENGINE = YES SERVER_BASED_SEARCH = NO EXTERNAL_SEARCH = NO SEARCHENGINE_URL = SEARCHDATA_FILE = searchdata.xml EXTERNAL_SEARCH_ID = EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # Configuration options related to the LaTeX output 
#--------------------------------------------------------------------------- GENERATE_LATEX = NO #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- GENERATE_RTF = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- GENERATE_MAN = NO #--------------------------------------------------------------------------- # Configuration options related to the XML output #--------------------------------------------------------------------------- GENERATE_XML = NO #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- GENERATE_DOCBOOK = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # Configuration options related to Sqlite3 output #--------------------------------------------------------------------------- GENERATE_SQLITE3 = NO #--------------------------------------------------------------------------- # Configuration options related to the Perl module output #--------------------------------------------------------------------------- GENERATE_PERLMOD = NO #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- ENABLE_PREPROCESSING = YES MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = NO SEARCH_INCLUDES = YES 
INCLUDE_PATH = include INCLUDE_FILE_PATTERNS = PREDEFINED = ZXC_EXPORT= \ ZXC_NO_EXPORT= \ ZXC_DEPRECATED= \ __attribute__(x)= EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration options related to external references #--------------------------------------------------------------------------- TAGFILES = GENERATE_TAGFILE = ALLEXTERNALS = NO EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES #--------------------------------------------------------------------------- # Configuration options related to diagram generator tools #--------------------------------------------------------------------------- HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO DOT_NUM_THREADS = 0 DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10" DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10" DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4" DOT_FONTPATH = CLASS_GRAPH = YES COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES UML_LOOK = NO UML_LIMIT_NUM_FIELDS = 10 DOT_UML_DETAILS = NO DOT_WRAP_THRESHOLD = 17 TEMPLATE_RELATIONS = NO INCLUDE_GRAPH = YES INCLUDED_BY_GRAPH = YES CALL_GRAPH = NO CALLER_GRAPH = NO GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES DIR_GRAPH_MAX_DEPTH = 1 DOT_IMAGE_FORMAT = png INTERACTIVE_SVG = NO DOT_PATH = DOTFILE_DIRS = DIA_PATH = DIAFILE_DIRS = PLANTUML_JAR_PATH = PLANTUML_CFG_FILE = PLANTUML_INCLUDE_PATH = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES MSCGEN_TOOL = MSCFILE_DIRS = zxc-0.9.1/LICENSE000066400000000000000000000063351515574030100133420ustar00rootroot00000000000000============================================================================== ZXC Copyright (c) 2025-2026, Bertrand Lebonnois and contributors License: BSD 3-Clause ============================================================================== BSD 3-Clause License ==================== Copyright (c) 2025-2026, Bertrand Lebonnois and contributors All 
rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of ZXC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BERTRAND LEBONNOIS OR CONTRIBUTORS BE LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ============================================================================== This project includes code from rapidhash Copyright (C) 2025 Nicolas De Carli License: MIT (Expat) ============================================================================== rapidhash - Very fast, high quality, platform-independent hashing algorithm. 
Copyright (C) 2025 Nicolas De Carli Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
You can contact the author at: - rapidhash source repository: https://github.com/Nicoshev/rapidhash zxc-0.9.1/README.md000066400000000000000000000516071515574030100136160ustar00rootroot00000000000000# ZXC: High-Performance Asymmetric Lossless Compression [![Build & Release](https://github.com/hellobertrand/zxc/actions/workflows/build.yml/badge.svg)](https://github.com/hellobertrand/zxc/actions/workflows/build.yml) [![Code Quality](https://github.com/hellobertrand/zxc/actions/workflows/quality.yml/badge.svg)](https://github.com/hellobertrand/zxc/actions/workflows/quality.yml) [![Fuzzing](https://github.com/hellobertrand/zxc/actions/workflows/fuzzing.yml/badge.svg)](https://github.com/hellobertrand/zxc/actions/workflows/fuzzing.yml) [![Benchmark](https://github.com/hellobertrand/zxc/actions/workflows/benchmark.yml/badge.svg)](https://github.com/hellobertrand/zxc/actions/workflows/benchmark.yml) [![Code Coverage](https://codecov.io/github/hellobertrand/zxc/branch/main/graph/badge.svg?token=LHA03HOA1X)](https://codecov.io/github/hellobertrand/zxc) [![ConanCenter](https://repology.org/badge/version-for-repo/conancenter/zxc.svg)](https://repology.org/project/zxc/versions) [![Vcpkg](https://repology.org/badge/version-for-repo/vcpkg/zxc.svg)](https://repology.org/project/zxc/versions) [![Homebrew](https://repology.org/badge/version-for-repo/homebrew/zxc.svg)](https://repology.org/project/zxc/versions) [![Debian 14](https://repology.org/badge/version-for-repo/debian_14/zxc.svg)](https://repology.org/project/zxc/versions) [![Ubuntu 26.04](https://repology.org/badge/version-for-repo/ubuntu_26_04/zxc.svg)](https://repology.org/project/zxc/versions) [![Crates.io](https://img.shields.io/crates/v/zxc-compress)](https://crates.io/crates/zxc-compress) [![PyPi](https://img.shields.io/pypi/v/zxc-compress)](https://pypi.org/project/zxc-compress) [![npm](https://img.shields.io/npm/v/zxc-compress)](https://www.npmjs.com/package/zxc-compress) 
[![License](https://img.shields.io/badge/license-BSD--3--Clause-blue)](LICENSE) **ZXC** is a high-performance, lossless, asymmetric compression library optimized for Content Delivery and Embedded Systems (Game Assets, Firmware, App Bundles). It is designed to be **"Write Once, Read Many"** *(WORM)*. Unlike codecs like LZ4, ZXC trades compression speed (build-time) for **maximum decompression throughput** (run-time). ## TL;DR - **What:** A C library for lossless compression, optimized for **maximum decompression speed**. - **Key Result:** Up to **>40% faster** decompression than LZ4 on Apple Silicon, **>25% faster** on Google Axion (ARM64), **>5% faster** on x86_64, **all with better compression ratios**. - **Use Cases:** Game assets, firmware, app bundles, anything *compressed once, decompressed millions of times*. - **Install:** `conan install --requires="zxc/[*]"` · `vcpkg install zxc` · `brew install zxc` · `pip install zxc-compress` · `cargo add zxc-compress` · `npm i zxc-compress` - **Quality:** Fuzzed, sanitized, formally tested, thread-safe API. BSD-3-Clause. > **Verified:** ZXC has been officially merged into the **[lzbench master branch](https://github.com/inikep/lzbench)**. You can now verify these results independently using the industry-standard benchmark suite. ## ZXC Design Philosophy Traditional codecs often force a trade-off between **symmetric speed** (LZ4) and **archival density** (Zstd). **ZXC focuses on Asymmetric Efficiency.** Designed for the "Write-Once, Read-Many" reality of software distribution, ZXC utilizes a computationally intensive encoder to generate a bitstream specifically structured to **maximize decompression throughput**. By performing heavy analysis upfront, the encoder produces a layout optimized for the instruction pipelining and branch prediction capabilities of modern CPUs, particularly ARMv8, effectively offloading complexity from the decoder to the encoder. * **Build Time:** You generally compress only once (on CI/CD). 
* **Run Time:** You decompress millions of times (on every user's device). **ZXC respects this asymmetry.** [👉 **Read the Technical Whitepaper**](docs/WHITEPAPER.md) ## Benchmarks To ensure consistent performance, benchmarks are automatically executed on every commit via GitHub Actions. We monitor metrics on both **x86_64** (Linux) and **ARM64** (Apple Silicon M2) runners to track compression speed, decompression speed, and ratios. *(See the [latest benchmark logs](https://github.com/hellobertrand/zxc/actions/workflows/benchmark.yml))* ### 1. Mobile & Client: Apple Silicon (M2) *Scenario: Game Assets loading, App startup.* | Target | ZXC vs Competitor | Decompression Speed | Ratio | Verdict | | :--- | :--- | :--- | :--- | :--- | | **1. Max Speed** | **ZXC -1** vs *LZ4 --fast* | **11,934 MB/s** vs 5,647 MB/s **2.11x Faster** | **61.5** vs 62.2 **Smaller** (-1.0%) | **ZXC** leads in raw throughput. | | **2. Standard** | **ZXC -3** vs *LZ4 Default* | **6,925 MB/s** vs 4,804 MB/s **1.44x Faster** | **46.4** vs 47.6 **Smaller** (-2.6%) | **ZXC** outperforms LZ4 in read speed and ratio. | | **3. High Density** | **ZXC -5** vs *Zstd --fast 1* | **6,090 MB/s** vs 2,167 MB/s **2.81x Faster** | **40.6** vs 41.0 **Equivalent** (-1.0%) | **ZXC** outperforms Zstd in decoding speed. | ### 2. Cloud Server: Google Axion (ARM Neoverse V2) *Scenario: High-throughput Microservices, ARM Cloud Instances.* | Target | ZXC vs Competitor | Decompression Speed | Ratio | Verdict | | :--- | :--- | :--- | :--- | :--- | | **1. Max Speed** | **ZXC -1** vs *LZ4 --fast* | **8,727 MB/s** vs 4,868 MB/s **1.79x Faster** | **61.5** vs 62.2 **Smaller** (-1.0%) | **ZXC** leads in raw throughput. | | **2. Standard** | **ZXC -3** vs *LZ4 Default* | **5,130 MB/s** vs 4,182 MB/s **1.23x Faster** | **46.4** vs 47.6 **Smaller** (-2.6%) | **ZXC** outperforms LZ4 in read speed and ratio. | | **3. 
High Density** | **ZXC -5** vs *Zstd --fast 1* | **4,475 MB/s** vs 1,763 MB/s **2.54x Faster** | **40.6** vs 41.0 **Equivalent** (-1.0%) | **ZXC** outperforms Zstd in decoding speed. | ### 3. Build Server: x86_64 (AMD EPYC 7763) *Scenario: CI/CD Pipelines compatibility.* | Target | ZXC vs Competitor | Decompression Speed | Ratio | Verdict | | :--- | :--- | :--- | :--- | :--- | | **1. Max Speed** | **ZXC -1** vs *LZ4 --fast* | **6,779 MB/s** vs 4,116 MB/s **1.65x Faster** | **61.5** vs 62.2 **Smaller** (-1.0%) | **ZXC** achieves higher throughput. | | **2. Standard** | **ZXC -3** vs *LZ4 Default* | **3,849 MB/s** vs 3,573 MB/s **1.08x Faster** | **46.4** vs 47.6 **Smaller** (-2.6%) | ZXC offers improved speed and ratio. | | **3. High Density** | **ZXC -5** vs *Zstd --fast 1* | **3,495 MB/s** vs 1,573 MB/s **2.22x Faster** | **40.6** vs 41.0 **Equivalent** (-1.0%) | **ZXC** provides faster decoding. | *(Benchmark Graph ARM64 : Decompression Throughput & Storage Ratio (Normalized to LZ4))* ![Benchmark Graph ARM64](docs/images/benchmark_arm64_0.8.0.webp) ### Benchmark ARM64 (Apple Silicon) Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with Clang 17.0.0 using *MOREFLAGS="-march=native"* on macOS Sequoia 15.7.2 (Build 24G325). The reference hardware is an Apple M2 processor (ARM64). All performance metrics reflect single-threaded execution on the standard Silesia Corpus. | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 52905 MB/s | 52854 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 950 MB/s | **11934 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 615 MB/s | **9879 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 240 MB/s | **6925 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 169 MB/s | **6584 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 93.2 MB/s | **6090 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 815 MB/s | 4804 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1345 MB/s | 5647 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 14.0 MB/s | 4537 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 644 MB/s | 1622 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 725 MB/s | 2167 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 539 MB/s | 405 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 830 MB/s | 3265 MB/s | 101352257 | 47.82 | 12 files| ### Benchmark ARM64 (Google Axion) Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with GCC 12.2.0 using *MOREFLAGS="-march=native"* on Linux 64-bits Debian GNU/Linux 12 (bookworm). The reference hardware is a Google Neoverse-V2 processor (ARM64). All performance metrics reflect single-threaded execution on the standard Silesia Corpus. | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 24237 MB/s | 24131 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 853 MB/s | **8727 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 556 MB/s | **7303 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 226 MB/s | **5130 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 159 MB/s | **4879 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 84.9 MB/s | **4475 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 749 MB/s | 4182 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1303 MB/s | 4868 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 12.9 MB/s | 3796 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 527 MB/s | 1364 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 610 MB/s | 1763 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 429 MB/s | 386 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 756 MB/s | 1849 MB/s | 101352257 | 47.82 | 12 files| ### Benchmark x86_64 Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with GCC 13.3.0 using *MOREFLAGS="-march=native"* on Linux 64-bits Ubuntu 24.04. The reference hardware is an AMD EPYC 7763 processor (x86_64). All performance metrics reflect single-threaded execution on the standard Silesia Corpus. | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 18697 MB/s | 18663 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 642 MB/s | **6779 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 414 MB/s | **5778 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 171 MB/s | **3849 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 122 MB/s | **3674 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 67.0 MB/s | **3495 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 592 MB/s | 3573 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1033 MB/s | 4116 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 11.2 MB/s | 3484 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 412 MB/s | 1198 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 451 MB/s | 1573 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 354 MB/s | 281 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 611 MB/s | 1592 MB/s | 101464727 | 47.87 | 12 files| --- ## Installation ### Option 1: Download Release (GitHub) 1. Go to the [Releases page](https://github.com/hellobertrand/zxc/releases). 2. Download the archive matching your architecture: **macOS:** * `zxc-macos-arm64.tar.gz` (NEON optimizations included). **Linux:** * `zxc-linux-aarch64.tar.gz` (NEON optimizations included). * `zxc-linux-x86_64.tar.gz` (Runtime dispatch for AVX2/AVX512). **Windows:** * `zxc-windows-x64.zip` (Runtime dispatch for AVX2/AVX512). 3. Extract and install: ```bash tar -xzf zxc-linux-x86_64.tar.gz -C /usr/local ``` Each archive contains: ``` bin/zxc # CLI binary include/ # C headers (zxc.h, zxc_buffer.h, ...) lib/libzxc.a # Static library lib/pkgconfig/zxc.pc # pkg-config support lib/cmake/zxc/zxcConfig.cmake # CMake find_package(zxc) support ``` 4. 
Use in your project: **CMake:** ```cmake find_package(zxc REQUIRED) target_link_libraries(myapp PRIVATE zxc::zxc_lib) ``` **pkg-config:** ```bash cc myapp.c $(pkg-config --cflags --libs zxc) -o myapp ``` ### Option 2: vcpkg **Classic mode:** ```bash vcpkg install zxc ``` **Manifest mode** (add to `vcpkg.json`): ```json { "dependencies": ["zxc"] } ``` Then in your CMake project: ```cmake find_package(zxc CONFIG REQUIRED) target_link_libraries(myapp PRIVATE zxc::zxc_lib) ``` ### Option 3: Conan You also can download and install zxc using the [Conan](https://conan.io/) package manager: ```bash conan install -r conancenter --requires="zxc/[*]" --build=missing ``` Or add to your `conanfile.txt`: ```ini [requires] zxc/[*] ``` The zxc package in Conan Center is kept up to date by [ConanCenterIndex](https://github.com/conan-io/conan-center-index) contributors. If the version is out of date, please create an issue or pull request on the Conan Center Index repository. ### Option 4: Homebrew ```bash brew install zxc ``` The formula is maintained in [homebrew-core](https://formulae.brew.sh/formula/zxc). ### Option 5: Building from Source **Requirements:** CMake (3.14+), C17 Compiler (Clang/GCC/MSVC). 
```bash git clone https://github.com/hellobertrand/zxc.git cd zxc cmake -B build -DCMAKE_BUILD_TYPE=Release cmake --build build --parallel # Run tests ctest --test-dir build -C Release --output-on-failure # CLI usage ./build/zxc --help # Install library, headers, and CMake/pkg-config files sudo cmake --install build ``` #### CMake Options | Option | Default | Description | |--------|---------|-------------| | `BUILD_SHARED_LIBS` | OFF | Build shared libraries instead of static (`libzxc.so`, `libzxc.dylib`, `zxc.dll`) | | `ZXC_NATIVE_ARCH` | ON | Enable `-march=native` for maximum performance | | `ZXC_ENABLE_LTO` | ON | Enable Link-Time Optimization (LTO) | | `ZXC_PGO_MODE` | OFF | Profile-Guided Optimization mode (`OFF`, `GENERATE`, `USE`) | | `ZXC_BUILD_CLI` | ON | Build command-line interface | | `ZXC_BUILD_TESTS` | ON | Build unit tests | | `ZXC_ENABLE_COVERAGE` | OFF | Enable code coverage generation (disables LTO/PGO) | ```bash # Build shared library cmake -B build -DBUILD_SHARED_LIBS=ON # Portable build (without -march=native) cmake -B build -DZXC_NATIVE_ARCH=OFF # Library only (no CLI, no tests) cmake -B build -DZXC_BUILD_CLI=OFF -DZXC_BUILD_TESTS=OFF # Code coverage build cmake -B build -DZXC_ENABLE_COVERAGE=ON ``` ### Packaging Status [![Packaging status](https://repology.org/badge/vertical-allrepos/zxc.svg)](https://repology.org/project/zxc/versions) --- ## Compression Levels * **Level 1, 2 (Fast):** Optimized for real-time assets (Gaming, UI). * **Level 3, 4 (Balanced):** A strong middle-ground offering efficient compression speed and a ratio superior to LZ4. * **Level 5 (Compact):** The best choice for Embedded, Firmware, or Archival. Better compression than LZ4 and significantly faster decoding than Zstd. --- ## Usage ### 1. CLI The CLI is perfect for benchmarking or manually compressing assets. 
```bash # Basic Compression (Level 3 is default) zxc -z input_file output_file # High Compression (Level 5) zxc -z -5 input_file output_file # -z for compression can be omitted zxc input_file output_file # as well as output file; it will be automatically assigned to input_file.xc zxc input_file # Decompression zxc -d compressed_file output_file # Benchmark Mode (Testing speed on your machine) zxc -b input_file ``` #### Using with `tar` ZXC works as a drop-in external compressor for `tar` (reads stdin, writes stdout, returns 0 on success): ```bash # GNU tar (Linux) tar -I 'zxc -5' -cf archive.tar.zxc data/ tar -I 'zxc -d' -xf archive.tar.zxc # bsdtar (macOS) tar --use-compress-program='zxc -5' -cf archive.tar.zxc data/ tar --use-compress-program='zxc -d' -xf archive.tar.zxc # Pipes (universal) tar cf - data/ | zxc > archive.tar.zxc zxc -d < archive.tar.zxc | tar xf - ``` ### 2. API ZXC provides a **thread-safe API** with two usage patterns. Parameters are passed through dedicated options structs, making call sites self-documenting and forward-compatible. 
#### Buffer API (In-Memory) ```c #include "zxc.h" // Compression uint64_t bound = zxc_compress_bound(src_size); zxc_compress_opts_t c_opts = { .level = ZXC_LEVEL_DEFAULT, .checksum_enabled = 1, /* .block_size = 0 → 256 KB default */ }; int64_t compressed_size = zxc_compress(src, src_size, dst, bound, &c_opts); // Decompression zxc_decompress_opts_t d_opts = { .checksum_enabled = 1 }; int64_t decompressed_size = zxc_decompress(src, src_size, dst, dst_capacity, &d_opts); ``` #### Stream API (Files, Multi-Threaded) ```c #include "zxc.h" // Compression (auto-detect threads, level 3, checksum on) zxc_compress_opts_t c_opts = { .n_threads = 0, // 0 = auto .level = ZXC_LEVEL_DEFAULT, .checksum_enabled = 1, /* .block_size = 0 → 256 KB default */ }; int64_t bytes_written = zxc_stream_compress(f_in, f_out, &c_opts); // Decompression zxc_decompress_opts_t d_opts = { .n_threads = 0, .checksum_enabled = 1 }; int64_t bytes_out = zxc_stream_decompress(f_in, f_out, &d_opts); ``` #### Reusable Context API (Low-Latency / Embedded) For tight loops (e.g. filesystem plug-ins) where per-call `malloc`/`free` overhead matters, use opaque reusable contexts. 
Options are **sticky** — settings from `zxc_create_cctx()` are reused when passing `NULL`: ```c #include "zxc.h" zxc_compress_opts_t opts = { .level = 3, .checksum_enabled = 0 }; zxc_cctx* cctx = zxc_create_cctx(&opts); // allocate once, settings remembered zxc_dctx* dctx = zxc_create_dctx(); // allocate once // reuse across many blocks — NULL reuses sticky settings: int64_t csz = zxc_compress_cctx(cctx, src, src_sz, dst, dst_cap, NULL); int64_t dsz = zxc_decompress_dctx(dctx, dst, csz, out, src_sz, NULL); zxc_free_cctx(cctx); zxc_free_dctx(dctx); ``` **Features:** - Caller-allocated buffers with explicit bounds - Thread-safe (stateless) - Configurable block sizes (4 KB – 2 MB, powers of 2) - Multi-threaded streaming (auto-detects CPU cores) - Optional checksum validation - Reusable contexts for high-frequency call sites **[See complete examples and advanced usage →](docs/EXAMPLES.md)** ## Language Bindings [![Crates.io](https://img.shields.io/crates/v/zxc-compress)](https://crates.io/crates/zxc-compress) [![PyPi](https://img.shields.io/pypi/v/zxc-compress)](https://pypi.org/project/zxc-compress) [![npm](https://img.shields.io/npm/v/zxc-compress)](https://www.npmjs.com/package/zxc-compress) Official wrappers maintained in this repository: | Language | Package Manager | Install Command | Documentation | Author | |----------|-----------------|-----------------|---------------|--------| | **Rust** | [`crates.io`](https://crates.io/crates/zxc-compress) | `cargo add zxc-compress` | [README](wrappers/rust/zxc/README.md) | [@hellobertrand](https://github.com/hellobertrand) | | **Python**| [`PyPI`](https://pypi.org/project/zxc-compress) | `pip install zxc-compress` | [README](wrappers/python/README.md) | [@nuberchardzer1](https://github.com/nuberchardzer1) | | **Node.js**| [`npm`](https://www.npmjs.com/package/zxc-compress) | `npm install zxc-compress` | [README](wrappers/nodejs/README.md) | [@hellobertrand](https://github.com/hellobertrand) | Community-maintained 
bindings: | Language | Package Manager | Install Command | Repository | Author | | -------- | --------------- | --------------- | ---------- | ------ | | **Go** | pkg.go.dev | `go get github.com/meysam81/go-zxc` | | [@meysam81](https://github.com/meysam81) | ## Safety & Quality * **Unit Tests**: Comprehensive test suite with CTest integration. * **Continuous Fuzzing**: Integrated with ClusterFuzzLite suites. * **Static Analysis**: Checked with Cppcheck & Clang Static Analyzer. * **CodeQL Analysis**: GitHub Advanced Security scanning for vulnerabilities. * **Code Coverage**: Automated tracking with Codecov integration. * **Dynamic Analysis**: Validated with Valgrind and ASan/UBSan in CI pipelines. * **Safe API**: Explicit buffer capacity is required for all operations. ## License & Credits **ZXC** Copyright © 2025-2026, Bertrand Lebonnois and contributors. Licensed under the **BSD 3-Clause License**. See LICENSE for details. **Third-Party Components:** - **rapidhash** by Nicolas De Carli (MIT) - Used for high-speed, platform-independent checksums. zxc-0.9.1/codecov.yml000066400000000000000000000001361515574030100144730ustar00rootroot00000000000000coverage: status: project: default: target: 80% ignore: - "src/cli/**" zxc-0.9.1/docs/000077500000000000000000000000001515574030100132565ustar00rootroot00000000000000zxc-0.9.1/docs/API.md000066400000000000000000000445061515574030100142220ustar00rootroot00000000000000# ZXC API & ABI Reference **Library version**: 0.9.1 **SOVERSION**: 2 **License**: BSD-3-Clause This document is the authoritative reference for the public API surface and ABI guarantees of **libzxc**. It is intended for integrators, packagers, and language-binding authors. For usage examples see [`EXAMPLES.md`](EXAMPLES.md). For the on-disk binary format see [`FORMAT.md`](FORMAT.md). --- ## Table of Contents - [1. Headers and Include Graph](#1-headers-and-include-graph) - [2. Symbol Visibility](#2-symbol-visibility) - [3. 
ABI Versioning](#3-abi-versioning) - [4. Runtime Dependencies](#4-runtime-dependencies) - [5. Constants and Enumerations](#5-constants-and-enumerations) - [6. Type Definitions](#6-type-definitions) - [7. Buffer API](#7-buffer-api) - [8. Reusable Context API](#8-reusable-context-api) - [9. Streaming API](#9-streaming-api) - [10. Sans-IO API](#10-sans-io-api) - [11. Error Handling](#11-error-handling) - [12. Thread Safety](#12-thread-safety) - [13. Exported Symbols Summary](#13-exported-symbols-summary) --- ## 1. Headers and Include Graph ```text zxc.h <- umbrella header (includes everything below) ├── zxc_buffer.h <- Buffer API + Reusable Context API │ ├── zxc_export.h <- visibility macros │ └── zxc_stream.h <- Streaming API + opts structs │ └── zxc_export.h ├── zxc_constants.h <- version macros, compression levels, block sizes ├── zxc_error.h <- error codes + zxc_error_name() │ └── zxc_export.h └── (not included by zxc.h) zxc_sans_io.h <- Low-level primitives (opt-in) └── zxc_export.h ``` Include `` to access everything except the sans-IO layer. Include `` explicitly when building custom drivers. --- ## 2. Symbol Visibility libzxc uses an **opt-in** export strategy: | Build type | How symbols are exposed | |-----------|------------------------| | **Shared library** | Default visibility is `hidden`. Only functions annotated with `ZXC_EXPORT` are exported. Internal FMV variants (`_default`, `_neon`, `_avx2`, `_avx512`) are hidden. | | **Static library** | Define `ZXC_STATIC_DEFINE` (set automatically by CMake) to disable import/export annotations. | ### Macros | Macro | Purpose | |-------|---------| | `ZXC_EXPORT` | Marks a symbol as part of the public API (`__declspec(dllexport/dllimport)` on Windows, `visibility("default")` on GCC/Clang). | | `ZXC_NO_EXPORT` | Forces a symbol to be hidden (`visibility("hidden")`). | | `ZXC_DEPRECATED` | Emits a compiler warning when a deprecated symbol is used. 
| | `ZXC_STATIC_DEFINE` | Define when building or consuming as a static library. | | `zxc_lib_EXPORTS` | Set automatically by CMake when building the shared library. Do not define manually. | --- ## 3. ABI Versioning libzxc follows the shared-library versioning convention: ``` libzxc.so.{SOVERSION}.{MAJOR}.{MINOR}.{PATCH} ``` | Field | Description | Current | |-------|-------------|---------| | `SOVERSION` | Bumped on **ABI-breaking** changes (struct layout, removed symbols, changed signatures). | **2** | | `VERSION` | Tracks the library release. | **0.9.1** | **Compatibility rule**: any binary compiled against SOVERSION N will load against any libzxc with the same SOVERSION, regardless of the `VERSION` triple. ### Platform naming | Platform | Files | |----------|-------| | Linux | `libzxc.so` → `libzxc.so.2` → `libzxc.so.0.9.1` | | macOS | `libzxc.dylib` → `libzxc.2.dylib` → `libzxc.0.9.1.dylib` | | Windows | `zxc.dll` + `zxc.lib` (import) | --- ## 4. Runtime Dependencies libzxc has **zero external dependencies**. | Dependency | Notes | |-----------|-------| | C standard library | ``, ``, ``, `` | | POSIX threads | `pthread` on Unix/macOS; Windows threads on Win32 | No dependency on OpenSSL, zlib, or any other compression library. --- ## 5. Constants and Enumerations ### 5.1 Version (compile-time) Defined in `zxc_constants.h`: ```c #define ZXC_VERSION_MAJOR 0 #define ZXC_VERSION_MINOR 9 #define ZXC_VERSION_PATCH 1 #define ZXC_LIB_VERSION_STR "0.9.1" ``` ### 5.2 Block Size Constraints ```c #define ZXC_BLOCK_SIZE_MIN_LOG2 12 // exponent for minimum #define ZXC_BLOCK_SIZE_MAX_LOG2 21 // exponent for maximum #define ZXC_BLOCK_SIZE_MIN (1U << 12) // 4 KB #define ZXC_BLOCK_SIZE_MAX (1U << 21) // 2 MB #define ZXC_BLOCK_SIZE_DEFAULT (256 * 1024) // 256 KB ``` Block size must be a power of two within `[ZXC_BLOCK_SIZE_MIN, ZXC_BLOCK_SIZE_MAX]`. Pass `0` to any API to use `ZXC_BLOCK_SIZE_DEFAULT`. 
### 5.3 Compression Levels ```c typedef enum { ZXC_LEVEL_FASTEST = 1, // Best throughput, lowest ratio ZXC_LEVEL_FAST = 2, // Fast, good for real-time ZXC_LEVEL_DEFAULT = 3, // Recommended balance ZXC_LEVEL_BALANCED = 4, // Higher ratio, good speed ZXC_LEVEL_COMPACT = 5 // Highest density } zxc_compression_level_t; ``` All levels produce data decompressible at the **same speed**. Pass `0` for level to use `ZXC_LEVEL_DEFAULT`. ### 5.4 Error Codes ```c typedef enum { ZXC_OK = 0, ZXC_ERROR_MEMORY = -1, // malloc failure ZXC_ERROR_DST_TOO_SMALL = -2, // output buffer too small ZXC_ERROR_SRC_TOO_SMALL = -3, // input truncated ZXC_ERROR_BAD_MAGIC = -4, // invalid magic word ZXC_ERROR_BAD_VERSION = -5, // unsupported format version ZXC_ERROR_BAD_HEADER = -6, // corrupted header (CRC mismatch) ZXC_ERROR_BAD_CHECKSUM = -7, // checksum verification failed ZXC_ERROR_CORRUPT_DATA = -8, // corrupted compressed data ZXC_ERROR_BAD_OFFSET = -9, // invalid match offset ZXC_ERROR_OVERFLOW = -10, // buffer overflow detected ZXC_ERROR_IO = -11, // file read/write/seek failure ZXC_ERROR_NULL_INPUT = -12, // required pointer is NULL ZXC_ERROR_BAD_BLOCK_TYPE = -13, // unknown block type ZXC_ERROR_BAD_BLOCK_SIZE = -14 // invalid block size } zxc_error_t; ``` All public functions that can fail return negative `zxc_error_t` values on error. --- ## 6. Type Definitions ### 6.1 Options Structs **Compression options** (defined in `zxc_stream.h`, used by all compression functions): ```c typedef struct { int n_threads; // Worker thread count (0 = auto-detect). int level; // Compression level 1–5 (0 = default). size_t block_size; // Block size in bytes (0 = 256 KB default). int checksum_enabled; // 1 = enable checksums, 0 = disable. zxc_progress_callback_t progress_cb; // Optional callback (NULL to disable). void* user_data; // Passed through to progress_cb. } zxc_compress_opts_t; ``` **Decompression options**: ```c typedef struct { int n_threads; // Worker thread count (0 = auto-detect). 
int checksum_enabled; // 1 = verify checksums, 0 = skip. zxc_progress_callback_t progress_cb; // Optional callback. void* user_data; // Passed through to progress_cb. } zxc_decompress_opts_t; ``` Both structs are safe to zero-initialize for default behavior. Pass `NULL` instead of an options pointer to use all defaults. ### 6.2 Progress Callback ```c typedef void (*zxc_progress_callback_t)( uint64_t bytes_processed, // Total input bytes processed so far uint64_t bytes_total, // Total input bytes (0 if unknown, e.g. stdin) const void* user_data // User-provided context ); ``` Called from the writer thread after each block is processed. Must be fast and non-blocking. ### 6.3 Opaque Context Types ```c typedef struct zxc_cctx_s zxc_cctx; // Opaque compression context typedef struct zxc_dctx_s zxc_dctx; // Opaque decompression context ``` Internal layout is hidden. Interact only through `zxc_create_*` / `zxc_free_*` / `zxc_*_cctx` / `zxc_*_dctx` functions. ### 6.4 Sans-IO Types **Compression context** (public struct, for advanced use only): ```c typedef struct { uint32_t* hash_table; // LZ77 hash table uint16_t* chain_table; // Collision chain table void* memory_block; // Single allocation owner uint32_t epoch; // Lazy hash invalidation counter uint32_t* buf_sequences; // Packed sequence records uint8_t* buf_tokens; // Token buffer uint16_t* buf_offsets; // Offset buffer uint8_t* buf_extras; // Extra-length buffer uint8_t* literals; // Literal bytes uint8_t* lit_buffer; // Scratch buffer for RLE size_t lit_buffer_cap; uint8_t* work_buf; // Padded scratch for buffer-API decompression size_t work_buf_cap; int checksum_enabled; int compression_level; size_t chunk_size; // Effective block size uint32_t offset_bits; // log2(chunk_size) uint32_t offset_mask; // (1 << offset_bits) - 1 uint32_t max_epoch; // 1 << (32 - offset_bits) } zxc_cctx_t; ``` **Block header** (8 bytes on disk): ```c typedef struct { uint8_t block_type; // See FORMAT.md §4 uint8_t block_flags; uint8_t 
reserved; uint8_t header_crc; // 1-byte header CRC uint32_t comp_size; // Compressed payload size (excl. header) } zxc_block_header_t; ``` --- ## 7. Buffer API Declared in `zxc_buffer.h`. Single-threaded, blocking, in-memory operations. ### `zxc_compress_bound` ```c ZXC_EXPORT uint64_t zxc_compress_bound(size_t input_size); ``` Returns the worst-case compressed size for `input_size` bytes. Use to allocate the destination buffer before compression. ### `zxc_compress` ```c ZXC_EXPORT int64_t zxc_compress( const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_compress_opts_t* opts // NULL = defaults ); ``` Compresses `src` into `dst`. Only `level`, `block_size`, and `checksum_enabled` fields of `opts` are used. `n_threads` is ignored (always single-threaded). **Returns**: compressed size (> 0) on success, or negative `zxc_error_t`. ### `zxc_decompress` ```c ZXC_EXPORT int64_t zxc_decompress( const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_decompress_opts_t* opts // NULL = defaults ); ``` Decompresses `src` into `dst`. Only `checksum_enabled` is used. **Returns**: decompressed size (> 0) on success, or negative `zxc_error_t`. ### `zxc_get_decompressed_size` ```c ZXC_EXPORT uint64_t zxc_get_decompressed_size( const void* src, size_t src_size ); ``` Reads the original size from the file footer without decompressing. **Returns**: original size, or `0` if the buffer is invalid. --- ## 8. Reusable Context API Declared in `zxc_buffer.h`. Eliminates per-call allocation overhead for hot-path integrations (filesystem plug-ins, batch processing). ### `zxc_create_cctx` ```c ZXC_EXPORT zxc_cctx* zxc_create_cctx(const zxc_compress_opts_t* opts); ``` Creates a heap-allocated compression context. When `opts` is non-NULL, internal buffers are pre-allocated (eager init). When `opts` is NULL, allocation is deferred to first use (lazy init). **Returns**: context pointer, or `NULL` on allocation failure. 
### `zxc_free_cctx` ```c ZXC_EXPORT void zxc_free_cctx(zxc_cctx* cctx); ``` Frees all resources. Safe to pass `NULL`. ### `zxc_compress_cctx` ```c ZXC_EXPORT int64_t zxc_compress_cctx( zxc_cctx* cctx, const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_compress_opts_t* opts ); ``` Same as `zxc_compress()` but reuses internal buffers from `cctx`. Automatically re-initializes when `block_size` or `level` changes. ### `zxc_create_dctx` ```c ZXC_EXPORT zxc_dctx* zxc_create_dctx(void); ``` Creates a heap-allocated decompression context. ### `zxc_free_dctx` ```c ZXC_EXPORT void zxc_free_dctx(zxc_dctx* dctx); ``` Frees all resources. Safe to pass `NULL`. ### `zxc_decompress_dctx` ```c ZXC_EXPORT int64_t zxc_decompress_dctx( zxc_dctx* dctx, const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_decompress_opts_t* opts ); ``` Same as `zxc_decompress()` but reuses buffers from `dctx`. --- ## 9. Streaming API Declared in `zxc_stream.h`. Multi-threaded, `FILE*`-based pipeline (reader → workers → writer). ### `zxc_stream_compress` ```c ZXC_EXPORT int64_t zxc_stream_compress( FILE* f_in, FILE* f_out, const zxc_compress_opts_t* opts ); ``` Compresses `f_in` → `f_out` using a parallel pipeline. All fields of `opts` are used, including `n_threads` and `progress_cb`. **Returns**: total compressed bytes written, or negative `zxc_error_t`. ### `zxc_stream_decompress` ```c ZXC_EXPORT int64_t zxc_stream_decompress( FILE* f_in, FILE* f_out, const zxc_decompress_opts_t* opts ); ``` Decompresses `f_in` → `f_out` using a parallel pipeline. **Returns**: total decompressed bytes written, or negative `zxc_error_t`. ### `zxc_stream_get_decompressed_size` ```c ZXC_EXPORT int64_t zxc_stream_get_decompressed_size(FILE* f_in); ``` Reads the original size from the file footer. File position is restored. **Returns**: original size, or negative `zxc_error_t`. --- ## 10. Sans-IO API Declared in `zxc_sans_io.h` (not included by `zxc.h` — opt-in). 
Low-level primitives for building custom compression drivers. ### `zxc_cctx_init` ```c ZXC_EXPORT int zxc_cctx_init( zxc_cctx_t* ctx, size_t chunk_size, int mode, // 1 = compression, 0 = decompression int level, int checksum_enabled ); ``` Allocates internal buffers sized for `chunk_size`. **Returns**: `ZXC_OK` or `ZXC_ERROR_MEMORY`. ### `zxc_cctx_free` ```c ZXC_EXPORT void zxc_cctx_free(zxc_cctx_t* ctx); ``` Frees internal buffers. Does **not** free `ctx` itself. ### `zxc_write_file_header` ```c ZXC_EXPORT int zxc_write_file_header( uint8_t* dst, size_t dst_capacity, size_t chunk_size, int has_checksum ); ``` Writes the 16-byte file header. **Returns**: bytes written, or `ZXC_ERROR_DST_TOO_SMALL`. ### `zxc_read_file_header` ```c ZXC_EXPORT int zxc_read_file_header( const uint8_t* src, size_t src_size, size_t* out_block_size, // optional int* out_has_checksum // optional ); ``` Validates the file header (magic, version, CRC16). **Returns**: `ZXC_OK`, or `ZXC_ERROR_BAD_MAGIC` / `ZXC_ERROR_BAD_VERSION` / `ZXC_ERROR_SRC_TOO_SMALL`. ### `zxc_write_block_header` ```c ZXC_EXPORT int zxc_write_block_header( uint8_t* dst, size_t dst_capacity, const zxc_block_header_t* bh ); ``` Serializes a block header (8 bytes, little-endian). **Returns**: bytes written, or `ZXC_ERROR_DST_TOO_SMALL`. ### `zxc_read_block_header` ```c ZXC_EXPORT int zxc_read_block_header( const uint8_t* src, size_t src_size, zxc_block_header_t* bh ); ``` Parses a block header (endianness conversion included). **Returns**: `ZXC_OK` or `ZXC_ERROR_SRC_TOO_SMALL`. ### `zxc_write_file_footer` ```c ZXC_EXPORT int zxc_write_file_footer( uint8_t* dst, size_t dst_capacity, uint64_t src_size, uint32_t global_hash, int checksum_enabled ); ``` Writes the 12-byte footer (original size + optional global hash). **Returns**: bytes written, or `ZXC_ERROR_DST_TOO_SMALL`. --- ## 11. 
Error Handling ### `zxc_error_name` ```c ZXC_EXPORT const char* zxc_error_name(int code); ``` Returns a constant, null-terminated string for any error code (e.g. `"ZXC_OK"`, `"ZXC_ERROR_MEMORY"`). Returns `"ZXC_UNKNOWN_ERROR"` for unrecognized codes. ### Pattern All functions that can fail use the same convention: ```c int64_t result = zxc_compress(src, src_size, dst, dst_cap, &opts); if (result < 0) { fprintf(stderr, "Error: %s\n", zxc_error_name((int)result)); } ``` --- ## 12. Thread Safety | API Layer | Safe to call concurrently? | Notes | |-----------|---------------------------|-------| | **Buffer API** | Yes (stateless) | Each call is self-contained. Multiple threads can compress/decompress simultaneously with independent buffers. | | **Context API** | Per-context | A single `zxc_cctx` / `zxc_dctx` must not be shared between threads. Create one context per thread. | | **Streaming API** | Per-call | Each `zxc_stream_*` call manages its own thread pool internally. Do not call from multiple threads on the same `FILE*`. | | **Sans-IO API** | Per-context | Same rule as context API — one `zxc_cctx_t` per thread. | | `zxc_error_name` | Yes | Returns a pointer to a static string. | --- ## 13. 
Exported Symbols Summary The shared library exports exactly **21 symbols** (verified with `nm -gU`): | # | Symbol | API Layer | Header | |---|--------|-----------|--------| | 1 | `zxc_compress_bound` | Buffer | `zxc_buffer.h` | | 2 | `zxc_compress` | Buffer | `zxc_buffer.h` | | 3 | `zxc_decompress` | Buffer | `zxc_buffer.h` | | 4 | `zxc_get_decompressed_size` | Buffer | `zxc_buffer.h` | | 5 | `zxc_create_cctx` | Context | `zxc_buffer.h` | | 6 | `zxc_free_cctx` | Context | `zxc_buffer.h` | | 7 | `zxc_compress_cctx` | Context | `zxc_buffer.h` | | 8 | `zxc_create_dctx` | Context | `zxc_buffer.h` | | 9 | `zxc_free_dctx` | Context | `zxc_buffer.h` | | 10 | `zxc_decompress_dctx` | Context | `zxc_buffer.h` | | 11 | `zxc_stream_compress` | Streaming | `zxc_stream.h` | | 12 | `zxc_stream_decompress` | Streaming | `zxc_stream.h` | | 13 | `zxc_stream_get_decompressed_size` | Streaming | `zxc_stream.h` | | 14 | `zxc_cctx_init` | Sans-IO | `zxc_sans_io.h` | | 15 | `zxc_cctx_free` | Sans-IO | `zxc_sans_io.h` | | 16 | `zxc_write_file_header` | Sans-IO | `zxc_sans_io.h` | | 17 | `zxc_read_file_header` | Sans-IO | `zxc_sans_io.h` | | 18 | `zxc_write_block_header` | Sans-IO | `zxc_sans_io.h` | | 19 | `zxc_read_block_header` | Sans-IO | `zxc_sans_io.h` | | 20 | `zxc_write_file_footer` | Sans-IO | `zxc_sans_io.h` | | 21 | `zxc_error_name` | Error | `zxc_error.h` | No internal symbols leak into the public ABI. FMV dispatch variants (`_default`, `_neon`, `_avx2`, `_avx512`) are compiled with `-fvisibility=hidden` and are not exported. zxc-0.9.1/docs/EXAMPLES.md000066400000000000000000000252731515574030100150270ustar00rootroot00000000000000# ZXC API Examples This document provides complete, working examples for using the ZXC compression library in C. 
## Table of Contents - [Buffer API (In-Memory)](#buffer-api-in-memory) - [Stream API (Multi-Threaded)](#stream-api-multi-threaded) - [Reusable Context API](#reusable-context-api) - [Language Bindings](#language-bindings) --- ## Buffer API (In-Memory) Ideal for small assets or simple integrations. Thread-safe and ready for highly concurrent environments (Go routines, Node.js workers, Python threads). ```c #include "zxc.h" #include #include #include int main(void) { // Original data to compress const char* original = "Hello, ZXC! This is a sample text for compression."; size_t original_size = strlen(original) + 1; // Include null terminator // Step 1: Calculate maximum compressed size uint64_t max_compressed_size = zxc_compress_bound(original_size); // Step 2: Allocate buffers void* compressed = malloc(max_compressed_size); void* decompressed = malloc(original_size); if (!compressed || !decompressed) { fprintf(stderr, "Memory allocation failed\n"); free(compressed); free(decompressed); return 1; } // Step 3: Compress data (level 3, checksum enabled, default block size) zxc_compress_opts_t c_opts = { .level = ZXC_LEVEL_DEFAULT, .checksum_enabled = 1, /* .block_size = 0 -> 256 KB default */ }; int64_t compressed_size = zxc_compress( original, // Source buffer original_size, // Source size compressed, // Destination buffer max_compressed_size, // Destination capacity &c_opts // Options (NULL = all defaults) ); if (compressed_size <= 0) { fprintf(stderr, "Compression failed (error %lld)\n", (long long)compressed_size); free(compressed); free(decompressed); return 1; } printf("Original size: %zu bytes\n", original_size); printf("Compressed size: %lld bytes (%.1f%% ratio)\n", (long long)compressed_size, 100.0 * (double)compressed_size / (double)original_size); // Step 4: Decompress data (checksum verification enabled) zxc_decompress_opts_t d_opts = { .checksum_enabled = 1 }; int64_t decompressed_size = zxc_decompress( compressed, // Source buffer (size_t)compressed_size, 
decompressed, // Destination buffer original_size, // Destination capacity &d_opts // Options (NULL = all defaults) ); if (decompressed_size <= 0) { fprintf(stderr, "Decompression failed (error %lld)\n", (long long)decompressed_size); free(compressed); free(decompressed); return 1; } // Step 5: Verify integrity if ((size_t)decompressed_size == original_size && memcmp(original, decompressed, original_size) == 0) { printf("Success! Data integrity verified.\n"); printf("Decompressed: %s\n", (char*)decompressed); } else { fprintf(stderr, "Data mismatch after decompression\n"); } // Cleanup free(compressed); free(decompressed); return 0; } ``` **Compilation:** ```bash gcc -o buffer_example buffer_example.c -I include -L build -lzxc_lib ``` --- ## Stream API (Multi-Threaded) For large files, use the streaming API to process data in parallel chunks. ```c #include "zxc.h" #include #include int main(int argc, char* argv[]) { if (argc != 4) { fprintf(stderr, "Usage: %s \n", argv[0]); return 1; } const char* input_path = argv[1]; const char* compressed_path = argv[2]; const char* output_path = argv[3]; /* ------------------------------------------------------------------ */ /* Step 1: Compress */ /* ------------------------------------------------------------------ */ printf("Compressing '%s' to '%s'...\n", input_path, compressed_path); FILE* f_in = fopen(input_path, "rb"); if (!f_in) { fprintf(stderr, "Error: cannot open '%s'\n", input_path); return 1; } FILE* f_out = fopen(compressed_path, "wb"); if (!f_out) { fprintf(stderr, "Error: cannot create '%s'\n", compressed_path); fclose(f_in); return 1; } // 0 threads = auto-detect CPU cores; 0 block_size = 256 KB default zxc_compress_opts_t c_opts = { .n_threads = 0, .level = ZXC_LEVEL_DEFAULT, .checksum_enabled = 1, /* .block_size = 0 -> 256 KB */ }; int64_t compressed_bytes = zxc_stream_compress(f_in, f_out, &c_opts); fclose(f_in); fclose(f_out); if (compressed_bytes < 0) { fprintf(stderr, "Compression failed (error %lld)\n", 
(long long)compressed_bytes); return 1; } printf("Compression complete: %lld bytes written\n", (long long)compressed_bytes); /* ------------------------------------------------------------------ */ /* Step 2: Decompress */ /* ------------------------------------------------------------------ */ printf("\nDecompressing '%s' to '%s'...\n", compressed_path, output_path); FILE* f_compressed = fopen(compressed_path, "rb"); if (!f_compressed) { fprintf(stderr, "Error: cannot open '%s'\n", compressed_path); return 1; } FILE* f_decompressed = fopen(output_path, "wb"); if (!f_decompressed) { fprintf(stderr, "Error: cannot create '%s'\n", output_path); fclose(f_compressed); return 1; } zxc_decompress_opts_t d_opts = { .n_threads = 0, .checksum_enabled = 1 }; int64_t decompressed_bytes = zxc_stream_decompress(f_compressed, f_decompressed, &d_opts); fclose(f_compressed); fclose(f_decompressed); if (decompressed_bytes < 0) { fprintf(stderr, "Decompression failed (error %lld)\n", (long long)decompressed_bytes); return 1; } printf("Decompression complete: %lld bytes written\n", (long long)decompressed_bytes); printf("\nSuccess! Verify the output file matches the original.\n"); return 0; } ``` **Compilation:** ```bash gcc -o stream_example stream_example.c -I include -L build -lzxc_lib -lpthread -lm ``` **Usage:** ```bash ./stream_example large_file.bin compressed.xc decompressed.bin ``` **Features demonstrated:** - Multi-threaded parallel processing (auto-detects CPU cores) - Checksum validation for data integrity - Error handling for file operations --- ## Reusable Context API For tight loops — such as filesystem plug-ins (squashfs, dwarfs, erofs) — where allocating and freeing internal buffers on every call would add latency, ZXC provides opaque reusable contexts. Internal buffers are only reallocated when the **block size changes**, so the common case (same block size, different data) is completely allocation-free. 
Options are **sticky**: settings passed via `opts` to `zxc_create_cctx()` or `zxc_compress_cctx()` are remembered and reused on subsequent calls where `opts` is `NULL`. ```c #include "zxc.h" #include #include #include #define BLOCK_SIZE (64 * 1024) // 64 KB — typical squashfs block size #define NUM_BLOCKS 32 int main(void) { // Create context with sticky options (level 3, no checksum, 64 KB blocks). // These settings are remembered for all subsequent calls. zxc_compress_opts_t create_opts = { .level = 3, .checksum_enabled = 0, .block_size = BLOCK_SIZE, }; zxc_cctx* cctx = zxc_create_cctx(&create_opts); zxc_dctx* dctx = zxc_create_dctx(); if (!cctx || !dctx) { fprintf(stderr, "Context creation failed\n"); zxc_free_cctx(cctx); zxc_free_dctx(dctx); return 1; } const size_t comp_cap = (size_t)zxc_compress_bound(BLOCK_SIZE); uint8_t* comp = malloc(comp_cap); uint8_t* src = malloc(BLOCK_SIZE); uint8_t* dec = malloc(BLOCK_SIZE); for (int i = 0; i < NUM_BLOCKS; i++) { // Fill block with pseudo-random data for (size_t j = 0; j < BLOCK_SIZE; j++) src[j] = (uint8_t)(i ^ j); // Compress — pass NULL to reuse sticky settings from create_opts. // No malloc/free inside, no need to pass opts again. int64_t csz = zxc_compress_cctx(cctx, src, BLOCK_SIZE, comp, comp_cap, NULL); if (csz <= 0) { fprintf(stderr, "Block %d: compress error %lld\n", i, (long long)csz); break; } // Decompress — no malloc/free inside int64_t dsz = zxc_decompress_dctx(dctx, comp, (size_t)csz, dec, BLOCK_SIZE, NULL); if (dsz != BLOCK_SIZE || memcmp(src, dec, BLOCK_SIZE) != 0) { fprintf(stderr, "Block %d: roundtrip mismatch\n", i); break; } } // Override sticky settings for a single call (e.g. switch to level 5). // This also updates the sticky settings for future NULL calls. 
zxc_compress_opts_t high_opts = { .level = 5 }; int64_t csz = zxc_compress_cctx(cctx, src, BLOCK_SIZE, comp, comp_cap, &high_opts); printf("Level-5 compressed: %lld bytes\n", (long long)csz); printf("Processed %d blocks of %d KB — no per-block allocation.\n", NUM_BLOCKS, BLOCK_SIZE / 1024); zxc_free_cctx(cctx); zxc_free_dctx(dctx); free(src); free(comp); free(dec); return 0; } ``` **Compilation:** ```bash gcc -o ctx_example ctx_example.c -I include -L build -lzxc_lib ``` --- ## Writing Your Own Streaming Driver The streaming multi-threaded API shown above is the default provided driver. However, ZXC is written in a **"sans-IO" style** that separates compute from I/O and multitasking. This allows you to write your own driver in any language of your choice, using the native I/O and multitasking capabilities of that language. To implement a custom driver: 1. Include the extra public header `zxc_sans_io.h` 2. Study the reference implementation in `src/lib/zxc_driver.c` 3. Implement your own I/O and threading logic --- ## Language Bindings For non-C languages, see the official bindings: | Language | Package | Install Command | Documentation | |----------|---------|-----------------|---------------| | **Rust** | [`crates.io`](https://crates.io/crates/zxc-compress) | `cargo add zxc-compress` | [README](../wrappers/rust/zxc/README.md) | | **Python**| [`PyPI`](https://pypi.org/project/zxc-compress) | `pip install zxc-compress` | [README](../wrappers/python/README.md) | | **Node.js**| [`npm`](https://www.npmjs.com/package/zxc-compress) | `npm install zxc-compress` | [README](../wrappers/nodejs/README.md) | Community-maintained: - **Go**: https://github.com/meysam81/go-zxc zxc-0.9.1/docs/FORMAT.md000066400000000000000000000277701515574030100146050ustar00rootroot00000000000000# ZXC Compressed File Format (Technical Specification) **Date**: March 2026 **Format Version**: 5 This document describes the on-disk binary format of a ZXC compressed file. 
It formalizes the current reference implementation of format version **5**. ## 1. Conventions - **Byte order**: all multi-byte integers are **little-endian**. - **Unit**: offsets are in bytes, zero-based from the start of each structure. - **Checksum mode**: enabled globally by a flag in the file header. - **Block model**: a file is a sequence of blocks terminated by an EOF block, then a footer. --- ## 2. Full File Layout ```text +----------------------+ 16 bytes | File Header | +----------------------+ | Block #0 | | - 8B Block Header | | - Block Payload | | - Optional 4B CRC32 | +----------------------+ | Block #1 | | ... | +----------------------+ | EOF Block | 8 bytes (type=255, comp_size=0) +----------------------+ | File Footer | 12 bytes +----------------------+ ``` --- ## 3. File Header (16 bytes) ```text Offset Size Field 0x00 4 Magic Word 0x04 1 Format Version 0x05 1 Chunk Size Code 0x06 1 Flags 0x07 7 Reserved (must be 0) 0x0E 2 Header CRC16 ``` ### 3.1 Field definitions - **Magic Word** (`u32`): `0x9CB02EF5`. - **Format Version** (`u8`): currently `5`. - **Chunk Size Code** (`u8`): - If the value is in the range `[12, 21]`, it is an **exponent**: `block_size = 2^code`. - `12` = 4 KB, `13` = 8 KB, ..., `18` = 256 KB (default), ..., `21` = 2 MB. - The legacy value `64` (from older encoders) is accepted and maps to 256 KB (default). - All other values are rejected (`ZXC_ERROR_BAD_BLOCK_SIZE`). - Valid block sizes are powers of 2 in the range **4 KB – 2 MB**. - **Flags** (`u8`): - Bit 7 (`0x80`): `HAS_CHECKSUM`. - Bits 0..3: checksum algorithm id (`0` = RapidHash-based folding). - Bits 4..6: reserved. - **Reserved**: 7 bytes set to zero. - **Header CRC16** (`u16`): computed with `zxc_hash16` on the 16-byte header where bytes `0x0E..0x0F` are zeroed. --- ## 4. Generic Block Container Each block starts with a fixed 8-byte block header. 
```text Offset Size Field 0x00 1 Block Type 0x01 1 Block Flags 0x02 1 Reserved 0x03 4 Compressed Payload Size (comp_size) 0x07 1 Header CRC8 ``` ### 4.1 Header semantics - **Block Type**: - `0` = RAW - `1` = GLO - `2` = NUM - `3` = GHI - `255` = EOF - **Block Flags**: currently not used by implementation (written as `0`). - **Reserved**: must be 0. - **comp_size**: payload size in bytes (does **not** include the optional trailing 4-byte block checksum). - **Header CRC8**: `zxc_hash8` over the 8-byte header with byte `0x07` forced to zero before hashing. ### 4.2 Block physical layout ```text [8B Block Header] + [comp_size bytes payload] + [optional 4B checksum] ``` When checksums are enabled at file level, each non-EOF block carries one trailing 4-byte checksum of its compressed payload. --- ## 5. Block Types and Payload Formats ## 5.1 RAW block (`type=0`) Payload is uncompressed data. ```text Payload = raw bytes raw_size = comp_size ``` No internal sub-header. --- ## 5.2 NUM block (`type=2`) Used for numeric data (32-bit integer stream), delta/zigzag + bitpacking. ### NUM payload layout ```text +--------------------------+ | NUM Header (16 bytes) | +--------------------------+ | Frame #0 header (16B) | | Frame #0 packed bits | +--------------------------+ | Frame #1 header (16B) | | Frame #1 packed bits | +--------------------------+ | ... | +--------------------------+ ``` ### NUM Header (16 bytes) ```text Offset Size Field 0x00 8 n_values (u64) 0x08 2 frame_size (u16, currently 128) 0x0A 6 reserved ``` ### NUM frame record (repeated) ```text Offset Size Field 0x00 2 nvals in frame (u16) 0x02 2 bits per value (u16) 0x04 8 base/running seed (u64) 0x0C 4 packed_size in bytes (u32) 0x10 ... packed delta bitstream ``` Notes: - Values are reconstructed by bit-unpacking, zigzag decode, then prefix accumulation. - `packed_size` bytes immediately follow each 16-byte frame header. --- ## 5.3 GLO block (`type=1`) General LZ-style format with separated streams. 
### GLO payload layout ```text +-------------------------------+ | GLO Header (16 bytes) | +-------------------------------+ | 4 Section Descriptors (32B) | +-------------------------------+ | Literals stream | +-------------------------------+ | Tokens stream | +-------------------------------+ | Offsets stream | +-------------------------------+ | Extras stream | +-------------------------------+ ``` ### GLO Header (16 bytes) ```text Offset Size Field 0x00 4 n_sequences (u32) 0x04 4 n_literals (u32) 0x08 1 enc_lit (0=RAW, 1=RLE) 0x09 1 enc_litlen (reserved) 0x0A 1 enc_mlen (reserved) 0x0B 1 enc_off (0=16-bit offsets, 1=8-bit offsets) 0x0C 4 reserved ``` ### GLO section descriptors (4 × 8 bytes) Descriptor format (packed `u64`): - low 32 bits: compressed size - high 32 bits: raw size Section order: 1. Literals 2. Tokens 3. Offsets 4. Extras ### GLO stream content - **Literals stream**: - raw literal bytes, or - RLE tokenized if `enc_lit=1`. - **Tokens stream**: - one byte per sequence: `(LL << 4) | ML`. - `LL` and `ML` are 4-bit fields. - **Offsets stream**: - `n_sequences × 1` byte if `enc_off=1`, else `n_sequences × 2` bytes LE. - Values are **biased**: stored value = `actual_offset - 1`. Decoder adds `+ 1`. - This makes `offset == 0` impossible by construction (minimum decoded offset = 1). - **Extras stream**: - Prefix-varint overflow values for token saturations: - if `LL == 15`, read varint and add to LL - if `ML == 15`, read varint and add to ML - actual match length is `ML + 5` (minimum match = 5). --- ## 5.4 GHI block (`type=3`) High-throughput LZ format with packed 32-bit sequences. 
### GHI payload layout ```text +-------------------------------+ | GHI Header (16 bytes) | +-------------------------------+ | 3 Section Descriptors (24B) | +-------------------------------+ | Literals stream | +-------------------------------+ | Sequences stream (N * 4B) | +-------------------------------+ | Extras stream | +-------------------------------+ ``` ### GHI Header (16 bytes) Same binary layout as GLO header: - `n_sequences`, `n_literals`, `enc_lit`, `enc_litlen`, `enc_mlen`, `enc_off`, reserved. In practice for GHI: - `enc_lit = 0` (raw literals) - `enc_off` is metadata (sequence words always store 16-bit offsets) ### GHI section descriptors (3 × 8 bytes) Section order: 1. Literals 2. Sequences 3. Extras Each descriptor uses the same packed size encoding as GLO (`u64`: comp32|raw32). ### GHI sequence word format (32 bits) ```text Bits 31..24 : LL (literal length, 8 bits) Bits 23..16 : ML (match length minus 5, 8 bits) Bits 15..0 : Offset - 1 (16 bits, biased; decode: stored + 1) ``` Memory order (little-endian word): ```text byte0 = offset low byte1 = offset high byte2 = ML byte3 = LL ``` Overflow rules: - if `LL == 255`, read varint from Extras and add it to LL. - if `ML == 255`, read varint, then add minimum match (`+5`). - otherwise decoded match length is `ML + 5`. --- ## 5.5 EOF block (`type=255`) EOF marks end of block stream. Constraints: - block header is present (8 bytes) - `comp_size` **must be 0** - no payload - no per-block trailing checksum Immediately after EOF block header comes the 12-byte file footer. --- ## 6. Prefix Varint (Extras stream) ZXC extras use a prefix-length varint. Length is encoded in unary form in the high bits of first byte: - `0xxxxxxx` → 1 byte total - `10xxxxxx` → 2 bytes total - `110xxxxx` → 3 bytes total - `1110xxxx` → 4 bytes total - `11110xxx` → 5 bytes total Payload bits from following bytes are concatenated little-endian style (low bits first). 
Used by GLO/GHI to carry LL/ML overflows beyond token/sequence inline limits. --- ## 7. Checksums and Integrity ## 7.1 Header checksums - File header: 16-bit (`zxc_hash16`). - Block header: 8-bit (`zxc_hash8`). These protect metadata/navigation fields. ## 7.2 Per-block checksum (optional) When file header has `HAS_CHECKSUM=1`: - each data block appends a 4-byte checksum after payload. - checksum input is **compressed payload bytes only** (not block header). - algorithm id currently `0` (RapidHash folded to 32-bit). ## 7.3 Global stream hash A rolling global hash is maintained from per-block checksums in stream order: ```text global = 0 for each data block checksum b: global = ((global << 1) | (global >> 31)) XOR b ``` This value is stored in the file footer (or zeroed when checksum mode is disabled). --- ## 8. File Footer (12 bytes) Footer is mandatory and placed immediately after EOF block header. ```text Offset Size Field 0x00 8 original_source_size (u64) 0x08 4 global_hash (u32) ``` - **original_source_size**: full uncompressed size of the file. - **global_hash**: - valid when checksum mode is active; - set to zero when checksum mode is disabled. --- ## 9. Decoder Validation Checklist (Practical) 1. Validate file header magic/version/CRC16. 2. Parse blocks sequentially: - validate block header CRC8, - check block bounds using `comp_size`, - if enabled, verify trailing block checksum. 3. Decode payload according to block type. 4. On EOF: - require `comp_size == 0`, - read footer, - compare footer `original_source_size` with produced output size, - if enabled, compare footer `global_hash` with recomputed rolling hash. --- ## 10. 
Summary of Useful Fixed Sizes - File header: **16** bytes - Block header: **8** bytes - Block checksum (optional): **4** bytes - NUM header: **16** bytes - GLO header: **16** bytes - GHI header: **16** bytes - Section descriptor: **8** bytes - GLO descriptors total: **32** bytes - GHI descriptors total: **24** bytes - File footer: **12** bytes --- ## 11. Worked Example (Real Hexdump) This example was produced with the CLI from a 10-byte input (`Hello ZXC\n`) using: ```bash zxc -z -C -1 sample.txt ``` Generated archive size: **58 bytes**. ### 11.1 Full hexdump ```text 00000000: F5 2E B0 9C 05 12 80 00 00 00 00 00 00 00 9E 53 00000010: 00 00 00 0A 00 00 00 69 48 65 6C 6C 6F 20 5A 58 00000020: 43 0A 90 BB A1 75 FF 00 00 00 00 00 00 02 0A 00 00000030: 00 00 00 00 00 00 90 BB A1 75 ``` ### 11.2 Byte-level decoding #### A) File Header (offset `0x00`, 16 bytes) ```text F5 2E B0 9C | 05 | 12 | 80 | 00 00 00 00 00 00 00 | 9E 53 ``` - `F5 2E B0 9C` → magic word (LE) = `0x9CB02EF5`. - `05` → format version 5. - `12` → chunk-size code 18 (exponent encoding: `2^18 = 262144` bytes, i.e. 256 KiB). - `80` → checksum enabled (`HAS_CHECKSUM=1`, algo id 0). - next 7 bytes are reserved zeros. - `9E 53` → header CRC16. #### B) Data Block #0 (RAW) Block header at offset `0x10`: ```text 00 | 00 | 00 | 0A 00 00 00 | 69 ``` - type `00` = RAW. - flags `00`, reserved `00`. - `comp_size = 0x0000000A = 10` bytes. - header CRC8 = `0x69`. Payload at `0x18..0x21` (10 bytes): ```text 48 65 6C 6C 6F 20 5A 58 43 0A ``` ASCII: `Hello ZXC\n`. Trailing block checksum at `0x22..0x25`: ```text 90 BB A1 75 ``` LE value: `0x75A1BB90`. #### C) EOF Block (offset `0x26`, 8 bytes) ```text FF | 00 | 00 | 00 00 00 00 | 02 ``` - type `FF` = EOF. - `comp_size = 0` (mandatory). - header CRC8 = `0x02`. #### D) File Footer (offset `0x2E`, 12 bytes) ```text 0A 00 00 00 00 00 00 00 | 90 BB A1 75 ``` - original source size = `10` bytes. - global hash = `0x75A1BB90`. 
Since there is exactly one data block, the global hash equals that block checksum: ```text global0 = 0 global1 = rotl1(global0) XOR block_crc = block_crc ``` ### 11.3 Structural view with absolute offsets ```text 0x00..0x0F File Header (16) 0x10..0x17 RAW Block Header (8) 0x18..0x21 RAW Payload (10) 0x22..0x25 RAW Block Checksum (4) 0x26..0x2D EOF Block Header (8) 0x2E..0x39 File Footer (12) ``` zxc-0.9.1/docs/WHITEPAPER.md000066400000000000000000001172461515574030100152630ustar00rootroot00000000000000# ZXC: High-Performance Asymmetric Lossless Compression **Version**: 0.9.0 **Date**: March 2026 **Author**: Bertrand Lebonnois --- ## 1. Executive Summary In modern software delivery pipelines—specifically **Mobile Gaming**, **Embedded Systems**, and **FOTA (Firmware Over-The-Air)**—data is typically generated on high-performance x86 workstations but consumed on energy-constrained ARM devices. Standard industry codecs like LZ4 offer excellent performance but fail to exploit the "Write-Once, Read-Many" (WORM) nature of these pipelines. **ZXC** is a lossless codec designed to bridge this gap. By utilizing an **asymmetric compression model**, ZXC achieves a **>40% increase in decompression speed on ARM** compared to LZ4, while simultaneously reducing storage footprints. On x86 development architecture, ZXC maintains competitive throughput, ensuring no disruption to build pipelines. ## 2. The Efficiency Gap The industry standard, LZ4, prioritizes symmetric speed (fast compression and fast decompression). While ideal for real-time logs or RAM swapping, this symmetry is useless for asset distribution. * **Wasted Cycles**: CPU cycles saved during the single compression event (on a build server) do not benefit the millions of end-users decoding the data. * **The Battery Tax**: On mobile devices, slower decompression keeps the CPU active longer, draining battery and generating heat. ## 3. 
The ZXC Solution ZXC utilizes a computationally intensive encoder to generate a bitstream specifically structured to **maximize decompression throughput**. By performing heavy analysis upfront, the encoder produces a layout optimized for the instruction pipelining and branch prediction capabilities of modern CPUs, particularly ARMv8, effectively offloading complexity from the decoder to the encoder. ### 3.1 Asymmetric Pipeline ZXC employs a Producer-Consumer architecture to decouple I/O operations from CPU-intensive tasks. This allows for parallel processing where input reading, compression/decompression, and output writing occur simultaneously, effectively hiding I/O latency. ### 3.2 Modular Architecture The ZXC file format is inherently modular. **Each block is independent and can be encoded and decoded using the algorithm best suited** for that specific data type. This flexibility allows the format to evolve and incorporate new compression strategies without breaking backward compatibility. ## 4. Core Algorithms ZXC utilizes a hybrid approach combining LZ77 (Lempel-Ziv) dictionary matching with advanced entropy coding and specialized data transforms. ### 4.1 LZ77 Engine The heart of ZXC is a heavily optimized LZ77 engine that adapts its behavior based on the requested compression level: * **Hash Chain & Collision Resolution**: Uses a fast hash table with chaining to find matches in the history window (configurable sliding window, power-of-2 from 4 KB to 2 MB, default 256 KB). * **Lazy Matching**: Implements a "lookahead" strategy to find better matches at the cost of slight encoding speed, significantly improving decompression density. ### 4.2 Specialized SIMD Acceleration & Hardware Hashing ZXC leverages modern instruction sets to maximize throughput on both ARM and x86 architectures. 
* **ARM NEON Optimization**: Extensive usage of vld1q_u8 (vector load) and vceqq_u8 (parallel comparison) allows scanning data at wire speed, while vminvq_u8 provides fast rejection of non-matches. * **x86 Vectorization**: Maintains high performance on Intel/AMD platforms via dedicated AVX2 and AVX512 paths (falling back to SSE4.1 on older hardware), ensuring parity with ARM throughput. * **High-Speed Integrity**: Block validation relies on **rapidhash**, a modern non-cryptographic hash algorithm that fully exploits hardware acceleration to verify data integrity without bottlenecking the decompression pipeline. ### 4.3 Entropy Coding & Bitpacking * **RLE (Run-Length Encoding)**: Automatically detects runs of identical bytes. * **Prefix Varint Encoding**: Variable-length integer encoding (similar to LEB128 but prefix-based) for overflow values. * **Bit-Packing**: Compressed sequences are packed into dedicated streams using minimal bit widths. #### Prefix Varint Format ZXC uses a **Prefix Varint** encoding for overflow values. Unlike standard VByte (which uses a continuation bit in every byte), Prefix Varint encodes the total length of the integer in the **unary prefix of the first byte**. This allows the decoder to determine the sequence length immediately, enabling branchless or highly predictable decoding without serial dependencies. **Encoding Scheme:** | Prefix (Binary) | Total Bytes | Data Bits (1st Byte) | Total Data Bits | Range (Value < X) | |-----------------|-------------|----------------------|-----------------|-------------------| | `0xxxxxxx` | 1 | 7 | 7 | 128 | | `10xxxxxx` | 2 | 6 | 14 (6+8) | 16,384 | | `110xxxxx` | 3 | 5 | 21 (5+8+8) | 2,097,152 | | `1110xxxx` | 4 | 4 | 28 (4+8+8+8) | 268,435,456 | | `11110xxx` | 5 | 3 | 35 (3+8+8+8+8) | 34,359,738,368 | **Example**: Encoding value `300` (binary: `100101100`): ```text Value 300 > 127 and < 16383 -> Uses 2-byte format (Prefix '10'). 
Step 1: Low 6 bits 300 & 0x3F = 44 (0x2C, binary 101100) Byte 1 = Prefix '10' | 101100 = 10101100 (0xAC) Step 2: Remaining high bits 300 >> 6 = 4 (0x04, binary 00000100) Byte 2 = 0x04 Result: 0xAC 0x04 Decoding Verification: Byte 1 (0xAC) & 0x3F = 44 Byte 2 (0x04) << 6 = 256 Total = 256 + 44 = 300 ``` ## 5. File Format Specification The ZXC file format is block-based, robust, and designed for parallel processing. ### 5.1 Global Structure (File Header) The file begins with a **16-byte** header that identifies the format and specifies decompression parameters. **FILE Header (16 bytes):** ``` Offset: 0 4 5 6 7 14 16 +---------------+-------+-------+-------+-----------------------+-------+ | Magic Word | Ver | Chunk | Flags | Reserved | CRC | | (4 bytes) | (1B) | (1B) | (1B) | (7 bytes, must be 0) | (2B) | +---------------+-------+-------+-------+-----------------------+-------+ ``` * **Magic Word (4 bytes)**: `0x9CB02EF5` (stored little-endian as bytes `F5 2E B0 9C`). * **Version (1 byte)**: Current version is `5`. * **Chunk Size Code (1 byte)**: Defines the processing block size using **exponent encoding**: - If the value is in `[12, 21]`: block size = `2^value` bytes (4 KB to 2 MB). - `12` = 4 KB, `13` = 8 KB, `14` = 16 KB, `15` = 32 KB, `16` = 64 KB, `17` = 128 KB, `18` = 256 KB (default), `19` = 512 KB, `20` = 1 MB, `21` = 2 MB. - Legacy value `64` is accepted for backward compatibility (maps to 256 KB). - Block sizes must be powers of 2. * **Flags (1 byte)**: Global configuration flags. - **Bit 7 (MSB)**: `HAS_CHECKSUM`. If `1`, checksums are enabled for the stream. Every block will carry a trailing 4-byte checksum, and the footer will contain a global checksum. If `0`, no checksums are present. - **Bits 4-6**: Reserved. - **Bits 0-3**: Checksum Algorithm ID (e.g., `0` = RapidHash). * **Reserved (7 bytes)**: Reserved for future use (must be 0). * **CRC (2 bytes)**: 16-bit Header Checksum. Calculated on the 16-byte header (with CRC bytes set to 0) using `zxc_hash16`. 
### 5.2 Block Header Structure Each data block consists of an **8-byte** generic header that precedes the specific payload. This header allows the decoder to navigate the stream and identify the processing method required for the next chunk of data. **BLOCK Header (8 bytes):** ``` Offset: 0 1 2 3 7 8 +-------+-------+-------+-----------------------+-------+ | Type | Flags | Rsrvd | Comp Size | CRC | | (1B) | (1B) | (1B) | (4 bytes) | (1B) | +-------+-------+-------+-----------------------+-------+ Block Layout: [ Header (8B) ] + [ Compressed Payload (Comp Size bytes) ] + [ Optional Checksum (4B) ] ``` **Note**: The Checksum (if enabled in File Header) is **4 bytes** (32-bit), is always located **at the end** of the compressed data, and is calculated **on the compressed payload**. * **Type**: Block encoding type (0=RAW, 1=GLO, 2=NUM, 3=GHI, 255=EOF). * **Flags**: Not used for now. * **Rsrvd**: Reserved for future use (must be 0). * **Comp Size**: Compressed payload size (excluding header and optional checksum). * **CRC**: 1-byte Header Checksum (located at the end of the header). Calculated on the 8-byte header (with CRC byte set to 0) using `zxc_hash8`. > **Note**: The decompressed size is not stored in the block header. It is derived from internal Section Descriptors within the compressed payload (for GLO/GHI blocks), from the NUM header (for NUM blocks), or equals `Comp Size` (for RAW blocks). > **Note**: While the format is designed for threaded execution, a single-threaded API is also available for constrained environments or simple integration cases. ### 5.3 Specific Header: NUM (Numeric) (Present immediately after the Block Header) **NUM Header (16 bytes):** ``` Offset: 0 8 10 16 +-------------------------------+-------+-------------------------+ | N Values | Frame | Reserved | | (8 bytes) | (2B) | (6 bytes) | +-------------------------------+-------+-------------------------+ ``` * **N Values**: Total count of integers encoded in the block. 
* **Frame**: Processing window size (currently always 128). * **Reserved**: Padding for alignment. ### 5.4 Specific Header: GLO (Generic Low) (Present immediately after the Block Header) **GLO Header (16 bytes):** ``` Offset: 0 4 8 9 10 11 12 16 +---------------+---------------+---+---+---+---+---------------+ | N Sequences | N Literals |Lit|LL |ML |Off| Reserved | | (4 bytes) | (4 bytes) |Enc|Enc|Enc|Enc| (4 bytes) | +---------------+---------------+---+---+---+---+---------------+ ``` * **N Sequences**: Total count of LZ sequences in the block. * **N Literals**: Total count of literal bytes. * **Encoding Types** - `Lit Enc`: Literal stream encoding (0=RAW, 1=RLE). **Currently used.** - `LL Enc`: Literal lengths encoding. **Reserved for future use** (lengths are packed in tokens). - `ML Enc`: Match lengths encoding. **Reserved for future use** (lengths are packed in tokens). - `Off Enc`: Offset encoding mode. **Currently used** - `0` = 16-bit offsets (2 bytes each, max distance 65535) - `1` = 8-bit offsets (1 byte each, max distance 255) * **Reserved**: Padding for alignment. 
**Section Descriptors (4 × 8 bytes = 32 bytes total):** Each descriptor stores sizes as a packed 64-bit value: ``` Single Descriptor (8 bytes): +-----------------------------------+-----------------------------------+ | Compressed Size (4 bytes) | Raw Size (4 bytes) | | (low 32 bits) | (high 32 bits) | +-----------------------------------+-----------------------------------+ Full Layout (32 bytes): Offset: 0 8 16 24 32 +---------------+---------------+---------------+---------------+ | Literals Desc | Tokens Desc | Offsets Desc | Extras Desc | | (8 bytes) | (8 bytes) | (8 bytes) | (8 bytes) | +---------------+---------------+---------------+---------------+ ``` **Section Contents:** | # | Section | Description | |---|-------------|-------------------------------------------------------| | 0 | **Literals**| Raw bytes to copy, or RLE-compressed if `enc_lit=1` | | 1 | **Tokens** | Packed bytes: `(LiteralLen << 4) \| MatchLen` | | 2 | **Offsets** | Match distances: 8-bit if `enc_off=1`, else 16-bit LE | | 3 | **Extras** | Prefix Varint overflow values when LitLen or MatchLen ≥ 15 | **Data Flow Example:** ``` GLO Block Data Layout: +------------------------------------------------------------------------+ | Literals Stream | Tokens Stream | Offsets Stream | Extras Stream | | (desc[0] bytes) | (desc[1] bytes)| (desc[2] bytes)| (desc[3] bytes) | +------------------------------------------------------------------------+ ↓ ↓ ↓ ↓ Raw bytes Token parsing Match lookup Length overflow ``` **Why Comp Size and Raw Size?** Each descriptor stores both a compressed and raw size to support secondary encoding of streams: | Section | Comp Size | Raw Size | Different? 
| |-------------|----------------------|---------------------|----------------------| | **Literals**| RLE size (if used) | Original byte count | Yes, if RLE enabled | | **Tokens** | Stream size | Stream size | No | | **Offsets** | N×1 or N×2 bytes | N×1 or N×2 bytes | No (size depends on `enc_off`) | | **Extras** | Prefix Varint stream size | Prefix Varint stream size | No | Currently, the **Literals** section uses different sizes when RLE compression is applied (`enc_lit=1`). The **Offsets** section size depends on `enc_off`: N sequences × 1 byte (if `enc_off=1`) or N sequences × 2 bytes (if `enc_off=0`). > **Design Note**: This format is designed for future extensibility. The dual-size architecture allows adding entropy coding (FSE/ANS) or bitpacking to any stream without breaking backward compatibility. ### 5.5 Specific Header: GHI (Generic High) (Present immediately after the Block Header) The **GHI** (Generic High-Velocity) block format is optimized for maximum decompression speed. It uses a **packed 32-bit sequence** format that allows 4-byte aligned reads, reducing memory access latency and enabling efficient SIMD processing. **GHI Header (16 bytes):** ``` Offset: 0 4 8 9 10 11 12 16 +---------------+---------------+---+---+---+---+---------------+ | N Sequences | N Literals |Lit|LL |ML |Off| Reserved | | (4 bytes) | (4 bytes) |Enc|Enc|Enc|Enc| (4 bytes) | +---------------+---------------+---+---+---+---+---------------+ ``` * **N Sequences**: Total count of LZ sequences in the block. * **N Literals**: Total count of literal bytes. * **Encoding Types** - `Lit Enc`: Literal stream encoding (0=RAW). - `LL Enc`: Reserved for future use. - `ML Enc`: Reserved for future use. - `Off Enc`: Offset encoding mode: - `0` = 16-bit offsets (max distance 65535) - `1` = 8-bit offsets (max distance 255, enables smaller sequence packing) * **Reserved**: Padding for alignment. 
**Section Descriptors (3 × 8 bytes = 24 bytes total):** ``` Full Layout (24 bytes): Offset: 0 8 16 24 +---------------+---------------+---------------+ | Literals Desc | Sequences Desc| Extras Desc | | (8 bytes) | (8 bytes) | (8 bytes) | +---------------+---------------+---------------+ ``` **Section Contents:** | # | Section | Description | |---|---------------|-------------------------------------------------------| | 0 | **Literals** | Raw bytes to copy | | 1 | **Sequences** | Packed 32-bit sequences (see format below) | | 2 | **Extras** | Prefix Varint overflow values when LitLen or MatchLen ≥ 255 | **Packed Sequence Format (32 bits):** Unlike GLO which uses separate token and offset streams, GHI packs all sequence data into a single 32-bit word for cache-friendly sequential access: ``` 32-bit Sequence Word (Little Endian): +--------+--------+------------------+ | LL | ML | Offset | | 8 bits | 8 bits | 16 bits | +--------+--------+------------------+ [31:24] [23:16] [15:0] Byte Layout in Memory: Offset: 0 1 2 3 +--------+--------+--------+--------+ | Off Lo | Off Hi | ML | LL | +--------+--------+--------+--------+ ``` * **LL (Literal Length)**: 8 bits (0-254, value 255 triggers Prefix Varint overflow) * **ML (Match Length - 5)**: 8 bits (actual length = ML + 5, range 5-259, value 255 triggers Prefix Varint overflow) * **Offset**: 16 bits (match distance, 1-65535) **Data Flow Example:** ``` GHI Block Data Layout: +------------------------------------------------------------+ | Literals Stream | Sequences Stream | Extras Stream | | (desc[0] bytes) | (desc[1] bytes = N×4) | (desc[2] bytes) | +------------------------------------------------------------+ ↓ ↓ ↓ Raw bytes 32-bit seq read Length overflow ``` **Key Differences: GLO vs GHI** | Feature | GLO (Global) | GHI (High-Velocity) | |--------------------|---------------------------------|----------------------------------| | **Sections** | 4 (Lit, Tokens, Offsets, Extras)| 3 (Lit, Sequences, Extras) | | 
**Sequence Format**| 1-byte token + separate offset | Packed 32-bit word | | **LL/ML Bits** | 4 bits each (overflow at 15) | 8 bits each (overflow at 255) | | **Memory Access** | Multiple stream pointers | Single aligned 4-byte reads | | **Decoder Speed** | Fast | Fastest (optimized for ARM/x86) | | **RLE Support** | Yes (literals) | No | | **Best For** | General data, good compression | Maximum decode throughput | > **Design Rationale**: The 32-bit packed format eliminates pointer chasing between token and offset streams. By reading a single aligned word per sequence, the decoder achieves better cache utilization and enables aggressive loop unrolling (4x) for maximum throughput on modern CPUs. ### 5.6 Specific Header: EOF (End of File) (Block Type 255) The **EOF** block marks the end of the ZXC stream. It ensures that the decompressor knows exactly when to stop processing, allowing for robust stream termination even when file size metadata is unavailable or when concatenating streams. * **Structure**: Standard 8-byte Block Header. * **Flags**: * **Bit 7 (0x80)**: `has_checksum`. If set, implies the **Global Stream Checksum** in the footer is valid and should be verified. * **Comp Size**: Unlike other blocks, this field **MUST be set to 0**. The decoder enforces strict validation (`Type == EOF` AND `Comp Size == 0`) to prevent processing of malformed termination blocks. * **CRC**: 1-byte Header Checksum (located at the end of the header). Calculated on the 8-byte header (with CRC byte set to 0) using `zxc_hash8`. ### 5.7 File Footer (Present immediately after the EOF Block) A mandatory **12-byte footer** closes the stream, providing total source size information and the global checksum. 
**Footer Structure (12 bytes):** ``` Offset: 0 8 12 +-------------------------------+---------------+ | Original Source Size | Global Hash | | (8 bytes) | (4 bytes) | +-------------------------------+---------------+ ``` * **Original Source Size** (8 bytes): Total size of the uncompressed data. * **Global Hash** (4 bytes): The **Global Stream Checksum**. Valid only if the EOF block has the `has_checksum` flag set (or the decoder context requires it). * **Algorithm**: `Rotation + XOR`. * For each block with a checksum: `global_hash = (global_hash << 1) | (global_hash >> 31); global_hash ^= block_hash;` ### 5.8 Block Encoding & Processing Algorithms The efficiency of ZXC relies on specialized algorithmic pipelines for each block type. #### Type 1: GLO (Global) This format is used for standard data. It employs a **multi-stage encoding pipeline**: **Encoding Process**: 1. **LZ77 Parsing**: The encoder iterates through the input using a rolling hash to detect matches. * *Hash Chain*: Collisions are resolved via a chain table to find optimal matches in dense data. * *Lazy Matching*: If a match is found, the encoder checks the next position. If a better match starts there, the current byte is emitted as a literal (deferred matching). 2. **Tokenization**: Matches are split into three components: * *Literal Length*: Number of raw bytes before the match. * *Match Length*: Duration of the repeated pattern. * *Offset*: Distance back to the pattern start. 3. **Stream Separation**: These components are routed to separate buffers: * *Literals Buffer*: Raw bytes. * *Tokens Buffer*: Packed `(LitLen << 4) | MatchLen`. * *Offsets Buffer*: Variable-width distances (8-bit or 16-bit, see below). * *Extras Buffer*: Overflow values for lengths >= 15 (Prefix Varint encoded). * *Offset Mode Selection*: The encoder tracks the maximum offset across all sequences. If all offsets are ≤ 255, the 8-bit mode (`enc_off=1`) is selected, saving 1 byte per sequence compared to 16-bit mode. 4. 
**RLE Pass**: The literals buffer is scanned for run-length encoding opportunities (runs of identical bytes). If beneficial (>10% gain), it is compressed in place. 5. **Final Serialization**: All buffers are concatenated into the payload, preceded by section descriptors. **Decoding Process**: 1. **Deserialization**: The decoder reads the section descriptors to obtain pointers to the start of each stream (Literals, Tokens, Offsets). 2. **Vertical Execution**: The main loop reads from all three streams simultaneously. 3. **Wild Copy**: * *Literals*: Copied using unaligned 16-byte SIMD loads/stores (`vld1/vst1` on ARM). * *Matches*: Copied using 16-byte stores. Overlapping matches (e.g., repeating pattern "ABC" for 100 bytes) are handled naturally by the CPU's store forwarding or by specific overlapped-copy primitives. * **Safety**: A "Safe Zone" at the end of the buffer forces a switch to a cautious byte-by-byte loop, allowing the main loop to run without bounds checks. #### Type 3: GHI (High-Velocity) This format prioritizes decompression throughput over compression ratio. It uses a **unified sequence stream**: **Encoding Process**: 1. **LZ77 Parsing**: Same as GLO, with aggressive lazy matching and step skipping for optimal matches. 2. **Sequence Packing**: Each match is packed into a 32-bit word: * Bits [31:24]: Literal Length (8 bits) * Bits [23:16]: Match Length - 5 (8 bits) * Bits [15:0]: Offset (16 bits) 3. **Stream Assembly**: Only three streams are generated: * *Literals Buffer*: Raw bytes (no RLE). * *Sequences Buffer*: Packed 32-bit words (4 bytes each). * *Extras Buffer*: Prefix Varint overflow values for lengths >= 255. 4. **Final Serialization**: Streams are concatenated with 3 section descriptors. **Decoding Process**: 1. **Single-Read Loop**: The decoder reads one 32-bit word per sequence, extracting LL, ML, and offset in a single operation. 2. 
**4x Unrolled Fast Path**: When sufficient buffer margin exists, the decoder processes 4 sequences per iteration: * Pre-reads 4 sequences into registers * Copies literals and matches with 32-byte SIMD operations * Minimal branching for maximum instruction-level parallelism 3. **Offset Validation Threshold**: For the first 256 (8-bit mode) or 65536 (16-bit mode) bytes, offsets are validated against written bytes. After this threshold, all offsets are guaranteed valid. 4. **Wild Copy**: Same 32-byte SIMD copies as GLO, with special handling for overlapping matches (offset < 32). #### Type 2: NUM (Numeric) Triggered when data is detected as a dense array of 32-bit integers. **Encoding Process**: 1. **Vectorized Delta**: Computes `delta[i] = val[i] - val[i-1]` using SIMD integers (AVX2/NEON). 2. **ZigZag Transform**: Maps signed deltas to unsigned space: `(d << 1) ^ (d >> 31)`. 3. **Bit Analysis**: Determines the maximum number of bits `B` needed to represent the deltas in a 128-value frame. 4. **Bit-Packing**: Packs 128 integers into `128 * B` bits. **Decoding Process**: 1. **Bit-Unpacking**: Unpacks bitstreams back into integers. 2. **ZigZag Decode**: Reverses the mapping. 3. **Integration**: Computes the prefix sum (cumulative addition) to restore original values. *Note: ZXC utilizes a 4x unrolled loop here to pipeline the dependency chain.* ### 5.9 Data Integrity Every compressed block can optionally be protected by a **32-bit checksum** to ensure data reliability. #### Post-Compression Verification Unlike traditional codecs that verify the integrity of the original uncompressed data, ZXC calculates checksums on the **compressed** payload. * **Zero-Overhead Decompression**: Verifying uncompressed data requires computing a hash over the output *after* decompression, contending for cache and CPU resources with the decompression logic itself. By checksumming the compressed stream, verification happens *during* the read phase, before the data even enters the decoder. 
* **Early Failure Detection**: Corruption is detected before attempting to decompress, preventing potential crashes or buffer overruns in the decoder caused by malformed data. * **Reduced Memory Bandwidth**: The checksum is computed over a much smaller dataset (the compressed block), saving significant memory bandwidth. #### Multi-Algorithm Support ZXC supports multiple integrity verification algorithms (though currently standardized on rapidhash). * **Identified Algorithm (0x00: rapidhash)**: The default algorithm. The 64-bit rapidhash result is folded (XORed) into a 32-bit value to minimize storage overhead while maintaining strong collision resistance for block-level integrity. * **Performance First**: By using a modern non-cryptographic hash, ZXC ensures that integrity checks do not bottleneck decompression throughput. #### Credit The default `rapidhash` algorithm is based on wyhash and was developed by Nicolas De Carli. It is designed to fully exploit hardware performance while maintaining top-tier mathematical distribution qualities. ## 6. System Architecture (Threading) ZXC leverages a threaded **Producer-Consumer** model to saturate modern multi-core CPUs. ### 6.1 Asynchronous Compression Pipeline 1. **Block Splitting (Main Thread)**: The input file is read and sliced into fixed-size chunks (configurable, default 256 KB, power of 2 from 4 KB to 2 MB). 2. **Ring Buffer Submission**: Chunks are placed into a lock-free ring buffer. 3. **Parallel Compression (Worker Threads)**: * Workers pull chunks from the queue. * Each worker compresses its chunk independently in its own context (`zxc_cctx_t`). * Output is written to a thread-local buffer. 4. **Reordering & Write (Writer Thread)**: The writer thread ensures chunks are written to disk in the correct original order, regardless of which worker finished first. ### 6.2 Asynchronous Decompression Pipeline 1. 
**Header Parsing (Main Thread)**: The main thread scans block headers to identify boundaries and payload sizes. 2. **Dispatch**: Compressed payloads are fed into the worker job queue. 3. **Parallel Decoding (Worker Threads)**: * Workers decode chunks into pre-allocated output buffers. * **Fast Path**: If the output buffer has sufficient margin, the decoder uses "wild copies" (16-byte SIMD stores) to bypass bounds checking for maximal speed. 4. **Serialization**: Decompressed blocks are committed to the output stream sequentially. ## 7. Performance Analysis (Benchmarks) **Methodology:** Benchmarks were conducted using `lzbench` (by inikep). * **Target 1 (Client):** Apple M2 / macOS 15 (Clang 17) * **Target 2 (Cloud):** Google Axion / Linux (GCC 12) * **Target 3 (Build):** AMD EPYC 7763 / Linux (GCC 13) **Figure A**: Decompression Throughput & Storage Ratio (Normalized to LZ4) ![Benchmark Graph ARM64](./images/benchmark_arm64_0.8.0.webp) ### 7.1 Client ARM64 Summary (Apple Silicon) | Compressor | Decompression Speed (Ratio vs LZ4) | Compressed Size (Index LZ4=100) (Lower is Better) | | :--- | :--- | :--- | | **zxc 0.9.0 -1** | **2.48x** | **129.27** | | **zxc 0.9.0 -2** | **2.06x** | **113.66** | | **zxc 0.9.0 -3** | **1.44x** | **97.38** | | **zxc 0.9.0 -4** | **1.37x** | **90.90** | | **zxc 0.9.0 -5** | **1.27x** | **85.30** | | lz4 1.10.0 --fast -17 | 1.18x | 130.57 | | lz4 1.10.0 (Ref) | 1.00x | 100.00 | | lz4hc 1.10.0 -12 | 0.94x | 76.59 | | snappy 1.2.2 | 0.68x | 100.47 | | brotli 1.2.0 -0 | 0.08x | 77.62 | | zstd 1.5.7 --fast --1 | 0.45x | 86.17 | | zstd 1.5.7 -1 | 0.34x | 72.59 | **Decompression Efficiency (Cycles per Byte @ 3.5 GHz)** | Compressor. 
| Cycles/Byte | Performance vs memcpy (*) | | ----------------------- | ----------- | --------------------- | | memcpy | 0.066 | 1.00x (baseline) | | **zxc 0.9.0 -1** | **0.293** | **4.4x** | | **zxc 0.9.0 -2** | **0.354** | **5.4x** | | **zxc 0.9.0 -3** | **0.505** | **7.6x** | | **zxc 0.9.0 -4** | **0.532** | **8.0x** | | **zxc 0.9.0 -5** | **0.575** | **8.7x** | | lz4 1.10.0 | 0.729 | 11.0x | | lz4 1.10.0 --fast -17 | 0.620 | 9.4x | | lz4hc 1.10.0 -12 | 0.771 | 11.6x | | zstd 1.5.7 -1 | 2.158 | 32.6x | | zstd 1.5.7 --fast --1 | 1.615 | 24.4x | | snappy 1.2.2 | 1.072 | 16.2x | | brotli 1.2.0 -0 | 8.642 | 130x | *Lower is better. Calculated using Apple M2 Performance Core frequency (3.5 GHz).* ### 7.2 Cloud Server Summary (ARM / Google Axion) | Compressor | Decompression Speed (Ratio vs LZ4) | Compressed Size (Index LZ4=100) (Lower is Better) | | :--- | :--- | :--- | | **zxc 0.9.0 -1** | **2.09x** | **129.27** | | **zxc 0.9.0 -2** | **1.75x** | **113.66** | | **zxc 0.9.0 -3** | **1.23x** | **97.38** | | **zxc 0.9.0 -4** | **1.17x** | **90.90** | | **zxc 0.9.0 -5** | **1.07x** | **85.30** | | lz4 1.10.0 --fast -17 | 1.16x | 130.57 | | lz4 1.10.0 (Ref) | 1.00x | 100.00 | | lz4hc 1.10.0 -12 | 0.91x | 76.59 | | snappy 1.2.2 | 0.44x | 100.47 | | brotli 1.2.0 -0 | 0.09x | 77.62 | | zstd 1.5.7 --fast --1 | 0.42x | 86.17 | | zstd 1.5.7 -1 | 0.33x | 72.59 | **Decompression Efficiency (Cycles per Byte @ 2.6 GHz)** | Compressor. 
| Cycles/Byte | Performance vs memcpy (*) | | ----------------------- | ----------- | --------------------- | | memcpy | 0.108 | 1.00x (baseline) | | **zxc 0.9.0 -1** | **0.298** | **2.8x** | | **zxc 0.9.0 -2** | **0.356** | **3.3x** | | **zxc 0.9.0 -3** | **0.507** | **4.7x** | | **zxc 0.9.0 -4** | **0.533** | **4.9x** | | **zxc 0.9.0 -5** | **0.581** | **5.4x** | | lz4 1.10.0 | 0.622 | 5.8x | | lz4 1.10.0 --fast -17 | 0.534 | 5.0x | | lz4hc 1.10.0 -12 | 0.685 | 6.4x | | zstd 1.5.7 -1 | 1.906 | 17.7x | | zstd 1.5.7 --fast --1 | 1.475 | 13.7x | | snappy 1.2.2 | 1.406 | 13.1x | | brotli 1.2.0 -0 | 6.736 | 62.5x | *Lower is better. Calculated using Neoverse-V2 base frequency (2.6 GHz).* ### 7.3 Build Server Summary (x86_64 / AMD EPYC) | Compressor | Decompression Speed (Ratio vs LZ4) | Compressed Size (Index LZ4=100) (Lower is Better) | | :--- | :--- | :--- | | **zxc 0.9.0 -1** | **1.90x** | **129.27** | | **zxc 0.9.0 -2** | **1.62x** | **113.66** | | **zxc 0.9.0 -3** | **1.08x** | **97.38** | | **zxc 0.9.0 -4** | **1.03x** | **90.90** | | **zxc 0.9.0 -5** | **0.98x** | **85.30** | | lz4 1.10.0 --fast -17 | 1.15x | 130.57 | | lz4 1.10.0 (Ref) | 1.00x | 100.00 | | lz4hc 1.10.0 -12 | 0.98x | 76.59 | | snappy 1.2.2 | 0.45x | 100.58 | | brotli 1.2.0 -0 | 0.08x | 77.62 | | zstd 1.5.7 --fast --1 | 0.44x | 86.17 | | zstd 1.5.7 -1 | 0.34x | 72.59 | **Decompression Efficiency (Cycles per Byte @ 2.45 GHz)** | Compressor. 
| Cycles/Byte | Performance vs memcpy (*) | | ----------------------- | ----------- | --------------------- | | memcpy | 0.131 | 1.00x (baseline) | | **zxc 0.9.0 -1** | **0.361** | **2.8x** | | **zxc 0.9.0 -2** | **0.424** | **3.2x** | | **zxc 0.9.0 -3** | **0.637** | **4.9x** | | **zxc 0.9.0 -4** | **0.667** | **5.1x** | | **zxc 0.9.0 -5** | **0.701** | **5.4x** | | lz4 1.10.0 | 0.686 | 5.2x | | lz4 1.10.0 --fast -17 | 0.595 | 4.5x | | lz4hc 1.10.0 -12 | 0.703 | 5.4x | | zstd 1.5.7 -1 | 2.045 | 15.6x | | zstd 1.5.7 --fast --1 | 1.558 | 11.9x | | snappy 1.2.2 | 1.539 | 11.7x | | brotli 1.2.0 -0 | 8.719 | 66.6x | *Lower is better. Calculated using AMD EPYC 7763 base frequency (2.45 GHz).* ### 7.4 Benchmarks Results **Figure B**: Decompression Efficiency : Cycles Per Byte Comparaison ![Benchmark Cycles Per Byte](./images/benchmark_decompression_cycles_0.8.0.webp) #### 7.4.1 ARM64 Architecture (Apple Silicon) Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with Clang 17.0.0 using *MOREFLAGS="-march=native"* on macOS Sequoia 15.7.2 (Build 24G325). The reference hardware is an Apple M2 processor (ARM64). **All performance metrics reflect single-threaded execution on the standard Silesia Corpus.** | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 52905 MB/s | 52854 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 950 MB/s | **11934 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 615 MB/s | **9879 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 240 MB/s | **6925 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 169 MB/s | **6584 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 93.2 MB/s | **6090 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 815 MB/s | 4804 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1345 MB/s | 5647 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 14.0 MB/s | 4537 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 644 MB/s | 1622 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 725 MB/s | 2167 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 539 MB/s | 405 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 830 MB/s | 3265 MB/s | 101352257 | 47.82 | 12 files| ### 7.4.2 ARM64 Architecture (Google Axion) Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with GCC 12.2.0 using *MOREFLAGS="-march=native"* on Linux 64-bits Debian GNU/Linux 12 (bookworm). The reference hardware is a Google Neoverse-V2 processor (ARM64). **All performance metrics reflect single-threaded execution on the standard Silesia Corpus.** | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 24237 MB/s | 24131 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 853 MB/s | **8727 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 556 MB/s | **7303 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 226 MB/s | **5130 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 159 MB/s | **4879 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 84.9 MB/s | **4475 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 749 MB/s | 4182 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1303 MB/s | 4868 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 12.9 MB/s | 3796 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 527 MB/s | 1364 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 610 MB/s | 1763 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 429 MB/s | 386 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 756 MB/s | 1849 MB/s | 101352257 | 47.82 | 12 files| #### 7.4.3 x86_64 Architecture (AMD EPYC) Benchmarks were conducted using lzbench 2.2.1 (from @inikep), compiled with GCC 13.3.0 using *MOREFLAGS="-march=native"* on Linux 64-bits Ubuntu 24.04. The reference hardware is an AMD EPYC 7763 processor (x86_64). **All performance metrics reflect single-threaded execution on the standard Silesia Corpus.** | Compressor name | Compression| Decompress.| Compr. 
size | Ratio | Filename | | --------------- | -----------| -----------| ----------- | ----- | -------- | | memcpy | 18697 MB/s | 18663 MB/s | 211938580 |100.00 | 12 files| | **zxc 0.9.0 -1** | 642 MB/s | **6779 MB/s** | 130408237 | **61.53** | 12 files| | **zxc 0.9.0 -2** | 414 MB/s | **5778 MB/s** | 114657730 | **54.10** | 12 files| | **zxc 0.9.0 -3** | 171 MB/s | **3849 MB/s** | 98234598 | **46.35** | 12 files| | **zxc 0.9.0 -4** | 122 MB/s | **3674 MB/s** | 91698141 | **43.27** | 12 files| | **zxc 0.9.0 -5** | 67.0 MB/s | **3495 MB/s** | 86054926 | **40.60** | 12 files| | lz4 1.10.0 | 592 MB/s | 3573 MB/s | 100880147 | 47.60 | 12 files| | lz4 1.10.0 --fast -17 | 1033 MB/s | 4116 MB/s | 131723524 | 62.15 | 12 files| | lz4hc 1.10.0 -12 | 11.2 MB/s | 3484 MB/s | 77262399 | 36.46 | 12 files| | zstd 1.5.7 -1 | 412 MB/s | 1198 MB/s | 73229468 | 34.55 | 12 files| | zstd 1.5.7 --fast --1 | 451 MB/s | 1573 MB/s | 86932028 | 41.02 | 12 files| | brotli 1.2.0 -0 | 354 MB/s | 281 MB/s | 78306095 | 36.95 | 12 files| | snappy 1.2.2 | 611 MB/s | 1592 MB/s | 101464727 | 47.87 | 12 files| ## 8. Strategic Implementation ZXC is designed to adapt to various deployment scenarios by selecting the appropriate compression level: * **Interactive Media & Gaming (Levels 1-2-3)**: Optimized for hard real-time constraints. Ideal for texture streaming and asset loading, offering **~40% faster** load times to minimize latency and frame drops. * **Embedded Systems & Firmware (Levels 3-4-5)**: The sweet spot for maximizing storage density on limited flash memory (e.g., Kernel, Initramfs) while ensuring rapid "instant-on" (XIP-like) boot performance. * **Data Archival (Levels 4-5)**: A high-efficiency alternative for cold storage, providing better compression ratios than LZ4 and significantly faster retrieval speeds than Zstd. ## 9. Conclusion ZXC redefines asset distribution by prioritizing the end-user experience. 
Through its asymmetric design and modular architecture, it shifts computational cost to the build pipeline, unlocking unparalleled decompression speeds on ARM devices. This efficiency translates directly into faster load times, reduced battery consumption, and a smoother user experience, making ZXC a best choice for modern, high-performance deployment constraints. zxc-0.9.1/docs/images/000077500000000000000000000000001515574030100145235ustar00rootroot00000000000000zxc-0.9.1/docs/images/benchmark_arm64_0.9.0.webp000066400000000000000000005125441515574030100211040ustar00rootroot00000000000000RIFF\WEBPVP8 Pv* >m6I"! hȠ ind hA\|?F_(3ojP𥽋O>Ǵ? 7P?{ Oc??yysWw\?B##w??zu??3c =?vo_ Kǟo_c}O?_w ?~tlz~}???_;7;oo?w???o;g_???eO??& uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛Rf~pqjB5KW`6~2hP6uS⃛7sF_'h(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;LjxJb,$([ F@Or zdc8LXB,9&)]^ݿM|FA0$6K0h:A| Q\qYm ?+ =!8cv"7gJ[Bg_B-)vl:$1n⎊n QsH)Es=oIb4` 6ý6O4@v@AdhU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvO՟  Mpx42%/b\hw" :!lusQ Tp|'b\;2B5gRc:IW_zJچ+iV͹ҿ 9 =}άH uXp$r0i7&oV\FPoaXv:rp+>Ve Ƞl}:JǙS { ,!@PLÆYOYvtHJܸ,1Cn'?nG)㪁ꎅphǽ'5]b_x\!GwC926 GZʟs"Ʃ>X6JHY:t=7޴>7 3:w&vSv8 )x.O&1\[H=·mHFug Ds#I4[}/26Jx Ż(@ә}@8JyHxC+@SzqvjgsG=gZ*=Df|TqxRx>,Q*. 
%pƲ88vOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MRM`2/ k%uv?^e&:ɘq7 :&U+(ٌ:7pY3J ?$f!q<yw:49`c.Ǐ{'ZWΩP>K;{C ckY3caEeKS\v+cR%J7IIE '7*DR uc-kakWK`tDG%.fc$K@"]w֧i@ѓ i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|Lja qMS:2Q֤ z,cTIr6YAk TV@CG}ѫ9/))i6c+D5ȍ% 5nZv(2ud)3uIas p̬0\ )3 J9A~nWQh3W2#Q5\*USƥCUW/]ӈMY0 #+ÚA4pG;w8-M6\ F85ܮ61 T?I2haa%TEФRi+X4;^50)f^)oZejv?B}w1~oegFN$||״7J0chX\UX)ہH; _TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tõ)j6iK ^5}6GajWk`&,S(95KvOl;MR6uS⃛TA͇i]Tô.|Psf53D|QN.gN[`@iY ]Thv.ÄԲƸz0.Rfdf,{.$9ól16wTݏZPT ](95KvOl;MR6uS⃛TA͇i]Tô.|PsajMI={KB9)~&-N؁eWu$Sˁ5>!z,"ˋ)C--s}yFI$%F]J.>sL`_lt8a.)>L(O'ĽFi_AA0 (!󂪻ZF;O`ơ{_`KQfMfeS| }^eYݞaę U>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛Tl{U!AEV7qŀ?X$8A(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95Kv:p q( EkR,z{& ?48&׃CVHx%  >J{ )|jNTvҮn8^+(22e@1/),= L\Rz}%SFI|演dDuw/˶Q1_o*zg#lqdm!snu14A:5@.B {_+)c& ,@.3p!ルx¶b0 z„:L1o߲rSɞT{vOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]TCtߺ8VSU>(95K@l;]D!U-ZQs Ol;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô׊Ol;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA̓I^Q|F*ЌTB1Pb @4#hF*ЌTB1Pb @4#hF*ЌTB1Pb @4#hF*ЌTB1Pb >rhF*ЌTB1Pb @4#hF*ЌTB1Pb @4#hF*ЌTB1Pb @4#hF*ЌTB1Pb @4#hF*ЌTB0цrSU>(95K|b^Q|F*ЌTwR  @4#hF*ЌTB1Pb @0Y/@4#hF*ЌTB1lx|/t#hF*ЌTB1Pb @4 NQ>+W_:4#hF*ЌTB1PbK(UwZЌTB1Pb @4#h?tD^Q|F*ЌTB1Pb @=Or1Pb @4#hF*ЌTB1OhMTA͇#T-z ޝc{!,Kdޜ1Ti]Tô.ώA͇i]Tô i]Tô.z'eiA͇i]TÎ9# e5S⃛TTA͇i]T؈zAjӠ5jU>(95K P##s41[߬L%@>H7-hJt?3퍒`y:TFC<}o-dbM; ]TôԽuS⃛T2Aj6uS⃖E{5KvOqT;MR6uS;MR6uS/ܒhTA͇i]TTA͇i3;C'ZMlGְgW&&<`"'Xd%;9Zi1­SoQq.wiI<:P05KvAô.|PsajM>Ol;MR6|8,TA͇iL?59k]Tô.|P&y;MR6uSi9 `*U>(95KaajU>(skW# ֪'Z좂dm~JS1 J/`zۭR`J!mJc:0 q$LT /B%;TA͇iKvOZ딋zT i]Tô-j!P6uS⃛Tnq5KvROl;MRz<`lF x&^ ?L0i &ӡ,ok9#2eRq͚ӂ $lpGm 09Uu:\jC/D,j?Қ෹ JkoYq&j>$Zu)+,krELZ],uX:%nO9PykU{,R2^ipӚy}%s2%qoR:J.P8q3 }mluԇ^Y8<*5 opt!0@JP' C4;Xm8kab$ɌTư|-#7$8Ci_WSzfT)oG](O |kB@ )!u8-u >>5ni7gqe: Gʫ4{*@I [S/8r i=e GqmM!JLŨB7K#;nuJR ?BaxZz Ob1v+޸ISVo{'\UQ-`>@ JZ<9ZRpkF Z(T赜A]7W~v]@4_s [, c ܯ@nc$kM]H0&Y@d=Lvpq'=#%@G. 
'3U}}6%h>n   k."j@[(l \pz|Psaj\3C{]'[ѽOfWڟl:-KS$4%c*7Ѧj7䄐"8dNݶǢHKZBX~d jgi] NAuT'T93MRO |PL%GC9K\]TitrlM_sõe:;9]UKOzf9*G B7}MЍ1vNO)i Lb*;D | 0 D'sN(9kl;MR6t-Ix A;N @vӊxô.|Pvb˶Ev"3Ѫ1,WTAv őHF<qܣYY;_~+]Tô% Ol;MR)|PsajU>(93nmi]Tô.|$lDR6uSfP]Tô.|P h-}=lZseұ!QOl;MR6vOl;MRy;Mx.|Nz$Kv jU>(95K0Z.|PsajU>(9yB0jU>(95K;bU>(95Kv7w$KvON}j6uS⃛h۪vOl(@$)|"&QV#̱k kqgZ?죕 BͨS΁5Kqɹ`l;MR6uS_l;MR6tؾix)vOlFgpˠl;MR6uS6uS⃛S,7X*U>(95K@ajU>(95I4oA"PRu:mv%K`s 8^~#*8)Ilk,f.KQ\L(95Kտo=uS⃛TAZ*jSU>(95KϭoTA͇i]TU>(95Kv:mA͇i]Tñj6uShO޹^(,xUp6x@Iv%vQ53@l$a,z k :*L z #}1 xt>bqeV|PsajU>5KvOZSU>(95Kqݘ-}5KvORTA͇i]T-;MR6uSeTA͇i]TrTA͇i]}ǖJݷ΄b2B1P.|Psaj>a]Tô.|PsWQ1 T i]Tô-װGl;MR6oڬR6uS⃛gvOl;MR/Pk=2A͇i]T;TA͇iL޸/}NMZ"q1SU>(95KOԺA͇i]TÍHS⃛TA͇iȐgJl;MR6tqd͇i]Tô.|L i]Tô%')@l;MR62>(95KCajU>)vOlcavOl;MRn|PsajU>(9r9Ol;MR6t+@l;MR6uI j6uS⃛?`2A͇i]Tô.RyP6uS⃛)v6u5MR6uS⃚r7)vOjSTA͇i]TW]1~`e5S⃛TAqhCxNoTA͇i]TtR6uS2yPuS⃛T49vOl;MRe|PsajXvOl;MRd)vOjQU>(95Kv6#dIA͇i]Tô.؇TA͇i[zjU>(95K׫; ]Tô.|P+uS⃛Tj6t_MR6uSg< 6uS⃛TnM)vO[!J25KvOOY%6uS⃛TTA͇i]T?`e5S⃛TA^6uS⃛S$jv=Tô.|Psag³KuS⃛TAbFA͇i]TôQ8MTô.|Prsvj6uS⃛# i]Tô.B1ΊD i]Tô.zT6uS⃛AAj5oGVô.|PsajTl;MR6og/Ýi]Tô.|#}cxô.|PsajUsR6uSfBTA͇i]TBqER6uS⃛;MR62>(95IV큔Ol;MR-5KvOB)vO[-j 2A͇i]T+xEOl;MR6~TA͇i]T.uS⃛TA^6uS⃛S$jv]Tô.|Pr_jU>(95KFA͇i]TôblnΈ i]Tô&oS⃛TA͇i}7 Ol;MRI7U>(95Kv SU>(95KT i]T{ouS⃛TAZN|PsajU>(9m;TA͇i]TGPSU>(95KERzbajU>(95K |PsajU>(6hivOl;MRꧥJsajU>(94̦|PsVqKvOl;UWô.|PsajMs,FA͇i]Tôg@=A͇i]TñW,vOl;MRꧡuR6uS⃛>caָô.|PsajN)⃛TA͇#uS0 !uS⃛TAXuS⃛T2Aj6uS⃖J.|PsajU>(6ѪU>(95KqKvOle5S⃛TA^6uS⃛S$jv6uS⃛TSU.|PsajU>(6?)⃛TA͇i[ޠc U>(95Kv9[o͇i]Tô.|%Xw88vOl;ML9kA͇i]Tñj6uShs5K)ivOl;Ib{*?MTô.|PsVbA͇i]TñUC2]Tô.|Pr\ TA͇i]TP]Tô.|Ps`ɤߛTA͇i]TMl;MR6H>(95/+ l;MR6t;jU>(95K(4̦|PsajU>(9m95Ol;MR6}>(삩uS⃛x{"!$N8XDŽQ|ԵS⃛TA͇i%MTô.|P+uS⃛Tj6P\PsajU>(95KMapq5Kve5S⃛TAsb |PsajU>(9~|Ps`/'؁fge1aCC#@*|Y2w,p=܊A%bzON1Ngp!ZuWU>(95Kvo J95KvOJô.|Psah(?MTϦuS⃛TA\)eOl;MR6~vOl;MLhs |PsajU>(9|QCJc\m1>(9kF|PsajU>(950*66uS⃛MqA͇i]Tô%A* ›e3lZ5~Ïf탛TA͇i]T^=${^)vOwô.|PsajMTñrA͇i]TôюFOuS⃛TAZH7)vO[@vOl;MRY djT;0Ē] ڔ.|PsVOl;MR6: uS⃛TA͇#o0#c@n/pP 
4zsTaOSJ]:涑MTñgOl;MR5p'uS⃛TA\I{rTA͇i]TiQ@vOl;MRb۹T i/C+)}6uSuS⃛T@rSU>(95KaaKA; n71c~AtT4D\^}8\|v\rta]wL㞑G<=®ME-o--ErKӘl7e5Nb@2{dBuk*dZOC J30P!3u-Jwly!0vk{.ld-E-Ф"飯gChg>@$#>ܰn%z )ʃֿK dIG- `%|t[X.'n"`~b'c_a'$ ÇByu2CC=/mI9=z~&4G(u;n`}gղ6po]a6Pza"B72b;5S]C5P.2P%!tOU3ݪD~W -`Nƿ"OIQ[_W]$:d36ԓ A Yz'ohLHOxz,M] ~pRd`zJ•<0RXcmWl4*SZ,4`xHGd0@ @FQQq19xuB~"|UMQICjTx`ř < ` H5@K@oBoHOz8WB ~ D|Վ1B6p7LSfᴖ0p_ֈ pU$2Ӛ<0""N,muDFK3-GPp.k}k0nFg8V~^[8D`hs7!5IK,  |JˀBbe*<=zzRÄ S[NLNө ;f(9K e5W*>/՘5S<ajs8]hZe ej:l;MU-S⃚}ĸf+se}5K6S@l z St6i\fhU.|Q5KTqA͇j/>/&ϼ y K Ȫe5S⃚ȊdKvO BuS⃛TA̓wô.|PsajU>&]\D@l;MR6,l2 e5S⃛TAˁ\Ol;MRMhhS⃛TA͇i4mOj̆ 6S!R6uU.|PsajU>(9>*rSU>(95K M@XajU>(95KKwG٪|PsajU>(935E[r U>(95Kv8 2A͇i]TsmhTA͇i]TW|;MR ~@l;Ȧ|PsaۭOl;MR5o^TdSU>(95Kq|PsajU>(93l" SU>(95K=SPi]Tô.|LL0TA͇i]TL׬@l;MR6uJʁ%k(9sNTA͇i~ô.|PsajFOl;MR6)ɰ5KvODMTô.|PsW<~TA͇i]T{}ô.|PsajU>&;T9B}S⃛;MRZj6uS⃛[)vOl; >(95Kv {> T i]Tô.BTvOl;L⃛TA͇i[{iô.|PsajMTô.|MvuSe-5S⃛TA͇i-+@l;MR6o1_6uS⃛S[UOl;MR6$PuS⃛T2A͇i]Tôe>(95KvÎzYxDBuSl"l;MR"rjU>(95Kե`5KvO(95Kv{o𓃈 i]Tô%n<]U.|PsajU>()״.|PsajU>(9o+Q+&Ol;MR6ѷU>(9rAl;MR'ajTVJ|PsajU>(95Ix1qMMTô.|PlG.|PsajU>(9Q&jvOl;Mz=j6uS⃚ |PsajU>(94[D«)vOj.c~vFTôkiajU>(95K+ l;MR6t?Ol;MR6x#ϗPTA͇i]5LyxvOl;MRP -Tô.|Psag) 6uS⃛TE@vjtՁjU2TTA͇i]T懟@l;MR6tOl;MR6x7R6uSJBajU>(95/+ l;MR6tݤ]l;MR6oOl;MRajU>&pQҠl;MR6uIұ;MR6uS"x|Oj6uS⃛Y\1/*ô.|PsajM5dajU>(95K:h i]Tô.z. et |PsajU>(9rôԾN!6{9 e5Shs5K6}@l;MR6oN_AA͇i]Tô.9 tj6uS⃛;CJ&SU>(95K!w}A͇i]Tô%2A͇i]TÏ-l;MR6uSҥ95.rOOm=)ey_.zT6uSкuS⃛TA͇6]Tô.|PsaWA͇i]TôS⃛TA͇im2 jU>(95KqKvOl;:R6uSl"l;LyjYcʋАU.zT6uSuvOl;MR^׵TA͇i]TقթuS⃛TA̓RBSU>(95Ke+@vOl;MR&V Ol;MR6R S⃛TA͇i5OjWU6[249vO@xϥOl;MR6*U>(95K=JSTA͇i]T%(v |PsajU>(9}5]pq5Kv A͇i]Tôqp=@vOl;MRajo4l#e JsajMTñPe5S⃛TA̓ߊ57ZA͇i]Tñ⃛TA͇iE^ ;MR6uS*ٿ<ajU>(95KKf\Ol;MRm\P삩uS⃛TAP6ToSU>(6*ô.b3kz.|PsajU>(9KbuS⃛TA̓wô.|PsajU>&d.|PsajU>(6sض.|PsajU>(9xQajU>(95/#; ]Tô.|PmT i]P|;MRAajU0|PsajU>(95K|PC Ol;MR(Ol;MR6wiF i]Tô.>/́Ol;MR5fTô.|PsagX)vOj!5IKiJ({G;MR'ajTjU>(95Kվ/SU>(95K՘n|PsajU>(9lwL |PsajU>(9p `TA͇i]TC]Tô.|Psau0J5KvӊxÎK . 
hYЦ  69vOd5KvO #vj6uS⃛>r7!5KvOE-Tô.|Psaޓ-r2A͇i]T|Tk2A͇i]TA͇i]Tô6ɽ }%Ѐ U+n_ˣr$va]WqBrST<TS⃛TA͇iKATA͇i]RXvuS⃛T l;MR6uS_ jU>(95KJ5Kv(XOvq5Kvӊxô.S6vO@j6uSgU.|PsajU>(9zFA͇i]TôH躩A͇i]TÐi D |PsajU>(9kR6uSi=MR6uS⃚l;MR޾Ё&6kFS6vOZ;MR6uSfN*5KvL n|PsajU>(9lT@l;MR6/DM(8vOl;MRm2A͇i]TÏ &sajU>(95KT i iwu"D@ $xe=jE@vLF:15Kv' MjU>(95K1xCajU>(95KYbkajU>(95KQ2ɹj6uS⃛`کA͇i]TôTA͇i5Oj8D1{`&]x[l;In|PsaI)C\Ol;MRi]Tô.|P$vOl;MRꧢŔ@?j6uS⃚>ô.|Psaj(95Kvy1$|PsajU>(9"@˽ٷ/`DyU6USiLOl;MRU;MR6uS> ;MR6uS QU>(95Kv9o_U>(95Kv:" MRz |PsajU>&RU>(95Kv>h5KvOJôԿUmXضZI$kn7'jK|ԱBBdKH|Psa.|PsajU>(9x }5KvO -ZU>(95K=|lW銁5Kv'GU>(95Kv7w$KvO E@e5S⃛TÄ́vowU(@DW! Y@@0ᇜ00z:F9 4 |vD ^%L 0Fb RIqa\gÆ9)(sa9M`0{ FX`Zп#c0xPD!^ /q 4PPϟ`a=.=l([fqhafVЫ0FR1P`܀/nF&}t81S6g8"QH!xt(}K}e2>(9q\FTVMԅ`-A$vn d^7ÖmGae\S⃛RxTA͇i]TLt̐vOl;MR9-)vO[@E ZD3ˤ_ G%mUUfAqMiJ[: lovZkfO@l;MR-֑pq5Kv/TA͇i]Tŝ?yR6uSPÏ>L?8BSH85pkpiP9G|;MRg I8vOl;MRc<|PsajU>(9s5Kv1Un" ,уAU׸J*ݼ NX/`eoa ce-v5ÆV`}(EaA͇i]Tô._zw0i]Tô.|!e5S⃛TA͇#ts!75rZ v9?UKÄ́nO[&c)vO[c5Kv'ϑMTô.:T1 yN: 0KY~@fvi W#ۯ6 l;MR. l;MR6uIjO)vOl9)2újU>(95KCaj2C0c,yk=}v*2uJʁ5KK}5KvOj](Ol;MR6tj6to(pPm ҃'&k(Ol;InrSU>(95Kj6uS⃚vSU>(95KCajBڛ/fw|Z) KJ.TjU>(|PsajU>(95KLIajU>(95KP6o ryvV.DXpsqԬV3!uRc7D$ցv\e!~EMv$0oq.97Gs~pdY^"w:!*-=NOl;MR6ѭ"2A͇i]Tb5KvOE}@vOl;MRAaj+[I@lcl;Iz|PsaƽvOl;Lz5KvO1;.Ζ^6Lp:}PF>JD0(J2eXB.Ktt͠?즚H՛V8Q#R^XD6rfñ $vOl;MRꧣrx15KvL% Ol;MR6u⃛TA͇i[E5Se$0Vqa p~>rH2++3H>(950\Ol;MR˶ސq5Kv'Gd=F:e^U-T+8ôQ5Kv(G@vOl;MRꧣкS⃛TA͇iK|(55KvO\94:w~ AڳLvpEIM[Py;Iz|Psa4i]Tô.|L̏#vOl;MR+2FZ% 8*U>(95Kϲ&ô.|PsajU=Ol;MR6vGTA͇i]RcTA͇i]TNl;MRR,z}du/8ln W>82SN)⃛Tm!Z *U>(95K(95KQ`e5S⃛TArRBTA͇i'U.|PsajU>(9ô-\aʄ)F=fSk]TE|PsajU>(95Im0ԛj6eiz&>rDJE4J{)o+k:谰8 3H3)l3*|PsajU>(9l>j6uS⃖YA͇i]TôԼjU>(95KE2x[5KvOJô¬?E"zjԯ w cOl;z |PsajU>&k:R5b'i0uC@;i 炌Fq8Pa&Bn8j_:'~13 v qUbX+ZGPHt=bjۄC4gW Yu2A͇iKhZA͇i]Tñ㠶jU>(95K-w Ol;MRvSU>(95KCajwY\AǺp7 +2#"q:jTf⃛Tߥ;MR6uS@l;MR/uB@8㘽0aWy:#qIDXBU_Y~`\@pˢqH: l23[{#N-=tİ:,6{vOl;LTA͇i]6[5S⃛TA͇(V. 
6uS⃛Tl=⃛TA͇i]Rl;Ibv~(%ZKfVKz)3%~ jMTñ>KvOltSU>(9q|G ûPP5j̔6uS⃛T  l;MR6uJ&f i]TôĈ;MR6uS;4gx삩uS⃛TAP6XXѽt~v|PrMuS( 6uS⃛TbvO]"T_.5+SU>(6`j]Tô.|Ps`"⃛TA͇il |PsajU>(9m 9j6uS⃛\MTô.|P+t۸fr'&FoICi]T:gvOl;MR_dYô.|Q8vS⃛TA͇iȐ?|PsajU>(95Isk=@vOl;MR.|PsajU>(-l8M0Ȧb> NX"'@98Jsaj`S ļߋ>Шzhb#z#ŪCۦuS⃖]Tô.|Psaǵ.|PsajU>(6`j]Tô.|Ps`i>(95KvX, |PsajU>(98Ol;MR6WPPC:꥿;$-zw4 @2" ;<4%j @(PkɧXG Qҥf0wGwal;S XKUc˅|Y5k⃚l;MR-uS⃛TA͇cBEA͇i]Tñw;MR6uS#ŐR6uS⃛BGi]Tô.|Pl n5KvL /qV7'Q{X[,# cSig$! NeoF -)*\8pc&09ŧ)k+D e 8\y!QQ`~ a"F.%́ӊ`T㠦ajU>(95/Aô.|PsajT i]Tô.?#ŷU.|PsajU>(9pjZ~l;MR6uS'e-*U>(95Kr bc\[XtL5KH|P!r8:S雹ևBpqJzjN)⃛Tj*5KvO@vOl;MRn|PsajU>(9o6uS⃛TΠvOl;MR.|PsajU>(9GQ|5 H)uS⃛TA˔6uUJdp[ uShP" 6uS⃛TOl;MR6%JsajU>(95K9@vOl;MR ";MR6uSh|PsajU>(950`|(GU.|PsajU>(9rTyW831TI6J:8@ajU>&Y> |PsajU>(7"9n6uS⃛T2;uS⃛T3? l;MR6uN, J5KvOOj6uS⃛ rk2A͇i]T@F$ 5\;{{\|^m[>yM8v5Kc5KvOl|PsajU>(950txô.|PsajU4TA͇i]RuF:l;MR6oYaI)vOô.|PsajU=*SS Ǐ+T&:ݒa> /VHPmA6AT~vdJ5Kv^{TA͇i]T""l;MR6uIy3_ ;MR6uS*~Ol;MR 6uS⃛TikSU>(95K@aj(X'+:$Xh+iwIN׃6:Es &Tj_TA͇i]T弸.|PsajU= >E5S⃛TA͇=Uq֩A͇i]Tô ך9q5Kv%Ol;MR5so]r5KvO܆ô%DsDW D%C2)*B:W i]TIȊdKvO BuS⃛TA̓wô.|PsajU>&]bJG^g @IfYc*^w׺1ƱlΆ(95KvxΟkvOl;IG봞A͇i]Tô5i< Xr_`DVR \9(.Áӊ`T7U.|PsajU>(9n,kvOl;MRW?MTô.|PnK)˟okϑ&94 @tiynj( \&=ܘ(AfJO1X^`EzJ\-@aMTô.|LNz@l;MR6uSхc |PsajU>(9܆ô%/و_U7,"ʪl Fbڞ] b9D{3i$~vK)A͇i]Tô%{xvOl;MR6uS⃛T{l$Kz --CI]~jϋBfw؅)5bN-# }ξ "ÝcKOV}d{)vOjsajU>(95KOvOl;Ȧ|P$n @/ptƲYX>[^})ѕ Cmeѳ3-ۛsa'V7 l/.l}O됖N~WP hEy=Z@rX)1FVMPkoNW%cBLNZiUd2M$@cuT*!$j#Fr>&)[վ%5#)5Dl g_GWx۶}nweMl!rG1jS _B_bf)d4#""Y2 KfBJYm*|,n֓Ke5Q{~N%!™S,ͯ=S*Thv+W$`T&V1!o19i[jqWNDL){DbQY!9[b 2]Ki΄[Sě5!<|G q_ IԅZL-{$Ye+"]0Ub7>rW6aSynYr\t܄ [*`uCVzwf.vY''h2'P} cU0+b#!BȪ]+WP~Oqv>F%娐 S:BCV'j ^Tk}I, TG>~H&A0O}u#"vA]Cy,>'6%'aP}P@EFZjY" $%0rx l(?h\(m9҅!1`2rR $rIf6su b" pï7+1I؂bŕV^FSU>(9616uS⃛Tkn6A͇i]Tñ?٪vOl;A6uT'T i(; ]Tô.|P(RpU.|PsajU>(9_o&Ol;MR6 U>(7fM"-wJ/zZeOȦ|PsaׁfSU>(95ITҳt~SU>ظvOl;MR@(?MTô.|Pl꺔! 
U.|PsajU>(6MU>(95Kq'hMTô.|LDTA͇i]TNl;MLs Fc+rZ=&XDU.dP6uShVvOQ|w+)Lô.|PsajU0mA͇i]Tñ@VRô.|PsajU='ʑ2A͇i]T:Ol;MR-vOl;MRajwx?Ӥ [w;MR(95K՘n|PsajU>(9pҧ8TA͇i]TzER6uS⃛ш i]Tô.bmOKvOl s5K|pgSa h@` yO i5Ol;MKJOl;MRB;y;i^˗`NxJ4p*N (F:Kӄe5S⃛T i]Tô.z$i]Tô.|!Wo88vOl;MKJMTô.|Lxj6uS⃛G|;MRқlI*Ĭ@l;MRe|PsajjU>(95KA6#V)K7dGgN0S1&ඏgŐ g6]ԺA͇i]T(Ol;MR6}y GajU>(95K \葯O ]Tô.|PrKvOl9ǜ2KvOl s5K{i3xb{MTrT9R6uIa/ao(I[$atbߍwzj3mOl;MR͖SU>(95KTbb@j6uSh2[ô.|PsajU>&t9TA͇i]T\W.|PsajU>(9rôWw:ta8Oϸj.RvOF2A͇i]T;G:Q\Ol;MRꓐH|PsajU>(9őMTô.|PsaDZY16uS⃛TvOl;MRꧢ 6uS⃛T49vG9zfޑ2qOvROl;MR3TA͇i]TuS⃛TA͇cj?n|PsajU>(9΋3fR6uS⃛KMTô.|Psa^p{_o>ATA͇i]TSS KN=l-uXcn+rTj6/p l;MR6uSG5KvO -ZU>(95K=F_Mg2A͇i]T5KvO[vOl;MRVlP6uS⃛T`jU>mA˔6uS@Wi]Tô.|P%f/}U>(95Kq 1l5MTô.|P&n6uS⃛Tw.vOl;MRꓒ4*U>(95K>dK|SU>(95K@aj7 nKx)Ci]TDpq5Kvr|j6uS⃛A Ol;MR6R6uSHOA͇i]TÍZuS⃛TA͇ q:"ô.|PsajN+݀;MKc>K)>(9wô.|P(& /\Ol;MRZj6uS⃛N|CajU>(95KFTA͇i]TB CajU>(95K; 6e5S⃛TAZvl;MR6uJʁ%d^hm1gZ(MTMl;MR 6uS⃛T6rSU>(95KSU>(95Kq!AOl;MR6Y_HTA͇i]TGTA͇i]T88vOl;MR) H#[f ) [K]TTTA͇i]TV@uS⃛T(۪vOl;59bl;MR6uJ2pTA͇i]Tٔ*U>(95K>AIOl;MR62>(9{Jmx8uk|G@i(9kw6jّK6t=2IJ_'Ol;MR67@l;MR65KvO\95IHɬ!@jjU>(RajU>(95KLje95KvL j]Tô.|Ps`iIpt"a6_ef %>)eqJI44IN 21q2LS۷}4Q<aˢ=`l&;@@uS⃛T=dTA͇i]R`TA͇i]8l;Lre 5⃛h۪vx]uS⃛TApp(95Kv+66uS⃛ [1ZqgX DZJ\d.CJ“7V2]DΕ1,Чp z9~$A`f_rdqΝop!>_gg r ָeڊ>>L[U>(95Kv6Ol;MR6/pl;MR6t(9sB9Y :yj.RvOB}5KvO Q@vOl;MRMU>(95Kv6q<,`)\Swj4w0lWJ43A͇i]RU>(95Kv4iOl;MR6|Py6TA͇i]T"l;L1MK 5Ol s5K2OvOl;MRomYj6uS⃚h۪vOl;!1u d.T&RU>(95Kv8A͇i]Tñ⃛TA͇iB6uS⃛SH"pq5Kv6uS⃛Tm dn(95Kvӊxñ]Eι ' (6*ô.aՀô.|PsajB~vATA͇i]TE"5Ol;MR6" i]Tô.D!S⃛TA͇iH2A͇i]TuS⃛T@IvfN\`@l95K RH i]Tô.aHR6uS⃛ƭTA͇i]Ra}\L |PsajU>(9oԥOl;MR6_WMTô.|Psan(95K~~@vOl;MR&YTA͇i]TĈU.|PsajU>(9zd3:uS⃛TA͇#uS⃛Tj6;vOl;MRMR6uSf VOl;MR6%I !88vOl;MRUQ]Tô.|Ps`" i]Tô.z;@l;MR6H>(95KJsajU=f;2A͇i]T -MTô.|PsaK=VOl;MR6vOl9}"zjU>(95Kհ[Ol;MR5scI}5KvO 5K-ɸKQujꙥRvOBOl;MR6Ri)A͇i]Tô%RTA͇i]R^"e_|PsajU>(95I5]Tô.|Psa%vOl;rTA͇i[E5S⃛T)+`ҕtR&QePZT]iY7\rZMj1ڂImY[!o;~ 9dx1堔 /x@+|Xp/KW( גTNr?#؛ѫ\ti2CÔU&2@/v@,&k$<@zY1W%ܩH]XEY͏PZP:*I_W,9 -`&A$e,۷:&GSḲU#X $ުalFZ5  u7OZ 
,]0U#*j7+O\yjoDx ] Vp3c~*ON€2YXmUI+֐_JSu!ED֣$9E[vSD j~ii=V시XP84P<ɉN dY=j_Ai=7nʳw<57rD<H®zć[ޫ81jP['da@Y,Vu0z}VXxĦC}|D73~t] j}X3*тLm ̐H,CQ1Vsy0D ` fr.y76Gdyq)S5Z<(M%W|,Ķ8GqɆdY懂-\l'=A|2cNaa&u$_qJ sڰ:v"r11}:XKL$ ŒiK: h(9zwBTA͇i]6+⃛TA͇izɔOl;MR6yQX<TA͇i]T;MR6S!R6{L5KvO%|;l;MRꧢGpl;MRaTA͇iK(VOl;MR5t'gTA͇iJi]Tô.|P'TA͇i]T;MR62>(95Iz檗U>(95KqFj6$ 8%ô)vOl8TO`TA͇i]TXPsajU>(95KJuS⃛TA}16q5Kvӊ`TA͇ie5S⃚'TA͇iK'vOFK4$>ܮ%~n/#o*Se MU>(95K0k]Tô.|P(|; ,88vOl;MK|PsajU>(9urHL+)vO\95Kv SU>(9z: 6uS⃛TjU>(9#sΣ , Ǖ&!90F8s1xn?ugC6* !J/&fa'Aj6-I(95Kvk6fuS⃛T&ize5S⃛TA͇pR6uSl"l;MR6ѷU>(9$@vOl;MR̠{^uS⃛R3tbhw9|}+>H+dX)i'FВZ$OVnNm0tiGN,ٻfATA͇ik;MR6uSf^ď\Ol;MRS⃛TA͇iN萀vOl;MRAajU>(9&A͇i'E6uS⃛T*U>(95/y 5, QP!,wj))8 3ô.|Psaǐ$T*ô.|PsajT ô.|PsajU04@l;MR60=|1uS⃛TA͇cOl;MRajU>&X MT=ѡs i]Tanl;MR6u&" |PsajMzs)vOj޴4:.5KvL1i*U>(95KչT+TA͇i]8v5Kv SU>(9;+TN˥՞]Tô.b0LT@j6uS⃖+)vOl8֚i%6uS⃛TnQiw⃛TA͇i]R# e5S⃛T3i4~TA͇i]TW|;MR6oOl; ;*ȜNgMTô%YکA͇i]Tôϱ)⃛TA͇i[WSX@vOl;MR19+ l;MR6t؎ 6uS⃛SMTô.|Pr)͇i]TôP2A\i7~iPW%aBE[n;1ؾ1 .inaLV,FHQW}R5vOl;6uS⃛?Ư i]Tô.=GR6uS⃛B0XuS⃛TAIb6uS⃛)vOô.|L|Ps`D9F83ٓ@>Xnj1[>/QWwaz$Ԑ bf6uS⃛N:i]Tô.|L֣I88vOl;MR]TA͇i]TP]Tô.|Ps`ɤߛTA͇i]TMl;MR6H>(95/+ l;ML;fņ08?g N7/(SY8`a !xg _$:_2J`k'C, e5S⃛TAYTA͇i]TU)!ajU>(95KK5KvOBOl;MR6j_TA͇i]Tʛ_MR6oOl;Jô.|PsajTTajU>(95/O*ô.|PsajMgK}.|PsajU>(9~ěHTA͇i]RmU>(95KvqLI i]Tô.|&6uS⃛S$jv i]Tô.b/Ol;MR6~vOl;ML  |PsajU>(9|QCJ5Kv%i]Tô.|P'5W@j6uShs5Kvô%S*]Tô.|Psa*U>(95KմT i]Tô.8Gl;MR6.r?vATA͇i]TLajU>(95KO ?Ol;MR5{vOl;ML|Psaj_TA͇i]Tm&U]0jU>(95K,jU>(95K+iBô.|PsajU>:@|PsajU>(9$P*6uS⃛InPuS⃛T49vOl;MRe|Psaj`8vOl;MRq uS⃛TA̓ٶ6uS⃛Ra i]Tô.:5vOl;MRIDMR6uS⃚`KvOj!5Kv#TôzD|PsajU>(9mJA͇i]Tô%ۓAj6uS⃖E Ol;MRԭɭW\;MR6uSBِTA͇i]Tzc})vOl s5Kvô%^.|PsajU>(ҼQ5Kv%ajU>(95Kϲ&ô.|PsajU= =}6uS⃛T]TA͇i]TÐ-Ol;MR6vOl;MR)v;(95KvNV@,QVi]Tô.|LNz@l;MR6uSхc |PsajU>(9܆ô.|PsajdS⃛ROl;MR Y29 lX#bX VE偸QCWv/ C0uc|xc^W:?Z -&Q _ɧLô.|!~vOl;ML(ô.|Psaj6E-Tô.|Psaـ8vOl;MRϬvOl;Ȧ|PsajU>&;T2!TA͇Hߍ.ښ 7+H)-rMv*U>(9w ۪vOl;ݒOl;MR637@l;MR6vP6uS⃛Tl6uS⃛TTjU>(95KH|PsabajU>(9{Q'6u@[3/4ô.|PsajMA i]Tô.z(4Nô.|Psaja޶xô.|PsajU0*U>(95K=d 
>(95Kv#Tô.|Pr)͇i]T.]Tô.|PsaѪ]Tô.|Ps`Qʁ5Kvf Ol;MR6& 6uS⃛TTA͇i]Tvô.|PsajN)⃛TA͇#uS/{MR6uS⃖:fuS⃛T(۪vOl;ߚ@l;MR6uIz~)@l;MR6.j5KvOZغA͇i]Tñj6))N ~xm-VdBkqYdpE@vLYvOl;MR$\Ol;MR )vO[)&ô.|PsajU>&p|އ(TA͇i]TbajU>(95KSR6uS⃛;MR5tf:`C^eҗDn2D^Q|F) N9'c8΄b @4#hF*ЌTB1P@tyEB1Pb @4#hF*Ý yEB1Pb @4#hF*Ef+ȪB1Pb ! ЌBH @0ca 4Ol#hF*ЌTB1Pb >]^Q|F*ЌTB1Pb @0>R/@4#hF*ЌTB1OTA͇i"2MX ߾VO7mO&%2ʙ#vOl;MRW/`TA͇i]T/&yZN{ɽ:Bt{Fl)Skʁ 60[MTô.|PsW)vOl;nK5/@37:8_t]Tôj R6Rǝz# pb0>,ѥS⃛TA͇bjQHt{ !͇i]Tô-&BC]!!* GSb6uS⃖z`<&$RlPzfBE|@l;MR6c2φ.bYI:f&P3 S⃛T@VqY-.-ݚa|m4æly5Kv`=[ gzih=jؿ)vOcK32Y$LB:wTA͇i]Tô E<`7)._KvO vdHN' XՏLYe |PsajU>hh59OJĕߘ]nnjvOlnqefrS*s=Z5Kv^XHXQt5vOl;Ivߏ1XyRyuEVY3Z5Kv@Vx,.#!7˟apq5K؇ZSQ(1^#!pu ?(T i]Tô.|PrѪ4HYއ7v05KvLQ bK.viTp p͇i]Tô'NZ"~bYWBe Ol;MR%+5"@@3;MV5ger6uS⃛?ՀK%UA?JKܖL )vPmM옑)c\*,(95Kv8YkPX`iN. U>(95K3= z/R ʜ@.S@ O5KvOl; sW1mU>(95K=폥s삩uS⃛TWjhSU>(95KEi{`,H2@j6uJ_)vOKD^w|rSU>(95K<[-VVQA.|PsajU2*+ jU>(95Kv7TA͇i]VajU>(95K򟸟O[2A̓O)w LOJޔ7AHi]`jAm% }0@vOl;MR6uS⃛]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô)x̧QH?RM&)"Wm6z"lFOCZCnzdH;ݑvrYd9 9cR' "5]YTttlq Fȣ'nu+TOLw/sjϐzexF֌dd7SM 0aZ\T~/R/tE$N*5On0vOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i\"~؄3;~U2@ +rӍV,|(95KvOl;MR6uS⃛TA͇i]Tôϙ+{X[? 
",ĭ9(]vǎ0mŌÆtl㯉Mﺌ1io|h:cކP 9r<Oq eR~$ KҪ;5m;35ՙ6]c0KfT<-ɣ4<ә42VAiNjⴭ0qrO38Z)ĄXY˦a#d@)%&r]Cb2ҮllC@#DbIƔHaI#"VQeʜՁ ,56ΐFJ\&i= }`VZ*C5TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(9,Jb ݯ0z%|HLtcJ;|jHܘޒWN}CNы_;H9|PJW!wk-sW7:K:뒦 9Al;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]TôhŌJG _(6i]RaM7ςTMEfp@vOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|Psajx`[ih_g|eF7+5+"`LT[+r[3w+(悬>쬁ybtm+VP}I[qP, TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MRR}4Ow^:Y ̂OPU.P.c[[__e&'aSZz, %,1b7Q0J8 9&{4![ͱ%%~.ti>qCjhKR_t$_^΢l;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(9%/F҄QK-+T ,l𥗂xÔo4U"^RC iS1w _s+9&A۽P anu(W2 kq'%H>jKCE%uUOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95Kp 6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]Tô.|PsajU>(95KvOl;MR6uS⃛TA͇i]4&9VIVc)?EvLu<׫XH?srޢ ̑toVb&]^u/gf5wAe9U%3XZI+‚ #px_)qĞQ/ Y e2-Mz6}I4 /9aGl"~dq_QJqħgxSDPr&­{-Snh4ldGOBJ(3>d.|8o8 h1>@FA'!ܧb\#PRƻ~ 6E#DhGo899lBlܢ^lP}J4zbTF?PQ^m |i5ďKȝ:F=+[e#tlrĊb&Vn7'tR#m,CQ<.)qm(7)Β^ NT@S%eahDNח'`A;uliul\$ va ":M<16f ݩm6A?T1c}UD=ĉ6-hdHf#Z=C*V>҄T$3WW1B幪aIܥʘ;Y Ɋbdj#{x #Ϝς F"CԎV%kt8-dlgyX8[(2[`8/dm;{yY7@ 2S,cq0%pY ߹Rb%Cwy#S_\؍OfJw4cuݹeE)s=e LUd(0ӹ-3j9 Zaſ+!YivrɧLJ^㊸CPP %9XUWDftS|wA "\e t#(]E2N/{@S ۫ TM` T(^q0 2V-Pf)wWMt7M+#] W3nɘ}TԣM0d%f];KQpN)O~m\Q!{ŶE~klQa86NPsLB֌i *^Ь);F<["U-5 M;\j{޲&jc9Va%-DPghuhBL Nf?]Ĩt A"8NBŭ&Tw0 ߟ9Cމ#\Ja[eǷA⁾w-#!,Z{)MN̦Q V%  =~1d!  
DAk[D`C}+D=Jee7eK' ̥U3 +4+hoS^%Qh>ǽ Ҍ2tsE$=h)(/@8p8}=Q'fxtМgcC'e7_I"[zja1f ߽.*u5tM%:P0C*4_DUy;;>BJ-3 ob< Y`,; 7dw+ J( ~v`=%wRUx~ AA]Q|7}ހ@3R` ugUG^:qgQy~܉`d[MTԋ6$dr;WGOiE* |#?iahV`)$$<^Af2R{w' ףcߚT<3f7PD'pdAn >=\7N=&njh7s"^p@cHBq#B^u"a 2YMg.ҩB`zh\u|iwpaѷzv\0xacZH)saM|N]Υ%NnDF^C2֐-oxYY}`Ocoe@5dt^nAye7P9=wMn$ښo)X%2r'f%Cwg&^73UȒG) bծj=M##|_j&(VPo_iWD m:YqioW./zLW,ύNWd|ۊvdv,Nmh^C(Alֲc mر  $_U8HhjC?8 5=y@q985] ۪`~b{4a tk ƔE~d\q2ɧsKi4&A] ڍhE9nTJ|0˞:<ݏ]FK: XNfqhLN*C(ܳVcٕ$y&aEd0B /PH%0יLrY:~ԁ`!rF#WQa;(lB_TyܙucAgr)sp(rF}/)ҡ\5g]=}f Gvk !Kkz#q*,_Ѝ>P+V0ItTwaBR7mOYRߴȒե 6Rk["(a`ٸ؃^Y^QfQQmGN]ƺ4mooL2dѯ-Zm{՟QӋs76 lFP5(m5Ymcď["ɞ`83qcNOn4.IW>9jjcfjׯN[As]lJr?V SkqV{PGk#u;[;!Є ϠfG+Օ_*d4͓*Dt;a GG1z=@I%„Ac-L2+l߲-a F/h+" >D%i2ٸL8fr|^T\#b&=$>EϚ8؆޳';mjzL7@1$06YؤԀײ8.EFƹ")FOSN kFU\xv+],\=QIJ>%'NI = 7rɈ6NS=m;k(J1wxQcDv9J FѩkZ(F4>kWQh0[?h{Y46/ѧB&5@aۈ=Ͼ?pƹZ Pb {p9T-=c>d~5i $Q# $k51{BY4|X6zT7_xty~L+(Gs]0dn.kcLk8CRx&Atwţp%XcH#c f;:r W♚.6̷$z:mxVwɘ@R#_06Zp%l~O8vT?Dvgm,9ژY'84GCoY% ?@^aԖ=➩~ގpu6:C?<^>7LWaW3+VI ɖʋS# ix`&Έ㺹*ގRv8c(s[CB[l03uirh!`kS~RΟ;HD`-KBr H^Tb|Z6e{  薽7=#4sYYWs;5+=ݥ̒5XBn "BQ˳6BeůwC6WVTR?v:Qʀ8Aufg?ӋEm4t>~9oT;@S]I@ _[j`kxz0zƭZ@f#y!k@X/PuE\AT#/$ A^Vֽ.&nXʅzl؟ }>o8fRGq7¸!HIc%!D %딷 Z|j9T8RPDǘUe6Դ`<ηfM_",k}bG5hW{=XqlG54ꛣԏ Q |Uj{g{>qr g ʛ6V 5|0TH@n (Osժ(z0'28 ?ޘ>0 I{<`c]3d;J'Kh 8XG^]MG򀏀2w|֐ADSX%p_)~)KbwlbgPL mC~]]o:P EH*ÈW"KofȁR'NV^^Z^,/pV$kO]YBt% Dُ&(N 4i#\_ E}d֞V7w/Qd)Dd^X?Q+<68FF jVn V)-^g+֢Aqt>yfal dx% ].M?v֊Ø9*DJgNbFSQMjӾ͜@\fG $2Ry}0;{B%7NwoGG!)f sIQKn  vY??I؋Eg0s; 'sk t!& 2w΃-/E'mʖ gsB^;6m?0|Kȕ(U^ٌ)W9)rQa0ygZ4Oo9 !}l+d3LkNI.CFcԘ<Fz1 r8Dc4ל v"dF3vr(0Zl"'+/p1.t=%x jZꙩ٣~?K P"^dztBcNSA veT*tخ#!M.U`xKL1~BFCA`kVtuqKHh@o)x rzֿ/Mthչꇬ$  ?kCY \zqТH X~dsB$z; PDW YGyGvy#QS 0*\Sorr5FuGTN3k21{@ Um@(K~{U:&B*ʝrK6%D La0z\!uNn]ۋnjb-7eK{I'ʄqc8VT>^ xX !~"Ejsvǀ?X^yF{QGTuц9ȑD1)^Oh$Wh>[F iB:h">(|fv 洂 $jű+*OAJ]v|meE/:uemt[oL:yY\pD2vU1 ìRS׎  ܚrqq@מ%o4_5;&.*nc(&:%CM! 
"y {)BRQ$vV= &㔻h\D~m-ă>ݷTWLCCvsОh2Ǭp s6`IP5]Tj$*\*x_.z@ 4/RG3s[]U|n~V.Rwf|O j.D*;'o|VV ]9*~OvޓզYckT>+|Nd CG BŹXPzHVdp' p_eUd&^y'^eA$+2k~ z,]deRF݋*AjG8o gKƓ$˶3KXq>JW;WY@Lݾɏ?ci"1t0S o2B#n½_}N6ȄFl"B ? _d}Ł Ҹ;+:SZ~渧BERDkH<³\iN 6ZfSUɶ[:@n0"aq%# 4Orq*i\6R(Q*MfgTQyi 6,< S&">P5e<,y4+} HLD]O?5ŨţPW\%7Ԧraj 1QoyUئ\m4\:G譴> ȱ|Ä]wXheXT_Afoɜkuv-ꅀbv''ȭ-[)ٗ vL-Ⴆ3_3Vm<$~clUQ]}cz5ONTbURtM/}A=bn5mJFa\pI/<ǡT_nlG_s1.P:vd`GkEPߙ4^ yR$'4(t '0{QrA8?M3hQFC_ۋ:̋gEf`pmG6lBS |4l$<\kF frzGz6L+Md=&I2J[__"jGwɘL-OXs\ )p nz+ːPR"wTG`uw+呬yoFmU}'.5 ђf:iHMN/ uSiYb]ubGƧå_ϔi?LÜ6cvd}&^7aBSO,=psɆF15bM`MYN)H_~˃j_W 4sd7ӷ`#C<g;-kxA׋cVwts%oéP/Uo(.Ep%և1='K7%Ω^c'wAbu)m~mNcƣ?o6lߏƀm*\HrsJJaVY9SN~!8qG>*쓨:Bw7DT`@O#Te e4\jC&ﳁkTr1CIR0!)zm'ƒn&2x6N彸'@SW>XX|rJppSO/D>͓ LOj@ܾɫ'(~G߇we7B袬Q hyLꤠwB!V7t)ڊ\e}sZ$5"U̽lB( 'L.Zc`WgOď Ѹw>) $ԧmi7+Q \Y[A8_7p4m/lŅ"X'#wk/~3Qo@S@NllJ68mz9˖elɐ pc` 7|.G G˹7c:̱2~ AJ'T8i\~ݦ*ԡEYn{YrkdZZgObaz )kj<8Ɲ!%XT_A0TC{PPwlj$ dax"( [D8Jġ&T4x~>-F%cjE% zX,_NA NeUP_J WH L.6m=јꔼ__kzیMWt_И"AR1XC\*5)`? !*[S`Jp ֐j 7kTps\NL;9Ć y vUF˙M#Ӗݹ^Q4#Mχ5рlK3NC7ᑊg;@3fr fr!6l45Ұ_besЁwn-% ɮm]FOU+8!.T;Ïk5$3*Zf_L#^:i×5x}>ȴ$x #7+Ḹz_b#h ]P]H^{aeHך"i 5_..{mPhiR! KkBؑ^&˭!\8>u1Vl2]>Ʀ|v&i22H9\@ *"w&\oM7tCt8 F̼lΉg3LR"uaмrMx bR?^6co[eyLr_?'2_Oq!$wiRA*wn:XQJ~*ΪpY O>,/屿H> IDKsmt>"R3<.2m>~Q~Kt$Ĵ3ߠ@oNO s4OϞV=!\\(Kf;E::ip{,*U9 S#&g<)%R<6jP ϻ +D;Y~ȯݐuC+lwMreRjL*C_xO>,b]2W˲׎bc ?67* X`.?ec 172]:-;ٱfDנ-y-TnpCZ42g‡+؛!˒]RN. )x.2F9Sͣ/i)E}gG69;,[9` B"#餍ݣ5Cy|ni{R-TlZ24ag &F,sJN3#$9d|CoerXQڰ[*{P/q7&&3pu_Y%OY@? 7\ۆ9U_틆*̘0}nwMt<:,X, jNZfD# D3(,ˠ)\ q-&d^~tjNtQcج\  ZJd3@{d6=j.f%Z+"/]t삡pʽ쏠ʻ3>>Om+^yܕ܄\ Ɉr^lb X܃}`[ IvSw\ܕ6oIGU`g |%M%DS(RxxO {7.}T}bUq|nbf*KRݖǓE`qgGt7Gشgq}[H)DJGh|'Hv,g DJ1ZiG5Bg}ֽ6⽴},LMۤ[YZĪ Rǁ5oߡ3=)LU*C;5<I#O X` `W=g*,I5~GxVJ6H_,>Nș`m&w 3ԩG&@uw$cD!@CO1všpg uVcF"bXв>kq4 m9G.[uˤfQ IK!uԟ۴fs&Ў]VAWLsڡ#m uy)_޶ak9*f7S Β"F%{OE-Q'+VP*z,z<oLTǐ b~#~ G. 
ƛ 0?y AhU݅pT&kykLsՙ5<xXѾB.;Cb_@u՟R:̴}0zB 61sa~V%T]{PL`֍oǙE{0`o[TmMd]G}|o_V,M&55 :) 50୶~H%^A.и JH$â"芿sP0EhƢH6E~ɪ] Fw\oI㤗Spn|e7me5iP Xw֎2jn±7+)ݔQG?ZwA䘦si)5xwHAWaH 5(E`$XK}j,I*p-!ovDkyl1Ѯ?(lP*U26 |aRǒ\]QMT7m/x.-7tYXR,)#C2˝@wB>ҙo%WJ .,QJH0+N&!?|l?Y®8c2FoNlxA|| '6 ȂP'8 Z/]4ݧrvwr\ۣt| .Kg >ߝ+b[f.WBx3 V0LZQEu-L#HgB82].*Y\ws69 F9 tZI@!g^r33)".3(f,&f ,bSa1c/̔\֦QUOᝆwɖE(Σx 9sQ``jȠ1zt/FKMdPm?T+\Ќ[ӜնA:m5ae%'bu!QtTk,xź[d %OxȲ^ǡmpxh4w]KvHIT wjK?BR)t.ׅA_"VVM;@hQ}#PqlJ^;{M6K/ޜpۇ}yG#GtH1Os" bMM`pVO3 VҩFS .ɅW?F=J,l7 Ąe89v! :FsDr=Pb YgJ]7SD ?FI(VZ,CM Y[ǷG{XE~\}G| ˆ˃KilE|p6@u"Vd.7hޔ7y$4`MRj:*A(}2a;т ~2 ЮT@ PG r"^<6DVHHd6*u\ይ2*ػTogNBKV6yug&FZ͊l0 EV(5Vz.DViK QЀI(fnHYo:~ySꈠo.b8hGֺs.{Nr]Ӊmp̤3OBHܦͿtpk0))n-k>,=h0ޝ0R-XQROd"e걿A>av^(;\ \qdž̂*=P񴻂\r28roj"#@F@ͳTLWcO WHUUllѐsY|hZ.61)m Oĉ+y3|p*;RFv>'Kҭ|(-}o3*5jr]M=(./[a ~=$n1()̂׼QhIϲ]&QSwAs3. {E[Z}Z|7UWn 2]]⒨#vR4 S{@Js0~hinu3Y;Ǐ!jCV񋶳 w BtW N*%$W͞ww BN~X(vVr? HDs^CU/;rWMlC"<thI ժ7x}%~[ntK -gݩ=|b9,Dp ;?ˈH?3LQbmv2^R6Oľga 2%Yn_n蓾i7VK\V:ZzĈ~Lіnbt~P\P'MR9TiѴQ$.ی-ponR{c(@}&k ,`w 1ȫr*ܩޠ`m}wsz" iwK9i>zCӺux800JR8HpeP6Ժ՚v^ SSZ_U5m:`;tSK_;J)H#O4FmvxlI{Zbo/ Fd 7V ;9}Xo\E;Ԯ$9 V頖'z5 Js,7X"srV2E{JXK:@#i&.&v(:25,7OEőEj@plZwʌZ+x7vD-7 \%YhE7=AF4A;ӡsQAl ؾ$U.&(kTJ)zǓSm ·j~QvɮA*xS#Zg 7ioW#5yq 0>t8@xvi]>dXɵbl!N#x0XY(z}r3o,UN RAJ*3,G͵ π;6߬8NmiywPd;*SBi䶏!`Ǿ“0Cһ'GBˎUXRx4_;-m.@87ߚ='"ʃ-C~ B#/9(6%D%;& 49..B/ԘCsAW)ay8Q;kVղ/$w,ʖ]7SmK4A܄*0]zVȌ9٥cM7AísYYR$Os d%]9 T• k͟ӑFքxGю2Ռ$37do}o\GͰ TqN -_ ֺ#p c),x_vM 7f[MjA.i~q-ALM3ŲDC=Z:Aʵ ͖GSK@s..iegA ZB́HV-]ib%G\ˈ4Cowgqۈ=t_$~\¨_fֵۋe~1.l;@*֗bpZvQM#&~.A$ѓ~n-V,7 #ƀ-s4pZfh0Ԑ<S-)pkbn`i.sKVC*rwOQKHjevܺ:n-#Ol =+x 8**ُYrT;ɳ(n00E'TsITƇp?펽S4G3tYdn=4e/N`zqi|>]69r0E *NryLW_f^U *^ܜ맗}{V!Kkԫ!&O #rf{f&)y ZE$Ivp.$]aLYSr׽TWb2_2Uρ.~28dC"*HhPbLk"uC ؿxm㖺Lѵ|ņL>@,E* 5;"msBB71* +OH1YIN7'J՘sZHk֒69ҽn!а<P"sL?#x=Wsu>ʹ4jRX1fS^r3 Hr'0q7|vy G䝸e =$Ljy%*K\·ЌzMI*iH-ꯙ8zS(~Q'g Ή O me1gdB%DCNg?5?™WjcWzl XrdQ{X\W^D0 Hȗ̯VGo}6'98I Vg1,=;'a =FA/'ȌczI|M"PY{]ΪX%G {fX3ښObHjNA,nX6X0Тh/`q?U^h dD,V: R%U,GCF^)bۖCuݙjԶlAcPzVj;@ifVJ ;6i}Lt'څ[-N0M P^-/=X4UMP"򼃷'߅jn:iZPOlE2 
J*׫,Ka;@WTE4^U4^;JWqkT~ۣO{g7;"`d^-8i=1FeŦ{Dž,=8BpL&B׍HJ=:ǮbF@[o:\] !@MrXt;5 Rd[I$yJO6(;@_}2QptYzi!k= :Ēք1ً1CT=(kr5MyKC8r5bq q+fdبV4}R;q ޛ"6ڪʹwo7m'f,XJFx XGgI){AhUZ"}D̑YF9Č-2XI\qDݎ7%DEXMhaOԑ0귾ʻ[x o6SV߉ 8/a)Eʓٯ<0Z 03ɀ_Gk3lŘVkmF6G|yJ:?᮱7MeNmb %Y)aOȁ'qo(: &4}!!nY_mYcpn0FNWB<pudToieYdMiˋ[U]$W(*+LW.Ȋ3Y&7uwv%a) KT /lgGdhf6F_7oZpI# XĩY3+ƣJi8}{ 96 Dr,-z0mF ܸeV>w9=2sqv2f+4 ΣC&iyr(V*~TV̙KiJOTXB(-Ŕq6Ymh~6?8j*['FeJL7h (ʻakRRWVch8o%v Ԛ\VU>V`۲yei0;D\ʙJ%ݗjY"LPk4Wa᧠'ۿ":wUfȌ$8S}sWD>sRLK98he6 +f+һGA^J r.<@Ƭ C-[p,ͲEe)ٟljDkk݅HQʥ}8V$WME_Q4`[%/ +Eg7a4obyZ=H:{+zoԈ/Oj*FL}-#ݴ͘c%+!"!`"pP.Qa]$:V`捡Uj2FgKY0LˈHdIb+%qƚqv8Hvc 4`=?SRDê*o /AڽL#Xo~$7$͇ ħ+*OfMh$L f&Q}ͳaX# YvDW)1l:Tfd6jOwA40[q1%FS XUR/c\?l禺:| {U~toG5eFR7mQq3>OyD{wd { N9圜42">Zͥ2W,kDs>,"⍦I/*KϊnhAFM  +3GeYTpC~:|]8f;pJLnyǎKR@1!{SJ^Ϻណ4)m'n޴zTGL{2S fWF>- *qd4( m.km3F 2{v++~ S R+%>d\)xAYW1HjhDR |m 0>6"ԯʟ> S?/Y|{(>WDrI#h@T@N\k z'D*G/Xhq/~ۣu o#%JɁ_־ǔ:[ݮԘ[xGl8i=1bM\SUUN7ư{8/ H#rYY^82zÖ ]#|%;9kOyD{wd18)kr'^ (J_o7_vC$Xۡ:`dv]a){I9!%Ưp5"R{#C}95=6<2nvb/p*UONz;#DHP7]$L2?ux.h2+kݤ10 3x;5ߖ3/3}]rcL}FY{|jhI0KU.lOڿj%aEc[l#YEQm*i7=FWG:sP˂ULM}ll?*uN_'P *bCy:f% 7dXyC9旆ЇozkikqƢ>9$]8f;pJLnyǎLp9-\UZaMѓ۷Y]=xI2bzoԈT"l`7ƣJi8rwgӦc^ǻ.CUe>"|T?ˆUa:YyfVHT\R^$;Jf[MJ§9D+Ll?*uÓS˅*ie'Qm2[U3LŽ(#&A#hdP]Z~McŠ٭ĕk/y<`;kH,+첧ψ4w Á_)FtT'( !>\{р\_MM؍ Mph)c}ɗ!|B+nY_ 1% 2j*)ł18yW C6́u@ ,y@kGblYbS?h54m U8wdΗ  V8,U=E`RMEM76*1B/`'m/K,̋ EeC&4S tdْ^p@OҲNZV.|Њ5#;8ў[E1O]J_/+s t8mc̈9#b{cf#v;SC16<GI026XĆ@++*{^cESWpvJv_LLC5L_ ^R370Ӵ$M%|F'm_G˵1-64sP,c,fHd=Gg^<_07rYӕCu*$R`FXeIZ"SSTf+tt=:̓6.kv*}O[>%o+&{[Q*AuV8:`:/6$z{7| 0y?g6chX,/y0WpWeb^iy؎N&"o++ݿ՜$ݳQ̓"˺̊& i\J:$vcwbQc~8߁7ƫ}{_1~ꤣ&uï~b|p3? $;dLVюF;8 q螋%~<=y="rF ^a!3|ʘZ\zV? 
:*7tuSʳ9C7^(;: _1>\CЙ:SQS]$b]eB) 'َpM/?Ȼ v#Du!0#qQTL4%/Z@0K&Y褺'Zc;Yp\}Ӣ*apC+@>vxќ5JͤV+Xn.6ಉpEKϽ O 찇Dc[3XTƀppbIX E*^չ׵SXzϬ,zG< g /9䘐Thb$fݢ'ŔԁWR7)B$S:aZGU\EdGhx PfZ( C:^K7PqَT1m?qj@P✻PV.Ø;+ثzAwv<q_\JG-:腺t1ʻC,@ 3cw/hgB{[Lo6ڗ'#idG{7UXӢD0yqa[aһ9a%zA4a84NsbsRues* =A!OR?Q-<>Zl񰢍%T~֗k8iP/oۅӨQ1}K}$5EwĽ<$k0/k^UD\lP/#'d䅁a,!u;Ӷqv0Lnت:H6\.r5of_^쐰6{YcA&8AIq[r|ԇk=3O>Aky6 eO-PxQ;fܳ 6_y@4sR :srt 2r`g<-@T4'1b2]'{38H s̹UkaN7[xs?[$ x*^k r8x ؗ"%7__JP)QtBIGdjŮ<^DZZBu8^LD{΂Λ-TzxxD+ _Uɹj hAXABia!WImbn *$G]C$lI0if`\('w5aDFsIuc |Q7#-|1"fi^Ay'azOp1maoj ,!؊>a2|DEiz_%%$O4Pz(Gy3}o{{Ov'c=۰Fzaa S0Ns3&[Ο4=qw``1i0 " v+ݍANG) ]ȷn{'3u>50'gmܥDRӐPm)5t0IIbK)w>$ĭ*7 (S]5Kf"QGjs2|obR ?՘u34vF+f0&>$1Hqc~vrpQ$~!|w^2 FT:6$>[rF Us̆9gńL>ұj󥌹ٶ.?B\=.KB!r9~6rK y(j",z+zlpA2Uaj%tsyWB &$\@h9uhTl=ng9$GįSù w:䥕v 13 ѭ 7AF96bu م6ՏIF VV}P ysJ F1-&̭Tpz܈Rhg%73E!nؒ# ։YLܭsO>m$g>FF:2Q){ߌ>3b՘z~Xu/˥,LaV6s5L3l1LoY2Y *WHu}X8Ae(_w4[hӡCKǬYsg`?GmIStg< x]j68 YBjO'g"Ŗ'Rs2ۣ tO+ko-;2?~1arX5][s:Ctv.Ii1WL |.tE/ H$^-@d 4Ln%SgǤrmZ%ql[ZD[q1a&iJ:+kIRcCgp˘D?!> 5R![(JGP|-}w'^Iۮ7{96^ 9+Gf~(!Ŧ\/W ̄C岷ZluBb{ OT+Lc'+{][P MQ^o6]ؾ𱄜]P}NL$NhmJq.Jh Mf,@lf'4dRQ׆Js'6lWpSH`bL҇VgLg؍ tϵ%mB7)sLQ얷uzwNvy%u5iVH ^!fpQ~3ȩ*Xh+*WL>w_R0f9Khomi udA5 QJ$bc,R"j46gfى)!S>Y n莄(ꈩ Nord?v*pbH-'RjIv ԯQPˤYvSLAx;c.D25Hܑcw -5i>] %8&A3"lҩ0>\~_i!Y*.&Ab g R{8_"j0+"4Gm(3X^w#nCWÐN4ەfkIVW45fq;BvB:Mh zwpk"2zr}?BEk|đb -Vz`!/W@Z4vp<].0)Z T3͢Saf: qx#pz\#Ôr̳=nfXmj3HE!i ! 
-p{Ꝺ.gjMaoKP[Oz[,L^ݕo#F8ޒ|bp|G2".sJtz=*谫$8C (P@( ބ7:#Y;e}&ٛ6MMvZqրT '@U_!yLϮІlRk|̺QA)o1.Ұ{ՇN/JpEuO_iw&"kWld~c[PByS,նwpBo՟(߆;@FO ~ɄpH<4kΑDr=:{}[^J5AdJb [ё*N^P=1wЌfUh܏r4vtU, >2qu.c79' A>5Z'6" ~2^#9d7ѥƳY!%傛f܈c.K;xgP۸H(Xq s(02Ѡ4sY.ܺzޒqٶRFCw< f$mBݨ`-"=k" s$NKQ^@"iz&SS.@dE:`(Q .#X-0R-_ʽD1Y]2) ҜNww{5}T1uV"4v\EMi%6]qȱO&w<$\5b3rB$ |'}d>m;j[ 2ϻ6N:ӫ`dZ{yK,w{A'@-T7<HSh^!WL(hs*U $TvJ>&nب^ ^fd< MWrÖV=@$Uu&zM(fx=6_ =ģgp8{3@M0^comYX90h`qjJ gh>RXv`QF-dj$ɬ>(h ZXx됖֖V?lPҶW&Hъ`Wk"%Ĕf'=)Hz@TL^,u0dquz5hVo?ޖU, d$t뤨Jb|SF}@@m_ic ȈmԸAGwNCyS Vf!Fmq,M0ĊǾcCbIuDaz訠ǕqRUQVkpX~.P טޣFK'"4}Vg}}A٭q:X<ô.q1)So͗ ӗQ}GqN+f@=Ph12^v{VR>FL୶cr.?M%*+Gsen773B瓠C>/ܙ|JVD(0j}8^=F L5<ވc;XA~#2:$3w Xt=w 08P_/LSj&XNȠWomBoӐudj Z< d)Χ.jzͦHMVSoyKvO.hE8WIswɀ I0 %dx|thTff/;|p 4(oJP&?"RwezKHw8Z 'k m Sώ~W wШ8U?&^nr\,6U9"fp|ɬ~,`z#%?# ŌXxХ_L00oM<{{B10vOA^M'? [@-ƶ 33xUb i(7R\c[PET9xp ŬʽPp*` A{U抃jV R x|Ը !FSP5V7I! m$S\84u݃?z\g3 p fbbZ܆SQN Aw#PB:]1tH0 qᬮX} Co s8s|-8Mt͟!xrZg 18 @⎒N5F 2uXsdzN`PM'4=eRQnM"$X܌cRӲ2YH6gvfPA,-h7zW9>m`䍟< C!N5(9N$݅d|9d/\9/"AE5]l0o |^4Vo2{y(= JK ߄ r&^sWNޯq3VsyHοoaB9n^PMϑ &GMbjW˩>@yl&{U~v~1KIyK4w9ܣCn&G1cc9$ @)o똟c%L ,⫆mFS8j:櫢i K7ĨS6pO˦Y **Eh\\pPg+,@ sMETuuf(]p816iʚHk;ޜޣzzmOG#<-6~{ &VlD_Ĕ-g>aA"O{ #|R9+}DjUftHRNGTɢYpF yep*6c͆ij߿[O7`_ޚORTґRŢ{R=u|7?SOh?8|=X.}~(M[~(alʳ~2Md-m nwY(۩jܱw(z,C 3EvgBTo%FMl>pxo-yNMc巒O/)l%iC ҂{ABQ~6S&{%?h$:W@mw#UgsD+c|8 Md!;#i#}`^#@el||JO<@Twk޹8m|?VaL%_[`[Y:y hitKڔ\WJ\.47AL 0 (c}߅8 P ( %|[>thTfc(-FG!W] *d P%$<1k")zavCSq[H\QVG39T\f! 
#LW+ZYXxُ 5[hZJZ,;uriEՍYtPDD7lQ,>:EEjL5̏$@G<E ;87A^ȾWk :\km6 !r.kWzԭ xLfɮkB[,lCߪ4@Ń8HDw21@ ϊhlWJB"xb"(=(_"j!BC*/+lDeR90-GPS4z5a8V/]^$oW4>~CUS^@kh/!w6?@J*,Ë[cTŃK`VtW )zp䞱HeGa )1EB/ Xel\ce^1!tQu&dNmpnGԆʝWm8(1)՝(JhMMzLvK#^p&za+?'el?@lHgv!~>'5{w_n丒mEu[0vUR {K v6y(L GL?ptE7V E̍6oKz gK0 v[V*m $j u[x(Q&gCm.UsoxXaҎmbs`?B8'7p ]_2N)rİpXf{ssXRʒTgL*ku` W:pMAnէ*(A=.0 _p/C ,P)hqlFrʢ)q}VjP9W`j.( Mu%(nyTG>XIy]&V?O3Cz/ ^b@ҚEAY=I#Sћ١ofr5?&dV7\Xl@ ϧQ),b v3`Vk@PrGt X#2?țuwX")"C\PȆ71sH6<`TΜ*(NKdjzL9+ }Z ȸ|Z/O(Wq#WZyXl,)}P:1I]+ #t}|B~Ɏ>P:( 3{>ea2f  qċ;ϒsxY`B̻At(Ϗaԓe^@8V/\2o ~[Dry'9>Wyλh1 Jg$1tJ/aMӡͫ\tfqT<(!ā:hY`綤<^5dܗ2 4*<,g"{7 u["VI @P _5[֥{L"tWIYDAkfTk'汉X':|9 W0:xQՁ:.J)>zחd\@##yLt>vqxI< Pߵ-š6pɲ,LpH1>ҜO1j :EN 8<ԡ4Πq7]?p校l a HMtyOLPJaB㙆I,Qhq/-JPRPL bHIبw$z>FNd ebκT1w'"W{abݖ ڐ^Hf .ѱc/JOuvׇ.EsAOj`O01kBvF]B4Ҟ/e>:4VLv/YaŞ `"u[؇dõ)rԈ W{Nj ?  K0ew򈏶Df5n]C -Q܎E&Rt"ˁF:')A,[jɫp=EeQRY^pU8)&BxZY+f a\Tڐ鹈{nFaō!nS_xu)CȵT,.a2lRuDk}Pj;-.ۨV ` H8;zwWFGX``68!?Rs0-6dK|xb鑗2ӻ[v9J=m/<)BlI?os%BeibMD #9ﶳCM1 qih|bi M*pzeE{>? qj,3`JbLWl&23u:9cN3aC"qߠZ3ȖAj'k- mDm;kB۴ByDJCm:y f >!bʲbl5ĩT'WcA펝[!C4rd ƆCxYɝg Pd)%lZ> z*VV9wYzzd>φ$/xG^,]Ia}i3_jRg-rkŗ* rtTmM`B"Q}`YPXvtDz)4b)FdQ{w<aM?&ƣZ9| plN9EJƏ@)d/:QAwOm+}oȶچPK5 vg}]h2H D5Y|Nv~s}b|@ ip4u׹s5ƥ 'k}̿3D!8Gκ-;ծ Ѻfe5,y!JV G}Źh,06kA9TA/eD@}Btjh)uuIp/r -yx)RǨ]zOh?P{9gfpXtd" 篡!~`bEǔa,)a!/}9a:R2r/4IjMx6lNKo, f_ Lccw 7|@8 /2J!˞AfThpc7OO^ !9t4Ȳ-g]M'LY@@01j*7ܮ\?A^l%cB,Tcӱ_8dP)Iy}bs (Bh7\1l>0˃*6./͆#M (D3H3^).z.@A9e¡3WLS"I zH%3F9>#5"+Ap@[J~*㸰}[bݧF| ZRb_*DYz$z%Ff0c1 nf=wF0?v}6"$j▸$P( zg zu 0kv[ـjpE3^mȰLL"nh. 
D"x6 J9*(#$캗?fE@H;Dpkd1-> ]|(vBq k˒ '*%(7/"ϵIvR Ү*ΉJ<Tw tV>-( rq{lE67 c܌rڤ8D;x~BtThpc7OO^ !9t4Ȳ-g]M'LY@@01j*7ܮ\?A^l%cB,Tcӱ_8dP)Iy}bs (Bh7\1l>0cL^n (i+ 8oc#I`2 Ttx68un\Ḋm*m4{:>u~#'3IJ/](/Ԥⵍl&ֶXT i+_\q >#Ax[]<8*b({0Ͳ3{n֍/#_@$Nr e eoG̪I>sB_f&8GS`ƯBڃ|/ԀOZߋX7d%C9#`ԃGdw {"AAŢ :+t5R1!ޢ@?kmmxB#oJ[ϯ!^!18%43BA?}:>?*>K>/ <+09a +8eG/?'nYSPk׎53͝ :,-N,LlEJY fcE~P ڣhs7b>GX=헓0K諤f|NBhzD~?À@C"Q81Y vp}l6'/yghUiG* FQxIp &v" ^- MbI]e>_HkGl&)1!]2 U/1WŃ=38HZД3WIR A&Kty.51Ex[s6Y$K_*1sqDA#W"I@X8cދWY[[W,FQ5iK)cedo 0~;_Pf&o+I&! >β2J!˩ql.lw2Imc]@)EFCM'LY@?xR V}J<.b$;0?x]mvChȩ.\Q81Q)@Iy}K0mwPtJT=R]$ӎhϽft4gj`A%e pzQc#_&Al߶ 〲VlsJU)5)0e u:kƷ&ٍ t&N|K%S%; Z4gynl6'/yghUuYFQH{i!]観fKٚMbI% /bDҗ[J =ϥTiI8c|̊]c9Q y=4hc3WIR V}2L)TFf/ڦd+,N|K@yt*0xlzYVss2v3-o 0Q7s|kLҀPeuw&u1DjnVB c_HC hy[o#PY*H>p*o܉qf6,k~Is/.bpalٵW bsz̶ w#` Fv9.E g@gmt~q:RN4G Ȗ4"a)M8T :%1L+UE1Y {r0Ӎo g^jΡ`mm| !Řr9J-=pJw?o7 V}JU1ed[s6YhDօ$Зupv:pe#/S夝Q_{Nf^C}O<E0h1122_nÃ՘>~xM(03/ܸLxOnCzt %_7B"V۝z7j5Z~rʨF6}/6~#y%KS`̗ٴS7VNL2)Ovu#RAN3)KztƎusP V3.Ns55mqzfԔ.X3b3j°蚗 &"K {Cz ϪaMޡT'%=䬛Eʝ`$`uNiO@.xQ{Wb2/n< FD ͪ׫ᛘ As3G<dR`NCj71||V5e=~z1_"JJI8vF8v14n|Q3OE˰Om ߧⷕDdH#YMH9DWAĄ~TbLIߵDx7GKpw]: 3P[%A#6iRJy`耽K l~V#Y~`5L0PAuBKujb-4-市D>eG1yLZj qچRWP32[úʗ%;#X@{oη 7 G7ldu_%K8 u(pu$ۂSgNwVdHh%Q4hK=5ǃ"3 5͝:ĝU\av -P~J$j{[ #cqAsF0 QPMb@YeOnu[CP%`)=y>^i#3b6jJF&%X](OFƓu:x8n(k=ī`n%0ni}9neX@_)¥xU=tQ* <"Cy2/$vR'inUP{ki).E҄,|GqCAj)LU~IsvV.'iZ3Ay51VG?Z1sHsS&DdӪ(g"s$& b-*푫\UԙA%Ƭ Wv5"")}(BOaqZ"mȤKS}4$ve;}&eO#X&d{*ٳ#"{/L/'͹wm3m If (:Gkԋ-#c6X Ԯx厤%WމOkr kAF 2uXs\Ry\J8B|]kX̬؋ I3jz> 9ɚS@sq:zO-7'0arֈ{"3 Kwɢ~Wg*ֵr>*cMhbР4 třD^2_#f\x;u\HlԫG쌻!i <^,V9p /|Vy2ohIËSUZ}@ q]-]wcK=\J*8cc_(432^2wh}ZK{t|$Ѱ-M|TTи疲*[͢8' <Ӷ~[{[;B ~ZG+ÐhrX W"wΠQE ()d%,Ǯy8v p1eAZ$kᔐHRd+K^1|ίH X:R*٪žqez{A{Hy]wZ"+pUz^q3|qiLLD`Ic%@cPŪ`nKȯJjwjit`+o*|T4ra~дTVˊGLIԦ2GZTBkG_(0M`18ʵDƕp4HIIf A>r.kyYȰ2qMziдèx|]32+;33?MR6F㻔7lHRِ2?rP 70AuX7[g6C{MU:pƈ{$1~Uw_VCcw&Pz_ۼ21(F=\஭LBK12o#HFZnh[01"w f6d;\7>bZ R[+lu1a*e/duUJA x)JU,L[kOL ̼=GS=HG=|R+!a8g*z@ʲA1˝݁l߀jh@z{4N$(ҚC&hd g 8҆w;Y_b!+mH&Z ę9{."umǙ4pʙSXjLÊ!Gx@=G7_sZY 5Re$F&j0r>3t]"^Mt+1}p 
aͪQE֋Ur܋ b'@0Mme2K嗑E {xf_8nI%y(i J/j0|oTaȢ?.(d}~kvT$7ޗI cIoc7pГ4jYtuYol i.P7띞AЪ ǨhE&PCgm=QA4HTKwcH U @],1cDi*3fF* ^'y|nY怏L .78O`{r FZ:$v e oԔe**Hl. s;TgE?扅ץF@"fcRMK:)t#l`rwB0Nb83[fT,ta.U6EB~^ xKMW6SMf8:BJ2vSh,XK @@)<@G آ10'M&UpZ{a {<(K؀Cw4NJ9z!qx]/~#_"`ޙB/Wӽ@FwKyBv )_7V#r/Y֔Z6|; , >cWY4bI̪liLߗ&='*\ S{r>^8$PgpІ4ˆ0oyKjWbo{W;`X4DOCoַ! IAlSL+%!g=ލiA$i Y}onM:U+R\}z(dwZ+f{ ˛w%놣e*g 1? [C!ha)3 4sp|X%n]>iĬ]~l u H,!8c}mDD1oex{]댺}.KWu=VfyX \pyig;sϼؔW58=z&tO!=Y 51dRDޢBdnQXd^X3G҇lz$0_-·ɫ|ۛG&f?Pȯ!hV?T@zeՃ4)LKCrrboTdCN9q.X}x{7iC5<YX὞ ]k=L02ݥlӕz;M(S&"?"CPv=h`K`Ӄ^Uh{DZz/53*dw'S7adžUM;fdPXpnl[P=G;v쳵[bE^KuYS|^yYs[vwW=2] Z̻4-OuC,3i\劇Ds$ECY`];VRȻNY(.Y q4h6uIBxG{w2E.$T1)3o8a@^8}vx>>F+ɛSk3ݶ{zaxՠz\5I FCx/av`R8YjJSdqe_1 -cVx$}a_W 1Te}xA?><ݪ<@yЋe^B:jˁc tm2'$C.\ Mΰl>A R\1 ֠ѣou_O>, j7wѯxI)?)R~{|]c:` {k@) | *o"8[,l[֚C4~aAMdBhI`mnʽ$R&.7`~=I |tR+˚3A5e%%&mBnnP؏WYv(#zfJ7ΞdqǎQ_ὰ iq :x-/Pqʔӑ51"8[C9]=PRzDM޽-`,=[TPv/Ά_̰;T]84CF09} Ag_5R$baP`KlgVȩ*Xh+*WL>w_RBd'8)zH&qƉ =OһiIw84SfҸ<2L߀1koCJ IYy5 Hκ_a'm'KydV0rLg^CFNBBZ%Ȓv>\,BR;C|c>>Ut,5;[1_q.f ]p1`?;3B!k6'"K_%l.} \J"*ݡ]K2wQf QW|oO oJw4PzI(mH|ns':b_fDLzj'wX D)9, k2>ԮS,H.ӌvNAc]A;XafTC˯ULD까Qmk|o'XyMxzG]6;>X6`ڐ;8ЏvTcNcAFBKf`bh|yAJ8Yֆ]5Y_"e1u2j܉z¾;XDZgasַ^$LTeɰ`3mI & gaϷYT͆[hulS2dd%?O\H7ٹu|řJĴEFWFUP( @{P?? j@3jNĖ&Nv 5abvʴ̫De*py[ԇcD8`U|nӭv$Lq4{_AXLU7) OۗZ1te#(`c7 TH!%ֳc8x< 2-ajSH3S !U!Kh+О1Tp5[d2^]DMYɎ ~_Z`C&݈Wً$_a!,y-JtZaE6r!tC`;@lhp9Sd&Ps\dߺl2"h^C/͔O3k=3d*MsGPD]k# wU)GcS0rkdj|!PaO90yB"_zfDǮ# A)rEM b I&BUM hos$9<7>vc=4vrGςsЬe_i# }~ vH FSr^*^MOrJO̲gǢpH2:\h0$?| BypQ+ |p2Kۼz~rP̴ZPS*6F!_k<Va엓moTa 1t?R_Yty9#ii |˗Y0C5K [dX(`\ (>t]ᆪ_x~(N4zw(t2if>ǀ: 6 YHiVmjpq>z7?dԧ^N7Ă7uklJ'PϾ!g=!"6SZG\kL+v!^kf,o j֋*iL mnCEA3hL޹o/n mYjVA=y~'zU=Yz$P :˨fj!`z ,Ip߇PbP-2(j_6[G~ꙟԂ-kLY/ʅ%,i!s 0;ߨ݇:$2@)fE4 ZK΂bLěbO tΩ0-h,xhŸGuO.|(/),q ͿGA5z0tO5mgB2NU@u+cA$ %p8:ԜYlzojF؂yAnkyqDŽ]uZQA1K2&i9)kY@.~{erݜʲ$HZ>xP6C'J oܭ{ң@ڧKn^hɗT0uQ DZ͌8$ȵNz1# CL,V. 
B{(xǃQl=yu7# g&;6P }i v!^kf,o j֋*iL mnCEA3hL޹o/elh4dD'_'Sړ 6/BHC -pO6!NJ08;ifS473-k(ol?[VD G dPm໕|ZThTR+uGUKɶR 0k sM'TK';^CIe eZfU2[< _ >҇28+TL5;5`RTG|tSrH-z˨UO&^ r23qPD]k63"9ď39le0[<ۖ0 =+uGUJ s%K޺%@72m؅{E_ǟ2٫Z.īgETSoA0(+I;` ˡd·ݣ2z(DۄKi(Ѿs@00e-'єF1Ty5fL1N_N^# vUlHR(u Rp ?QdIgZϓ5ZW=VV~8Tpe U@$.CkUm3SԱ%~@ZŽB$ʄ5|mfR,u1ft*dJHWt3~j7vh:C$cDϺԁi/9/_pB3%W^c /}`L'XIV:L޹Dl|_<1cAjHKנiOJ$@TX5; IS|R*Qjphb 5Rj%?'u%B2k#Ԃ-kLY/ChhzLJ.7զwJr (R̋dhgj sFDŽ&ym> +~烊Ql=mꔃqL!F>g)Bb-\I8אAF=YznVh@7kq:q MuU^&kH1v ~銦#[=[rWPF.Le f⡃:$lgǁ&E-Jsщfrad*x$)w-`z@{<VaK˨ɽuK91!oLd۱ ~ =%?PeV]W΋L4ނ`?PW5nhwcr/BɝGe-~Py# HӞQ}R恏`4aBW%m"]Ӱ[,N5=-(b58<7ynNW&fn/m_%% Ne!o?'&iCBHeaD^ _G͵%\O㠧7>vc֊Ҡ=CVgğ4/(9%%o:$ȉ/='ER=s@eDo_$[1(,ɔXχ?7 =u#z$z|yGRVy"f<J Hqm-yH= ;xe *ɯתrZYFz]!Hi]?rv$BZŽaJ)Qhb0Ē+yL%&+[7K{e  ^i? ZkY`mK-|ktfE۴bL#ncs[6T =? qxv{w|uGUs< ]ඨӗP% 8p%Fc/apXbcHQ@WKM'PҘi?Kp%~0BiAIj( ga}8%rA)p~]ABX4J1g%3jpdtc)wP|u'7TUQT֪')Kv$et#a8¤ ,ù3KҘ_4l,͎٣'m¥Jw MՖ6We9$\)@;uSkl5(D-}T2=hG!-vH4ܬ?>!lJ#*ers)(0`yhO$|Z"<~Jhq0kjбۘ94 kz h|zN"_- 3A AaᠽIMxҾNwŁ!E.;8RVkcXr4@avt,] )$"d-`Xl`gaPϧ208ATM iku/-^pT@7'pMQOsãL@zUI FB /ǔ<5 `fCQ.>bm"p~ K>h|&zN/#lt#Tz,q{2N9YǬ1 .V P%G&ʯcژ6GI%lX^ȿ[߈s~!ݛK\iy~#4塆`8~[oTt AJ `h%؂E+ǥeaiE  gkKSYIA-y w`+iZ>)k[jYt>^_8!)ʹk-|h',>KYW5k:P;*zƢ. v.OFp,#- 8rԤ'o`\`:<'E$B,1litg* 6>r)mQ˺ X#LKZzu4%c`иzq mPp?\,hi;ϣw#!&P[=gG!l<W龻Wu#c *s_crԤ+ْWIFǬġJQ&+VatR84]"{ Ӗ_j>z߀ԓ]!MJU޷Z|L1ڑضVpey\)<J`4,hu8Ǎťݟ9ATg&0܈ȃkKB`1**ѡSEVr?˭k[ن^0I08{-;^{~q+k=gC =]rjU;ž]^gJAoY-uQԁޢǛxp3F{r',0vV@}>ݹ|An`ϙ'V08}^ښ ز5"9[P4Ş0%L=y~V.'`?ϗ&4OۛK0Ap,. *6 1(Uy2FG[a;NFhwDn?<`Eq9H}@w XY8V;1&lg\9vˌwŐ {}5uSfd+2tSI]+ɼIJ:` k=; !:9V!c0JnTp&/xŗ\w+2ȠO=CUA72%F=@ር™o<XU3zRJԘV}.e*y?i>zd*Tp|yʷл XoYɰ8ȡxO&bWKtt%¸1紎;9>֓ItE+0ĉZrIjI^[t Q{=|.`Es.TܬmɁ@<'X y#&㇊QS @ 7^p lk@:]X佬hP5g>ds8%.КTBȫ194j,IJNۭ7m iWV>.N! 
L9 vyH6Sa ^?P-2tIXzۚa|9xǭiT5\@36'xe>XOUOWo'CgalDEmh {.ĊM!72Iy+6A|UŀGehB{j wwhYem/RA \woug[Gro6Ο6w r: k&n#n^|ڱRosW`ː +:$MֿBR̈́ayr(!ﴅCa+s' @6 %jQ}mhh3IPutrܥ6Fbƣɛ$(|{a;0И,~eL 2A&ԋ)ͯݟES5Mˬp3V&'yKor1F8t@}k7)o/?5ZMߠ'W٪gq=m<x^})RzƩb t}"佐qtxǿӇ$Lo1.eZKc z2OPwU?&#X'Ox++;"+ w,s܎/MBmJ HYO")}(BObm>aPoSep 2tɺľ+{A Zs9Ӻ+ӖaBu:[4yDA8NP'mvޱn_MʼH>e Tv("REz@BDqw#Ob>1;l3u$ne Q2D)}^d\P- ͱ͵ƟdF|~fxf@@Dž7)'35#e4KljعN(sAƮ0J&&lqhm%dC!%Os(nF,ב] -3_-Jm4w׆Jeș\,4&\לw.`2L4|{཮RakMZ3k\1=kg;[1MIɵuݭXO H&D7/jh$rkIF(r 6E`kkwda&!x@>HNO7j8+F fݸ?:Yd*bJqo9=O' 0Xw(&H CT,'[maF>.YO벞zOTAϺھmƎVN7CIFF!t\GRؓW3C3 Ey# کӠV(vL!RMնlI4JgjH4bkLWp .gH;eN:D+vO 0Ճ恖X|l7;IDs=L Y寷ᪿV &Y`N⅄bÚ ;y=;Ț M=E4W9#1xhoߐ2ب݁[n+dFE}1 qpG|оaT*j/}!m͹37ho0lm۲4N#ν{q;+6#f򐏄+io/-+R\EQ0©p%> Xm^&4^cVF'I_m\׷+aEn)kS^PC:8L^ 1QҭxbEPfW7OqUK6)5NV }&N텪 3S)FXm42׎kUkKO#;(3W$IԴ2qRI]b֙^pՉe =ng Wc{vJ/"0A ֋8fP[\֍5nR8v i5H+CsH5<3TN@}k0(k\Z`*?1pY+J0)&gB{q\Q|1UlwwY*2vyb@r@OȈlnb&e Z<~ *uz% Evwf}' G!7 ɷu:/F4T|њPj8I|d%\K5۸>J~!6 x.ubD)axU,6 m q@cE ׹uV]X$ɮ$Tb 2p+B8ى)!H[SeQ f|ô2A&1:gOھgʷwGZqbf9{Ⱥ4S!Ю&@%`o'ԏo@@݅[MUU& ̮ȓPl$xf'IKxt ;,^@L2ؿ@;&rdrf=g$Vo/Y}^˰`$ 0'V4Xn~.g u8\'-(vlH^u|B4Ҍw%A~C}ihuTr1\!Qc4ucm|_nL>BvE $8Co٦8TW@%UwƽõchM Xw`NYIӸ _b(A.0G#>,p}Inn 9/xlړ!e >0##īR^qsrZUU亠C=跧sAԨc7E/[B߹YWok8H^)4\Z-3筦B0l ˥\[ƹWZ&$&[>$?lS#w6ra!GK ŽEhEїC >Xm׮x/7+߂ѿk+_7֪) }g&AgwΥ?pA?`\Sbu%v q Jq%c Ti(y2yt9H'>(z|55Qxoͩ g| %pyuG={Kn]7rңDp85{8Uql9X<}D (<6گFRõO[gTFjk~YlBʝZ:M":0%9;fn1\P!\9 ,,: 5wRt5 bR-FDii˿l@kjz2FO]T~0|݉K)𽙑EFVBmH|ns':sO{ΈA}@1BQn%Lz=t a݁u}A{Oێ66!j (z֨j")IV`(H8Qx&l31#]mCv,"TPM|}k?qPݷQuoG+F6=Of`7FL<"NY?~<7 [8%7:Ags-)sTuZի7Vh;y cSGr{ Fw<0*syT?s8>yJ$DA'?~GS{ n.7PV!3c5i\޶s]\!!=! 2h@^%- ZDӦ9$ZXRָE3h;U O 0/G? Og;o%(-r+p$w^4j̖VX}Jvm/Ow 5J?BAH`mV8% /ԷDkYޭVq\ɣ`m=8.[/dJ)1p=*y3{:D^W^T?SA?fl\Ζz+< (j<'09II&sFY~|(+bqB{ML,![.N%A묄G $δ̠k= QE'Bd#cѰZBvtZsOweh809\֘.nVfb>6 E^:_r6x [ij⍠3_d4 D=?dK_wP,w UvT7v>1p4ƣD*>zoiM7*Ɍp1QFԤyyX'c{eٕ6tbgk2 qDwyu՛Wce|fI >ͣr2K2ϔclgQPJbͣ& 3 sƎS^6m،޸L|1}=VC_J+y0 9T+={wٱ1_o:%m;Ov}';޹jS] ECn/&8P .]v*abJ`){D*+/je;>:֏vtDIz^/lA}žSMr_%nZHQ>$! 
ϮF!aK- d-}@cf-8Ծh4CH {#<&Ua\+M"zP5d(6Q1SSTc# {P#H?rh%xk# { !8F@v;ɍ8 9Fn4ieZlP$=tM'ޮ5s Hu:tNR*soy :9gɶ@cܜUиA[Fhg+KAWyaPa2@2-ԹM^j=1Ǩpz-Js^}dQUcBٔoE_X{7oY " ļ_f%9~`g=q=eJ[_d]'sNuݹYBrs/g% \;}M"N Q-x  .<118ʴ]j0a_, LjBqϛRU@r:kvpW,-A.ST,t@Jt{̕ uF~^hw8OhUL=y~C$0Ԍ  wu) &GBoRJO!}8̣(>~FC7VՄp~)Г[eDM%wrBb0Й".0w0.iԽt)OoIW"\v)%z3mD'@JJykV6>O~ '/ F,ԋsTNsN#Ғ i=tdaE#63&ǎ8mPi!fJge ʁ>P ( HTLXΡ2T*}DNf9Yq&8Kw-C%;†g޽nD =Ʌ 4+_vavTJ6܂Xh+8_HlckϩHC d HWov9RѬ|xb!_q̰ZŏJkWV=v%h:Q&2.{D'SbL޳aqBXFM4^!`yH΄xW`b#|C:܍J?2q뚩hkS\l$mx[,ꖻXdۨ(8Tg0We6_w6e<-MFdHPIwiDͳJUE ೖ g۟|>.D(ϴ&Va[qsx\jdo~Ƣ&:1WdMg m15^L^9LgVwW+]gbb_ M Zz0 FɸsV/Dtëswu׏DB|xpMt4 s5('Tn[KRz?   aw/dxg7ثE%VQ<CIrr2" Y+Pl?Q6*=QJ۰(7,fwԕ?(P0]V;bMC34Kf{h迌Ai9Z2,Î1jV%-G{}XwcK9v2r6Q1Fɸ=ػ?7cemDʅ<5@C$O3mo m15^L^9LgV8S9eG#)&։ľ1<:|}1>CAS; W^iQ#Un /E(#M^!''%fShSنj4겱jաwDnNu~AX$dy0jmyvzwh+ogkG2V*">ͨ)~>Vr뾜gMK8XjqnS8֍#KQM:\sתlRT$YH_lTs#͒~Z]H|+.wWΩ<͌?"Tm Uj2Ua ހ҅pӞ7wC =qު;$BY pLg6+kVEvʤL2[Wi$-9'+_ԑgKcx z5I(69\ j ߁q krФs'%eJ|dIh.&a[0/胡j \rԚAv@*4 Ӣ"3w!ͻ™f¢5\9 oo8w$3u&m_3Q39HK"zY(㘅g2˞Q@$?Ԕp#gGbx33[CJn{bܮ/?J~,G5#v0A_lD!^еk*fjb(-DZJ>ۻJ@k&.3 f'O Q{sSlT.(8guvNc2VH3# {7Ysy/w*/b@VԹT773z=e-\\˨+cDD~QnDvsŠfaC@m3IxMU;뽱rT |-Ha~rM*I,vBB2 ;2t4A߽Q uziM,sj?~НTnc=;WJ @v`0Y6^RP-bY<M@u g+5+*1-3\IpؔxJQMP6%N:ԧPN4]o"msrLf ҧd)=rSEaV3aMRtѴw*!З)=)Z%q() t%[X䚊韵 BlИ]D"7I)̮8pmI|yn!qE=fwx|(j8B?(C/A\[ \R䧇)- 9,UʰE G+BN1xɄ dzޑ $p>X\SY"$X܌cR]Vx{Q=h+/}ӀVTW׳m 0eo3,fO]iK@7V, oEE^ģAB蠒MZv:i . |SY@dY꫎ Ty؛ 'X5Ò#5%W#l70j4 p_2TI UOnb~tn.FE|^і4q ۆN"'s23YFuscc:OOىm#ȴZ-k&+  fuUɊxpm' Y3z`bs. E$Bhk(ŰrV48(h(|ϧ ZVۊ`|3ZS-ӹ]P1TXPjD31*/M;#.šAHhAO2^etA黿@VgWJ41)j[7q=JsfL[u~u; /-HeL%>]nL C/_ԡ+O28͈^!xPw69fyil`dZj'H>`0xCL'H!A Bkb`m׌/Od{lO1 i&zHd--Qfyi; Ιgj;goT?' 
ԁ"De76ߏZR+wJFWK,zpHh|bNG#jj6C5u:"H!(B8ͬB-O $LK׻}#< +ۉ&f HYg43y&!rYi1?H3CZ.k g`h+ش |Q7_B"xТHd46~+|\g2W5$;Td2QHp7?YmNIRڳL̠ R&iׅ6] Zc|3 ike um@TH;ՕUH}<!.ߧҳwgOLu%(@ ʹGq}&5&bͻeN0O| LbR !hܥt8^edlkSLg[SNLhyz%%ɻ^3z'`$z{Qo k!;S~qɍ=,A.Lz\:P5a5Ԇ 0~YSUr:TG;j Ko>4 ,/׊{/PG;Uw $u/1hpCFOv|r:4̄`JÚ3[CR~XVKY<~ͧ_sE.^MH/*0~8Xrۮ*Aœ-%D;FeGi8^'e.'l|A.;o00!Mu*[e 5e##e^hv􆯎7|kveM&]d8%!?!(τbqݯ5Ȥ _5ͼaXS2#\,⹎UK>xNJHEn Fħd'iz twd$H3eKS+ff^"`{ u @nځg0[u ܒl^ %:EG7(oIXAk(OAĵULd/[ȢKb SA0Bz =F4wWUaʡ3F'[vw~ l=qӥ72kZ2\CE:UU͗Uw74XĐi)rh5E.ҁodap3YO`%$oMq^y}/1i?d~*}ZCNh“ŘwN]QG/8_pطHjCQ#T[Hp,+`Wxoĭp03<}FS*ioP|_nl ܔiMM!"}q9g]77[/kRf9g HKcr@kC^6H:t0XA?XőR[v3(Ro@wigvx4?\Te}zF 8muҭA(8E +,x߳>q Rǔ#P$=Vވicjk7VYYYa҉uvD}mYn8q PӹhxIh]1q_(KI&|y ^8C9NyV&dsw䷖_yO W[:Jx]s IW^y6yv4Ad{FOc\~YFGϾpe4Vf53Zu($! s_M1l*+ȱBraOJGs2Ny?&iͅNCbHMCILzp>f;~mE0Ӥ ;bD3CU&O!(.ܫ&ZO#a%qHy\Z ˞3(yco]h-XyQœZ}9ӟw\u.a )v;^sA^]Us0 9KCR4@%^M6j<0ۧNR4P*'gF`wLkKZ~~2R»j]oRH?2%2(p/.45,y֭U:m|7? <i"g0(M?9J%RB %f 9m42'g}8E+:%iF> ٦BrJ*/SY@_͞Ǚ!C++gy^[kd.{Me 6qIEj:9Ϝ]14JU'_B.op!4:gC:w\ޜDWl8/eLe%rX{n^0cC%_7?/,YX Oj![EYo*6~CV>&vVC18h)ek(mY?ÍTat. q#9bS:TB18ޝo52N,Q4 ΅nT@jVt >d$gAKNg-!^;SL'}VoUN盭קȽ B+pN}&$£e6vF1~+E;W@L8':AxD'`wIcT$0Κ>nlrcb} hZ+?A>[-}tVC+M Qb* ƇI CfdYv du [l0 2k5og@r"`eskۋZZYSX}߅8ċ6'V[KCK)X3x8ܭ,< RI%f[Xļyʝ5 Cgb ãkh~%^v:ju,0=3t⌙E閒\FWN[U B.?o"ס;*f-q%K<w&6vRyVahk:QZ:0H4f1Vi5M/ˍv栱.;jhf/&o/gfx.C=dƒ΂(!CѦe Y^yk6؇9* bf3*$ŔluÉ,^H8HQx?hhNp=lʾU=W-NrE\Mh#Qms 9ʀYT* qT)|Tw' n}4Wu^HF͂3j). 1[ x9%lj!/t$ `1wV`1ٿ9,\2&+2lPnYx[no>I_Q(!zG紻*x.Ҫ1kZ RmQ(YAst$U$a!~4qWH>`^x֌Xz:Ng{W\~M.^ßΟ|] a{Tn?8T)yeE3j(񅗻RQEU/,YY:LTmwUZ qetj@Z \&ԙ'xM| -4m?K`GIM @74q$8 I}u7U1W I d#ILT>7 59e| /J@YfN3^nYV{ ,dCg١({LwO߇T/*?`m+ZYt,wʨ6.GOg E& )e$ g<wy檤ө'%n+>2 ?D!kܵ/ډK&-g;M+ҮdfF8JU^gKVճLWǔ:c˘I҉'.ƃ_;6Y+ٍ 7-SrX|-Bs)}d 2aSbBW=pSu[(s\<󞋆"٤;?S G@N#ĖS3Cf^Di߈-q*1FT0+ۗj59E,YFLEu|'P΅ѻ|wtfž^3K']ўghH=a݈B{8 I|2Nch\i5w.uz7J,.>\ ܃θ.m+5IM'\?4LGe\Zx!GE&{FpNZՄuPN4sk}ͭۤ.*#.R;1_CSoAUhkA!_R.dB֗f d%T729K%b=3!G2i8UX!l^Z=:?,A ! 
MuP,J Ϙ=F0C=Aϓ3@Fؙru%TwBA ?3n*/K`h߳Uh@W7D| h U%w,hC&CJ9mٳo[L@F#KQ}B (ըb^8ͯ%Uf*P$%O"]8Bp `c˚zhMYM}[dk$V:*XѬݿśўUVΛ\J%2(OPJK},OcD(UT(-å}~aڟ fE(#tj2dKE&Ɉ HcW!΅d),<-F:K۫ qa VKVu"FՉ:R[G޶3ڌCy-]nHJ~XfL%ʕ薆)78hxKw>~] > Λ?b [nNǒg[%\@N,sB ڕ/U&K۠2bj"S BgvsB)4JZpE =Tg P0P.5< eaO.Q} s }@? _36!oFۂ# @ߥ/m !ۈlzƾ2.+EӤx'Nd-oQNp M%rg~DrPe9͓6g Qt=-(X9QhHqh h`_;>̓?-kp9TlkOw$aJƮ_I-8 m4"Hh :4~:A?Z9o+@-SmKL6=svjCZ _LjK9-} ÿQzPyhGJF_5ʼ'9t7=WlǤNk! p{¦l§e Af@kA9a n\Fh+E;Gկ,+vtL j&:q]Uyxޅ^2Dx]Yek`ۇN"* 8sV-uVW+K2?gq:?`k\#i{mNn|v)YTU&OdU &YjsT]0qVror"Io{(0!A}:ȀbL⿼$i!N_ &g0MY/ImZU cWB3:\y+.O2'OݗmR sK*B:6gǔY~]#ߎDiy8)+Ywi;ETҰ-r`;Qk!8;}OpLƮR(y 3!:uxfoq4Q ̉7z< đl~CQvJ70`=N|BO:"D\teHT"V`jeP<*:Y0fۙߞ39jgȈ*@Ԧ~MX[hPG,1^-WAdJKHB(g/fe>:2+1qF`mY wG^'VYK[ey de;<'kZc{}ot{U8h Tcţ#zcˎ;0R]dZLiƳ1c% R?@XL<xހ{G(­و𷭦 [Qrph> Avmg$0xW@\LJj\ޗߘBO'Ф.N/IOE\pwp 8NOu'!k]H(8{'?(IY ~BSwpGn19N2{bv C0HxuO.qi2Hk`Dc%mW~_ #a{Ů\Ɏay rO֎/fW!E7 f2U3PDCGzԾ墚TGL8#ab%x=}_9c<[d~ &Ko<]:A, Z\%*J}͖ D=ܺ]:"pI~YӍjoP;Uy1r$@ȥٗa)%( cp۲ %}tݩN#LKٌGr7ymyM9F <#v>M>X֧ctSsz0>W2ml"DWO#Om@X( zmhaelqDiL4@ 7F. 9wΠ5Ji]`ZՄuҏD,8 Hu m(^S5A䔧 b=v[4`бOCS0y Q% D*!L;Z>u z: zI"H EfFOe8=*%"-Zg˙q;.z"ŻApHQͮ;NP[YH2ƈdWy<;GCvƽ\xpSʣKpyV!zA&A(d:g<^*"Ķ4IгZ\ԩW>t .9־WUaT5H&!5`- X.x1¦;pXj3Y"]WXo:8y{˷vƴwՇ4Io#>3 ?̬FMw+hgK;9k?]L̙*TUK8yv֒[4d؅ndz 4$% 'hK? 
n%$Ol{'Gh3V І=770x[(sv@ͅJV3jj]v_O{ K'rEh=OB-<|ay#ڍFW67MlWu871[2B˕# ՝9m[T * B;n2`ia̘c>}=Ny, ^:r(q1"}x)3/omȾ5.P3IE)l՛%χꖱj߯ `FI3sE8saEp:qAZ#^;o#M/p&楬r=G-;l/S~Ąex&1F^)-s3A{6TVC@)Yfm;GL(+]I0 s  \?#pi?6MJGI;ͬ ϫO.!6$Mod>ouDX֪E)tWZ~6~f,(?uQt' h8e^4~AvIbQ7<:Pwx_]j`J1&]P7qd_&`qJnGM,2j}nǺټW p︞o&3U0i27I&/ؤoS]-肊cSl֛[`R0=3Ws|~Ynj I g!P Q߂n _)f”Y;`9knF<t%B xY {6mvQ}.~ } F*N݃ «$νZ;\s 2tevWUn7j >S>%)h@4Z`n77M?8.^G|5FFuF2^6}*?p)mlsb'%#_ TF# t+ y#Tcxr(qy"{E%$ߓ +'yb9|ɹЋ/fѷ]:6 d\|AL.5M:D% ]Ndke&nF5$VA 1 lh"[+p\(큃`ZgB<If QA&gGA%Vl]>JΛ"F"gb(sTOO@CDtX 滗1@ jbUK#IDx2f#]O\eWAx khJ0i&ovL<{9a, ]ZuwþZbVA WP )+%ha|BNw,|=G2P>WqK}cQ`/i8Bi/rkjee7e4Zg nt :l˯k ,q^.NVA8KY[ڈ8:(cK/$Hd A|6 3]U V*þ@̙)O6?g N K_jo_RG(͔l &vsOS'yKR`?)@,VVlFen~g)OE,ԕg>z_bQKǺDu *q>kOFboMl VpːӑH"0$n"QwهJ 'PRb Jf= e'2)b+NU `^m@2T/x(k3Xbe3ktӴGe(KXXv6{׈[&;,5C&*Tߖ$>VJRpZh4I6pfT( 3އ{Ny.vP^Ju/dqe_1V̫X-l+ӜFTnrƅ~>'(dns-ku@J#m-x^ 9-d-A7UI$bC68-~Q'VXҷ0ڰnڝx=KO9:ㄪpeT@SkQHp9[,l[C A֣d;^ GKIlkRt,ҠZp%7؊N(0IbоԢli_\/Wӟrc>"gE,<4Z2hG.*UpA= -h=nacAjkzC,gi̊zϧ݄qX؀'2+jZm(M'H;r҈MNtE~Y.ܴ˂.u3)0 SEM:": 6է.:qRB}pR2v K| rvz.eޏ'6yzTNAl1+S[TX>J^:]t8g [%nCVj ֵp 0 @✰hڎpDN FjfTE0pP+yb<ψty1yx/tdv$"d<D??,YMx>\Ԗ4ks'1Dӆ9X(TDdpɅ~w '|-!QHLb<(y>N l2q͈ oE{FfMFv-,&Jk**_'ﷂwwF3`L)1)&aYdI^_Ci}X\`t('Eq5~|"ھ $|4 ) +ڡ)dH"cl9RMD~ ,nQQўñ(OJ5җ]bA=eU N vP[ 9:G2}=R̵[ϣ#_16oka˛ӴɊwu&DYc< Oҹ4 w+%+3'9sJeE{FN0bGCX]κ;CcN0;: jOSi۾!KaV`ea xlϿh0LjᝊR\4 0t:u # N`흮i簛2e">6.%B>W݆$A&y5_ dhB?tzIX }a$TQ"`>V ND+u7ʹ^we)"%n& ȈML8pc !V2acÒ\9Ua54im!_ڨoEЯ$)R+!3"KDcԘ=_@(jfp(g@aolBtǩZ9rd﨧Ou-;mrxGc[ZnLt'DEIuj,J| HdW6a,"TT 5hO #$+=7InDhXÍρAqWHak0H(x߭fxR.,aÄN > p­A,/"Kxs ӊیype *+#AYۏË*Dj^fse,: 98i@x iIӒ ˻i;1AQfw塵adkSLc:OfRΎ,w0/ F NQZT†gK&a0N!v795s8+G!x]'+v~hWCĄ|Bo|c(17N }Z=23H_{v =p`~v}.bsAkz=roS) j5i-j׫Mg3%gt$xޙq28k|UjQ&:E |sJ=N9X>e6#dҗL_ibNk`HRPm8bH/?IA4AsLN8;F1r4+/4{s[hx^Xe_"$F\`왕L~-+aTyvoYmJ<ޱ`Js}IW H@I#rzt}4S6s]{&o{I+%~|h:(O4(O m{j%#ƥ'fL{͙dlD7bh  4NùadH/ !] 
4ܓ9jWǩ""Ն>A"ªܧ3`T·$/u!M8d~%_aQɿ,} [Xd'Rb`#}Aa\aoxKsY,T4x=og'6]UУ=X8r) }$HMA ;r9}Z* MLhj֯P @;*S 鋌t,wa~N)o.= obTGg>&\\3$reM3~ LTZC^0Aw6Hx8b|> wg:I\"l('TfՋO#˯n0fOyE?1'$m=쟔e_z(Y<H#ۥVi70ʞ:XBJP Vcr0 ma/MESIև 9YeQkިѩ.mQ6w¤ֿ(eb$}+ϲKw I(k=p \nL(VcK҉C"olY"ڙ]\yEdvÓ%?K}*=ҭlG\9|]+ s2;Ec#A/D V6RV#]+ tq Kal~f_A7Ctd<]>J^؊N1)8 mk=|`rzFT[mb~8ϋJ2K%ˏ"1.<۸# sbroRuUњɕL;i Gbk/Pu{2dd!Ca3gwfqsѪ/z~P :i2g9I4hp )̰|DN"X2RT+!Ht~5ƙh DBi}aױܹcڟzu1,*)Ly܃\a)LRsn -FCYzK-x4u=Ƅs&!00pӣ-P[JaIIփ(/jרȸh0DV`ɖXNmIdcXRI۳nP*փH~տpsfbVr9V1$T/损u^>M'dP<n<݀8uc% 3^khmi_y |_C@Yf3,^5^F2"niAR J>B.x4MoK=Wmj`GGK . Tر< sBq{_optO5Qxסmw"6-ˬ#XnH[qXp#h@䋥Zƚkll2v1#xy9P-ofyЧl!@ֈGG|!> 1h r 4A98+A,O,n\\- >pihe=%GJkO4Qki9Q1@m#D8E)J!t7&۵],i !Be$6+t1Y!?M Rmr! ^v>Aj\Q#{49[r@snXm/OŋSeNv*`|SbMKtl>[B\Wh^T'iga'# #TnH3.Pxpϧ ?93o4LzPr(6W.gz+tqY+;=SHgYsVM.nqϐs)%nBx"ȞЂLzA{$2{rKUN2XFJKyo/8yA0#X!'jTYUީIWg*!I')24"=Re"-$pHnϟŶJw51C'nkF G:5#108r\w.-1tNPEk/dFHѾW SkK`<}1J`퍩t_(Gx9m%3ZBb?%jUT'M&\|h-Tr[VNXe/ th{QgAEv6~Xa_|X0z38h"1Cawf"?@2(;fްA_}`ڙ|gL٬.y?]"4 wB%v9i9Ӽ#{aRfcD)lPQc @KmQB)_ gbaΝX'`]p%#3յz[6/rRh",⤐ݽjChѨпAz,q i_XC.݉yu'8 yY(wMs#;grd $4((0!D]dUc  ?5D dN '` .<;෦dNbݡ;@8TqX@@2h@S \t 9I0Agj-g P!H̟ؼD?)mj'jpZ5 RGNsU:X,fqj嘛1}"uN6O~!ybɕ}6Y+0$Ìp]Yӻ偐f/pMb_(RK]qE{4SAUEzk-oՙX"…%1*˟ZedK+31`-|vzWUf H} 1m_.RZK KCGQD!PvrYp]B^a#lxlBخk B ї<Տ9Θ鯢V3Kd;a; &(Nj ܏;̬]gqf$e`iGt D 5OρJy x HĀRzӶd Oݝ]z|i~X|О/*v`Ys7j !ΛLw@?]w8_n*S,鏬v󝁪o[DgxPBT ̄t,s׷XE!EbHv3y(݇Wkpt讂U+[ &v qC+0~};wb? 
*k<KSiSaGeukb3\וq<{XZLGăk$5I(qN`{-Wn-w;2q+5hn;O_X3}Grʟ(VVn*ٸ#G)qw_OoE0YV%*V?5Fٙ~# l:ƥM3F_|]Rg.C$EXQ~u!$gc!z޸>iĆw;Zlܮ\6U5WEVvS>|HeuG%{%g`s/.D0@g>VZNX`P#9ye^iZU[9%=P֒mJ3R6iŤz7U8sR`}+Ul'1e!Υ041耬n%|pUU䞜L9 4luJƧ*i?+~hD@a˚xYV2皲RQl>pQl^o7u:[8OWOtի T^PmzQʙUJw\6/@$p95>3R4`rTؑ0H&.(} C%*ZLG{L?J!?Qus06&(ecT١e&P+F]<9^t׉ 򒷇`WMfO$q:B{\h>ڟv />T%c`jsQ<}jqŧ$"!*] <%jn|iyC؁;^E7aÚw-1:*"-hTF\ѐYݸs-UϷ賛X-gi6'(P˛aG+aBύי.ލi?T%I)-}9h L5\A@-T6l-'/4CeLV0(yrxӍm6=Zv'7WAxq$a Fg mfBZr,$r?ŋa)ulC@n&nK+=%`)$Z ?ݸd7`%S]t_z?4o7u{ jFefe4dSk l1 RZM,j^Fd4:Lac`8F=ё%)eⅮutqQn%zAGpck}ϸYS Ø|15liT2=,X6ေ:qsZOnSߝcf4ܑx*glۤyUv!`NU{9r,$r?:$0(ߝ^ ^F\¦ˆר;pw?* q&}:N&Ѧjh(\5@Dz|`ᩤ+Rˣ"+YK\~;JCZ0rIz$M3F-sc"'fh m(-1hf~FTCu*uzv3rMXq^U~{&7݃r]DOۊr0ӛ[C"Ƞ{z#v!!F^~ڀL6:땼p\UX%$_ 1\eqh8vijc}[ * [5RUѫ6.$ޙxuOS }gbݗK}:!~حArII%SLKŤ\n)贜^4¾e>fUD̶T OSY^B2ZlW5C|1SYF#Оdv݈ ϱ~Eb2Ao99*TOZ\qi&w+@JB'={uqZ2l^P tc;Wb{}/?nKGLNZ>4dwn,3p>U`3z,== mg҃S݇^}H'xTd܆(wڙbbLU*U}-"kM9]6[lSMxx{tЪ+rʟ(V5'ʯw\_ؐfFB{> ׮lduob \wCm|˩VR<+[z[hО;BK] .l p.>֗RO86A$byKS=af_-^;帑q߭fF}4&n I0#ThȒvqBs8(V xd1>Dw,aʎ%h⪭4|b'guKn\ iRRc*Tm-΂8^jZIp?kUظu.Vx2M Y/{e\bFp2돮fHk|ӿzȱF3`%S]{[zg`?u?ls 4]=7T|?_\-v :JCHjse饆3]xUUA(qN"Z).Ѕ i)ѝD5#kflZMW3uPMN5&"UVsYXxyy Y;/JMS pPMyΎ .[ U[9NI El!ˑdP=][}?x_m@ &uexu8.?ԪM dg֮29v|R>qpВ1b/McӟSv3-Uf~+U'ܵX-,MvuF*S"Qׄ<[ kMI KC4 S5>%X9-}ٔȻf(%w<-GCScvb:po!Ygp+닣444_gy}}(?!7.]b.کXLJ1|MR &\=䬛ºV'П] ,/|uSb6%Drk 7eׂl4𩰢"3?{ኃAh!}ɤ-o`Ӊ4ivw1Nmn֫WZrI!A iP+2г^b\Ă0!=#Xv_K9w{ے""֏Deۋ 79-OUX 9OuBfv3br :칿rfT/ϼy錾=֐p3I>5BT=ܳ:^n4U! 
"Dic6gon8SFhȐ1ltΑ==X b^GٌY, 9Z`cBg4x˚z yu4)H#pb0EiBu)!8[gm$/>ˏuԂ,fE +PdE0f+I;d0X^iĿ?RXz WNn$AGg~M$;&I"4¼L'2$9]P0-į@ՠH=(?Lm}`Ϲ*|XrZ-fI?Y}?m[,3eZTXff1<\4Dav.K ; HKaE9zY}/ޡj,Qrs Ty^޵Y#O #EOpM  cgv;Βy=Yi:ia@{iy-kUlJSw˸!CcZJc=gQ(H"[UTSyI_UUC8VV^BcғG:\D|B3:}nK)qUVBzs}2kvrYoWb`$0(k3AP u}f^r *DY+s5wn-'.ޕ"Y7-Lo+aZ+~&J5f#9ؗ-C{㺯S)wx[zT45郱Ii,Op9 pX k߹h7& $cr"'˿B˃y-hT5I7迺J9:KJ%V;`p/,Z,oCDRc ^pxg>?dqCPߋ10?b!&l1 ΖΙI~qYz-q[8G&+ x@-%+f^V&xwv("0aYl&8GƪDmռm\)^UJjŹm6n+&āN),'Utcwvh4.9]zlASx#LPEN͋&@c5# 4vEn ìh D"ف&K.P܇L5xw=gw@\t_IsJu~`#1_|Qk.1^SXfRRYeQ6jGP/@ܚR-*?\ 4g-٠_=|=oj io&KN  YGtY,Wn:dz뚳Wd3@݀MAo-:/=D L٨(UU@ΦCN^s:C!]u[쥆'+W49R e`q}Mj %RnEa7^K:x <&ϊo Gx[\VSTuѐMg3*7UֺP9چ05lcY*gx<-L>V23d5;h (|q1QS{B2dqizFѸ!R5ߥ+Y[_!+R^ )T] 6FE=|GM@;/0 DjYZvS(aͳcDډAtBGp/>@Y[sRI; 8r7a>bh/` 9)G+T!9饴󄈇QL[&GghI ]ko҃ /cwlb (wB}C_>8چ _{v|biu,m>"ݦ&=7AгQ`"AeSI`' y%\ /_Zqr3bJDxf^Ks-My:*^iDơ">7_%'6C#3b6h?}(݁.zvs$Crx-b6h?}(݂p)vqڀ=yM(HAlL $o<wM+HO tMr-\GR߬ʕ@ճ" A +2_殌u96].ېSP'q..HtFH]" LE6* ^jLlswJ\m>Y~ѣB(mDE `T~ Z?fӼ~1${@U178K0Ub,rϧc \wY+-w+U1e`[9^+{.UR5뗆iy7'Cĉ?NB&Zn;30 2cR(T5H̟ؼٓd㐉v?XyBҐeY7BY5M SgG/~(Z[WOP54^6@̡`O:LQ ?uQHLbCܒX҂$ǘ#E`oH ,LbCܒX҂{栍e6nL+ySvz].~nXi>qUbvA~ċ6 xU؍Ixm|Edj @9HZrPQFGKXjRoʲfvt@ ,8,1^աV$F,L;6RwCj֧ w໵<ȵ5]Ì5.ݺK9뜻z@ħWGD7 WlV , 6ae6KV堺VŪn؍A S`RfmhT Q:>]kUe߾_iM}Q;k)Rx~kfILga "9K1)JݸE.Ѝ^$>y}8!)GHszdڅd GGG}]]m^SNO|>7T2w]|8 .TV-63?n:am|ŀ@1Yqb-<&o[ s闰D,B ..ّowEp-BH-* d4GW![dnTdT#H=8h;^HrnI;0 @x $r{+趬a.]B$X?rV'p[pZ~E\ѽr bÈhYME֫S XX0C?9S/-5ܟB@{*]ͣE-?8=n  <&TaX\CՃ $qdɀNp"}Pߙ"t- rhX5a&BvAY] b,:q[Gp D&?d1.2Qٯ:*",0J.z՟ 5p4k_^-iLj@}Ncp2u)džFˡ2Μ zۀ7!v {Y[iQՆQ,ǁ&~y6Hv ;ϮyH|0J=T.n`&;5'{AS 1*8@ivͺ\2ɾqa|t?ʔpo%p%)A N?=M'܈HSӮzdӲ_ e$ټ=b=/1t"BDK/AaMĘs㎩WPsIJ?vlAjbϓ5{8=Zй|ͣF! f Ѣou"N$ۂj%P|[CI js4'nBQmpzYu~]4HBmN&xw[yWWع_O ~/yrfݐq<Cldi{? 
}<'Zzz+7*]Z/;F2uxh =q26 U8Yn-ja덗ĝMO ~:S}4p,H^*ܹ,ojC>*qQLM r-f{Bwfqw)9OsMwwi79rMm=aaTWIev:,;KwsF Ğ\:"݆[¾[>\meOHn$u;"'%\em@i?7JHұli'ōXFkyq@ty3[l Pbx؂{-T0;ezs6M-/7ޙtկvvML>EgvZuc2?N8fb "PL{ M~6MW1[dR_BLiY^]5<,NvM: nb.A ~+!OYKS"jr5 a1i!tDKj/Yڰe(#ߥcsz3(0IGQ!"U)߱nԞ + JK0ew򈏶N|Vy*P .-ux-j#p;=ٍS8q?xzz}SB(q .GJfypWrfb\'o.X#T:,R%*GX#o1GP(aja}[dt:` >Fa cP#)A l̪&P|h*J;5J&07֚AJLz\%*@Ҫ# 'NqFA^q& lh_WʶT4 *26RRYk\$?,'p_ `"ڼ"*6=i  ~vm&rwt t*|ha:|1A!K.I\!Hw 4yK| il }9 5uISnaAN<".)S;J!ȩ*X$缕h:(pg[cKpn۱߃]m!\(k[~}]ב&+E4klq {3R{%̜wTSܤ-}'W$׸v&/%`^=G|PLn;1&сUhA`$g9?!82U4\u{Wݱ<rYӝ6ze=)xgx\ZBpaM^ae1N=wP@OKbi▒0gJKx7lŮĻnؙqO.L8O r z<7˥*k8J#$o[8}b4j٬+0$ISw-˛5w PBhLBTՔp"ttCj?4Mb<`+;+%W &uS )W %kNæ h7nT1^"=dYJ dC&Yǯ[c|jkqʑxa\ns';ݧb~ͤ>uwot٥ ]zγA9^N5ǃ"3 38H'/K-DkQ3lȥ-`_$(9~J`6pQXN[Wi@xV+m1}zF)RT@8;蚷᪞-fxϮu?d]L䊟P9}h"ya`rE vugCȓzglqc#IuvE3t;j<b/?B]!"K"bT>;?k (Ụ9TZBgAϊo]z-o286iLrtψ"`15ٳjmbD-RU/MeXQN~A1I;h[Xu@|\caGyW2ތH_.2&(- eMp,Q.b 0y EKT7_&.rm%'M# m$ޱ8{F,n' _<|@޾|W1Z Dr"-ihN^4S_npD,&+.FO}c\5Z礝 y-vZ;XbZlKU[}m؆a/Dzk^z$cDo ;/+ً?Կ$m{ԧC.{3l8c75Ott#8BHx:0wŰR\ ZAS-{Z@_r>|'C(%w:Reh7|1woRj?'~ݴ %?O/AAHEdvHP^^5^;>쒓.d??,:H_z'GX IcZ6]:rn@2]eT6>x *кs7”#%{m'Mxa|ѯ%6r{jFJ4SW|#ܭ-'N'Y$zmREcyVgd6zœ-hmӗ@H:X^gp\F?x}B`T4ۣRY Yǽ,eF88 T$&5`4-Vh7=iaIyQ"UY'am@.l0c{nY[s1}26񍫏E<4Os7б^9D˷U NL_aNEgPr0KV*2= |-^IeKdMK :"gse坰6ͷvB"4rG3`*Qݻ3x8ܭ,<l 5~ǮνcrP2]/!$<5u0fRFvh1 UkFU6ځ:SzOdWQR@Ղ9ޔ{: }w E)J| Һ`>d8OhUL=y{8G3 a PK|&81g5Y̵[&6Mve[]8|C5C4sjtCordwwۻ~$@JKI'v.-\h9Qks}8|g{ס AxP#Z/;F2F}!񺥮26 Cxk$U6E+R2D2O&)V )c7/2.`tX^{3kYP"> Ms4Y"OC `KAjW۷с؅m@hg!'_θ`Ѣ(/ǐJ ܰ#`afƨog؅?Sگ5@p C'Jyx9[d\8w<,I"+l}U kT7j1Ab{AI"+ljFaPދ*g/ 2OD'd1;. >^ɋz~Vw%-:bۚ,Yˠc _^Jgv1S詖 񷉮13Sh/*78@:dT>u_8OplD`]e_ vOS,QznXw&@YSTZ*3XCkl{n0'1B+npY Q ?!"!|lN8mg0׸>p.H+ZiU7ce" 5aѡ)Ik\5HO|h?`T|BAk= Ofvn^UB"=n;PkƮ-d\>V rhumo[|,4a'p /$meW_]A;a{g)t^ABhŇAwdgOBJzqz̀u;9Cw6_]m*wu&S @ߵc/oYY''cT` Q1w<:0wܩ6 kLSO.'n@1.|Q8jƫiՄ$( (Q`Bn.5ߢ(JRS d!a"B0X) ݋,,:?~X CeYRFPU£"ond z:(B_]Riֳn HE©Q0ww`&[#rs&`9c`m ЪPˉٯd9B='ڿB=C}ӳ'CN\KⳒ^q)FbxŖ J4/t#O&TL̞Hǡ1w9!ӄ ChD 뮩>9AF g _  8R^T@$5\x RSO0ܘˍ? 
GL<7dQ®1s+ܦi({>iM}GYZѴnDVe &$*7C'9nK7j:P&7fU`Y $q~TdanWa;X'0(q I@[mIL&PܙpOO85,T6ZsUϮ0a.8 ":x@`R_{9U~51T^:u-s06-$w A.0kȃbSwNzU5P?aȃn>JOwޑH]VAtwQ> Y$Uf>ȑvQCNI|q*9qC'ްQg w{FT%;7ǝDYHs%fjZ_ycSƾXUW{5-1 v~-%P*;} `J35sAQVŴ@ OP_gn~JK}9e3u.`:Uֱeҧ%3C 6bKc|*;Hy/@5uZteӠ2\J+`5o$`L2 L6RRX͓7}+"4L+=\:ߩ 7dIv֙؇vS wnDt!pdJF6%/x4KaE\K C~u26~9NTxpP0 ҇Jq|5 A*ɉL7*UqQݖ(jӸ3| vN.#jt"_`PScyCݫBy`R3{~Vї8ŲSs-XK:hXLHhAUB>gק'hW?{,[x+pPȦ*r0PMnčqoDj[ ZB`CfR[d۬*0&?sqp[hest#9Ϭ*-Ln1 |IۦF4I \a%Y}qt[676]&Fp*ĝܷ DMoW-F`,bԹxGW]!$ /cWyG z_lDq]ç]wuհLK"`H6.#\owl!|w/L|]erNȝ C8iFm$vjd^FY e|YhmɳRk7`D6><ץMf%̜Pds2Ibz$55FJ<6f@,n519|zqbȨY/Laתwq~)~w&6?(kFjqFGJSJlP~T)r  ld&}a隞PW.g>jX_*o)׋5bȀ^m^UTlyfu r< "·rd8ubvKRS9Gu%2<{DB/З~P Dg0Ih?N` *J}P} J~y,W)=xSn=;~yC7f[T}#;HmEs\V?t@WN%NIο}ZC}7aVBLxG3I@'L1$J㄄A$W0R))ȁ,QM@}Ytw%͘Kr";^YYl5ln:]__rd%MQ>WXιU {"J<s_J5f+X^~V5PNv%s?>Mĝ 0 9yP`%FxFvWv7eS̈%mU>Ϋ'˝Hc Һ_d23Ld^08QuC  55L:#}44GHni7_ H ׷`MV+em*busI?/Ka}o{.L "EԲ֔myJFBD$qc ;NA[t@tߤ2kC\~8G!F)!Gr<$Y )oƴ^>FPdUam|^DW[UbjzY/DrECyj'AWEp|lIZ9D)/ ~W9%ou!dM 8t07Mw+jcfX|#) ;d#=d/^wlrURA_6ۇv1Ai$Zenҍ3_9tB)#;)nF` ۻ"c#+As u !R1^m-5&1'*WJX8Ev$'@i ,JE_ך`Cy BsX<QQk\%zoJueѵR3H+\DW>2l[ӧ3N5SSB^{&L%BatcC)jXP$l*[_ñT !OEETj@LvE>gc XR̾Cyިpk:i5"<^D{dꩤxj{* fraLx1@d /ۑ cwf)x,_Y{Xoք4t$#^'h3d߭E:# 0 RS ggmŏv6Q.'"Ot6*t1+8.1akQ$7<=kͨ"/dN"7,'8=ˆ:1uj<?|?}-g՘%؊P=,lVHmUʋȯv/z!d2)v0&;Zwmj7Y 9"=3$YANf@=Zj'#_sy!niEG~@^XVИiSqd1$K?wi tN3HDbldnI A7тUH^f+0x*Jg)pq2*.O7 :nj,y<">mj?ZQ\p]h@1‹2ֱJaO2sL&A$Yg,ҭC0 *2D@GE4gC1Tij6Q6.q_S ×sd4J̑Hq'!ƬHP;uwiL/Ǽ<{Z;Ov~ˎs)7FzD&ZBY.1=!jg)2ʥ4m1Vw$MnB:g<#2 ށrJXLo|\ Dj(bOm5!U%kO*]}/~V?OXTu;$r|J>π VfBD4tXc3G Ƈ-Ёd@8x,ǐ7Xv ;u@S ץcx58S8c+ybLCǓG`~ XHOz6ouslk_2Jtt=/fUoa"hفCjOʸtwM>+s8.#ny0'b#f1GK1.^E۬[1-mĤr#!{>Ge~MHdq@c$YäC|U]~elyDL)Xe.uP324Y7q |-* xiH%.Ȧz.2{Xn@D*xfB$$x X[[fcJx"ǬEa4Eb~M} 1@ 'D tVS۪\3v Q-Z/Ha#IcwXXLb<(0(4DS Bǃԃ_k"4zFw̻s_Ēgvg DZx@$jtOKSY0_J2D&H`M|#ި`|GlT5A@H2〘P=#' ?ks{Z F|՛\1NJ{9#: ID{2\RpRJ(S75n0[":+UʮS@аPxx T s x HJq.%Tr~#Ò \!\HRu+*^l.Z*0^`"efkЗ! 
gڵ^_N >xMCN7OT_8AM@C3}yR*` s˚VDk񔐙< gM!sLc]O 2`Uv}4y%6˧*9NY?L؏6{ [/ZLbנaȶ׳ן%=w9i'MS7T1-t5)qyI#r|*nDNz ԋ8/,6/\s*ΪEQθ߸$EXY~p Z?=J4/)[irx`$ o$ե\S%):ݼv1kt957'(P]B:Z*uM|#I =4W11+K|_ 'Й\kyxt<Q>ΘgRpqTZ1;3jhLeJ[,j0A/;7-Q.N>v;^.y]yHy}zeCL2*4U{*YXs^E `u/lddŘCDlVYHCs?|()CH1ÁGHv8J*KI]1:snJFtNZrC~ qo>Y\-va>DAi<z[~!)KQS"hc4sA:/ǐ5ѭf]-,(bA޴>.4@t&5M.M6) MU4GeXBL cȝʖDzj%9n@m& 7*^(qk oG~q -p|\AKf 4j b!6'v0Mle;8χ?8 kMi:,r\椒 P]'7)`X`DxtHp؎kÕP;plcMY9jZqxr}rԬ; eLƙikaSJ)V1Xd_.ö?5D}S"9Lo =qibpoA~i .bG#UF!l|P}Z"-i;JJ*ư \ Jp/-0)*Dc$ ^/ '|4: v=);gK*X &nE-/,%PHcC7LH"zNejWѿ0 _+SYdJ@$Iwqie8Lcx<\)*EO RT&E`W>Nag|TD%Jm@74_N71lZE*ST^ (Gvhe^{kG?lx `[]rJw*(W݁SMnM}q}"l~34EaP#8;h rtwy9eB>*3DdNP^9"9n#tfՒ> )!J.¸ D oPhGiTh:1)gm ^w TJ'"fmsNB8geqC R=ː7 )̻Gy.Ѵ 7%mzuYO젿yȖl`e:xG0A a.|4aKJ7y|AhGrIqczѰ;"UulRN: 5 ,մH;CB( lz.0DO&s .y9 5ge}mU䎿7/R M!b*esJAl!'x\ژ-y6 Ef'_SO& =fmhh~ӣ9&-B& гFCOcn['F([I'f5 W.^xhE:P[9n?֣W.&JFB}Wo*6a&ZۍoRPԇw 2Xd:bt57v6b%7ɘX2mk+FK"Oy ~ۥ љ25G) uuk<'{^F%%/ <@nʼc p(fBpF(}`A, r q`Q6;s!=G*E1jbp$Fš7.,HU@ɥCϩ/;÷@vQ z,DRFΠѓ.pZxFɍV%߶2,ZՐݦ,O8/'h!`8qk)T\3龄,> 8\I(nJ[O"\&iG[|feU4D :GFS vB qtC!Z՚›*0V 6sv=kp jbϾewguHvXٸZ9C|< OiYVt" ms5@Pq)wܑeBY"-FA vi&4Lȹ[h*eS3wL&,A7HSs^ n3( oe5ZDb[.$AXelk?KzSӾ1Pҝ1u[i$w_{jMr<6[̚;@R ⺥뉾<$ L]2wp_(|>h0]6>t%iPV$!t`+ml 5ݛ-lp, _1NjYA"bmd+7SjQK= կ_?f[7{B, v2 ycĂsj;>؎ֽ0A;ʮ E8P%zJcJ.5Xy6K=2|ϧf*T]%߫JsJ!@A~QOm;0'[-3Z;"A5Გ)ÿLX#s$6"x!^BV}Qkij*.ɪODOd妖l,sMnLUeK&$\Ixj,CVzr]d T^N`z? qJ2KnɒfH#p*% YuHS KQxI-Q!N)IRU 5nW|>)$-@nrKGQz_t [ECE Xvէ;1㬸ΣCZ*Lv4Gn@U׾Ң"Ɂ{V掴<1a½;#ODKKJ7 b9iUnN Q8J%+*MKw~lsvkҀk'r8}ܸ3.ܢ +8<!? =+Ly.EK:7g<:.AKy'I+koE-a7ϒ7fezwb.T=?h&INܪkX'Ph  WM?iٽx!; BJO6Ӡ [{ƃtL콐#IcqOH,Q6&v/"--k: $%"H37 ƵS,?PQ)%~a: uLĀWm t0tZ~0TȂ uY1m#h]p_/TXf_ZhMxУ2yZѺԙd|sBƴx苀[#8I~dKy0>&BUU5^"fyѕYt# J7Sܷ>I3I^E X2O8΅|v_BGV3/ŚQژNYqlx}q=ô6'~FQgdD~N;etUtӠ)jHᲛ,~]-PI'»Z߻j9'*Bj.eD;ǫ'{/C[{`+_’<9iR%&9?kƨ+pw~k{I7i2CSvĈdq0kA1!ϳolpS9)=3ʨFWH|)Nm0liwX9 E#2W`EcFuIabJτ~u+z![È<#3ɷm",cey8=<۱|> Ow.+O|v'#Gsb!{ҳDoV=:/tV,}cEOD{r'#rHy-!]nåQC𽝝KV Pb:fp6cE J|Rc[BurўXN+a84Jyf] 5bIG~CFN+ÙQ'=v%~ͪ5 kK! 
B a}`~;̷YN/2 is = x?ܢwKS/闰QOnˮ Lj#`' \GSG%/]^}}yV}oxg˅Zü+RXz4mqR8[A7yﮖ\ߣKgA2٫ԨsHGk~J39/seS @<~@5|V,䉆bDaTEsk%N%"0AIpuэ_MF/ 4=ڢH1 lSOr2-PؾSZYFAFy[y1[Mݲf_WJ 4e(T]]颂Ա|9?%68]eyg$'Soa69n:j,K s <1YCX4]Y^^8=I-isx|V~aSjfn'rS؆t/<۵ſ(iBskR y@6W樘ҠdmГҿ므mA0;;U9=KY]h6mE$OB4XݜFY> 5V}O hO՚hH^6F0dTeLs:sQkR1W13Rɒ>f[Μӵz߮E4W+l;+hY\_x`a; C )WۭͧN{Jt`)Ztø(;vlJ-w79%f]zu 7\ Ux:J(>(D9M;A> }s`'DhhVJvukHG 8`P z9C[9E <7)!1WfY.Π4-%lVo}Z.k Z>e;g?YDB'~Y;ܤ5tGrf9cye|+}6bA]{9)+N 5琅Q^cT^_3I)({v< i?^ x),Cm|$.Nf@ x{r3 ecf%&'oc(2iq~qWͻkJ9NB^Da$k׏Vl2]& G;kz]ߵ!=/6Rcf@V RğIrD4j'3r뭕܇u|ɒ0orZAFSd#(jCª,Ӫ_yޑiD-T哸DA[Ɵ@DWbX p *lʱA"zl:mV"l4Z YS9xi$LU@7ɳv &0.K=U/-o4jT Pߋ <_g:B2^k;sb~X<_>ao)ry/pUo=oJh$/p⬭S=i.5fOIEq kۓwn `},[=m,5z!Gn]N;!7" KgE_#Tr- En !æ,38"hWu5HuIs`D'3|-F/ V"P `4L4c]tx}o'Rv2p9?9!<Ѱǹ'g3"z$ ^d%5Q$FQrl6E-MLRZ Rzý:>)f$!e}*YuKǼ1YfXf]ꨨ6Tt c,A H01rFZ9ACQ3sYI۠ `;*ȼޟX30\}{R/۩wFY⬹CTjG#|Nc®vh̀9 cwyc}:Xn'îS 'Oٞ>HKkһwTl^E(A dX2W7u[7ҔKa8oς0o3CO8t9M;y5{mL(L9:HwmZ V1vk/pxG~@//rRl[Ȉ?HEr2~-[^X=tOyk J$W85ZFꊘ75p .+(#Kr{Z++k1-Ameղn BQgZ#_g}%lɌڢ@(O]&%+,ݽ=RH:|6TG `^3AѐDi:o?՜gQM-L5"b3QC#w4Z&%:l[Geۦ囙УAf?f5ǡ='kVy PWU$g-J|ԐݲI^{:aV D3qmǵ9r0kf_Ϣ|9vgii}gٝRC̴IU T#Ty`;LSs5[j&;_>,MT”eY(a?{-am)F ̌XYsyȄdۘեOА崇!uPF}9_: bKMadO jR<2 -?k@GhS(; Kީ3xΘBT7z*Dc57"F0wzo|ǨޭiP)8] A~.nqy"xCCM>gbl"r p^C-*^"j*4'+ڇ`8M(%!V NF#\g>jۑxޘ, H:M`/y[B9-tnο?`<7h!’)1:yEuysm ˮ\'_1m^}L5Z7#J~Sx3Ty)?ʂBOYYޯ` |'pErpнqRFaeDPB6i&^fXy01FYr.  4(~27$=Zir  {_$70K'RWjsd(gҌ w_ֳh? 
i$&⦺j?UtbAkT0,_*;{ I֪슌q9O& 7]?BqU-5F #&2>Iv&V5pAҭNk%N8\2D]MQi2%&8,>c'X|蜊]et|xL)'7ȚGp8<.0ۦe2&'c;Ɇ܏eQt#\m|2*ӼF%'s0FϸRP (kQ1ۇxmg \nr5+2gt($7)}iOt.+6y*).ݫ鹬V&×9rhX˞#N(>";FFw׍̣$ϩ+OIvla3A1DG:Z^`?e6`;N"߱ m6I$"!4P gn{|?)Exn`s.>E%D\{gG_~^gO?/c1ƽz?:'Ϛo?/[ 5?QzOk/ٿy??~Eu?/W_Zh_?>|ڿg}ovoO?}MI7w_?__Moy_~彁}__ax7_}5g_3ww7//o#/ܿ~d?.xB /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ 8ةWŜ٧?lQk^oB&&c}{ /pSVJW{E/p^{ /p^{ /p^{ /p^{ /iRSgY 291$*υ6SeoVۋ >ݛLoٖ + J9us>A\Z[Φ[qԥ+8I8 ~׍PYGq@?0 -k~{ fuKQ:GrE(9J&({& (;Ns #=zH(&p&ߋ_:&H:S;T I ]!v'[[HQFU+ ~'{³GW-y7O+|Km8 h^;PYE*gx.V:ΪjNZ[IX:%\JyILFjmXy /p^{ /p^{ /p^{ /p^{Z]ˡ/L[gHeZ\{gLjnEA?n8or}cJf7if\p*k ߨ|~kmj5rY̮.::PP?#B (S,(0dS2;k5gNBo07w}ZڷhR2=?DoMP='$G:90vAL%Y-\(o<ܨxћ׎((S!o1\˩ĥM#;uR' !E_Iք!OUh뢦[L+s-*ts2OĔ8T'ߪBeb$i@[P+CJ\ַy,"NL>ߺ6'>]Pyd塪;n9lܐtx+L`2SQEc5y߾Mi!ė0Ij'<;9X5&}SZM<s|(0/kURNtIZvψ(%;R!5~$|^za&š{ /p^{ /p^{ /p^{ /p^ཿ$ 2e`έ o v/߉DL7UЩ.] (g2 i{cFM!lc8f^$=uIZpeԆ{l_D#; AVmzd?f)77Ip /-؟/jLYSZmecw;.췌?hg:GPo,ݸТ ~Z@kTy4g%P&|-}z-() =0NW  -U?|Wt~8Z+Ri^ 2xX-#dݽ]a4l$5m8vgc_Ɉ;)]ﺬj=@g aPzQTzS2WF<Ո F B$}o.C!dz /p APdw$戶@n}n8PwLRݦJgػCz1DMeKYP67S"HoW)P~1Qz/p^{ /p^{ /p^{ /p^{ &yv"e$ z rza QDd F=ѦLI)l"h0XQaއ\xJ(텤%qFY֐zԬYxnD_!y6(aTfTibY%QKlj.,E&PV;M#jd. 
g{!e$JP9 ?ĘTόXfSѸ{ 哫r²Цɀ7yD"%ʃEϹ{ ">lpC%CbE֒OVbjX>|ȦE1^ /p^{ /p^{ /p^{ /p^?V4f]M>Ɨ?w<|bd"zVu`3լ4`=Zr8~no^ya?8-S%( ~KW2]פb%,X/VDCEv/t1@4%ŢbFP0J^ jYj I8Րs.V2B+нz0C xzr+н{tQz/p^Nl@<-3y^SW{V1^ ƐbVE^U2o#Z cS⼩}ӛqлA#og攉عx`|7o[7pW^AAQ`igb[j@QqJIFԶ3\cQaQ?_k H5DF;"@m;0<e0~ūW{H̿UHR{nr5d UY~3͹__c;*/9*Xm39J48sZyPY.2A!'zvv2&?Blx]IQfF]/Xj 6+ fu={ClPCB€,@<{]W@<+츯B !<^L!B:K/p^0l/p^0!*~żw #qthegIrwHW%!4"=0` 6M'aBV~RX+wIPFt Н9fCh&:y[ưʘp|(IBE %+e{ 56MA4,r,*OoAZ958SnKCXX^a.EġNj@*O*i >(;&ERcn^ :,,ƏJ(YÈhYe`Rjsc!.^cym/p^CjtId~GB 1\/p^Rxα(TOAAx X.'ܾ bca7;鞼-O]5 vRVHHN9-4@yV+P|Yip x نKD7]0pC2 y̿UEH+н{Rݱ+н_!<^tc^q c]Ր.>Yt$0={ ox GW{>[dUdx(~ fs?9Gp"yi+}t ᗑtH"CxxB @`,C:VC xr}{ xB9@;R^Q~&1+ra{ ە</p^ཧ2Y~ /F x❊\}˿ 4`/p^{_M ޿Ր.`Y/W@']H6"| !rW(܄p~ J,ڙщq`f#w l]禽wȉ蜳>;T$&V- qRvl?Hϵq'4Gs9l`,F~&q "f 0WfdaPJL1L>b+0A6H~A]H!(E6)B6 Lt!Ū|ȠDž7Lr E\:S$8JNׅlss;I*Ss!j?W+Vi"5!`)tZ #Эcvz,o'3xgQB\"; }`:Q+|ϟAĽ]̀p \U ]dؾxnGmǒ&a46+(}HhfHT@Z^:L&P.JG,D`(QnKUTJ:=F#ԗ֔bu]$bSb#bQ^6U)b <|C }ᫌ.xI1_7FSg*D,Cd1^ U^e/p^]+ݟp^zNT!W{oB^ E$6Id!e9@ ,:r~3+н{-1^ ys*/!_dՍ{ E1\9 o/p^ೆ`{7Sн{7Sн{7 /pY~н{x(ܻ̇kb&nxpY}{"JC xyjM4]Dz{ vL~@MݶxxU|P.xx)V^{,|;~ϸ/p^ླྀYI]ՐZW{9V'WWTbaMr/^KE2,@<dE1%yҫYS@dBwy11@.)dxz/D;r@'wVC5}˿/4 14uz1$3q^ {L+н{-X^kRzQm+ð؜3tVC x}aY"" /m^s xz!! y+н{U@q,/cP^;=@qrRb[4伔 _K@W?E%`ܱfѱ4eb[̵^a5HYsn[8$NPc`&lJLKaMIdn2G&P)a!(,*.c ''4'yQJOj}4*O=k7:\c2101 :nV>WA A6> nٓmB :*ԃvG Ek6|]6xE(:^e'MRI: 0Yf] 53]ĹcHLh{L8O~:uzXF~h W—Q8q;q%QXGo`QuD КG@(*c։zX.]qCx/\  b4YcHNMN`ў+B8\G 9W2հAP~A~NI$ nkJwzb才a7IIv_ȊV7<980¬ma{>x5ݗذ~6y7Hb2vpR;u:LH,df `RI)-E4wL>v@RC~ډĹ ,WJQ8[B vON 4H  ̩,a []+r5,+eo<@X5).B0. 
kZ] !fGBx >f#"T@9dT< kf&/4ulӠ)4O^mȓ@p E|'pj .BrE|AtpZh 05όRڢ)tT%}ѩ#A~I_zaǠT@`!}!smHVb?xYJDۄO{\ UעPEB*$Gh}LT%cY_E,7iduvDY<|oхA[i _szBv-:Zxz*hVƗ?`i/431!X_сVEiIF.Ɨ?`lhw-7SM=~cucK@D%Xs13p$1+\R=6DۅcK°6'0_5+\cd_zCS)¨`"; %} ;4JIT_1¦vp4\wgҖoE 9wVC ,"B:oB L151yX0wVC9d^{ے  Lx(/p^5Ëb*w /p^ruz/p^ޛ-!\OB> {/p^A[JYn,@<CeW{>CBuG@ "k~ 鳲vW1a]Ր5joՐ/k{1^rr7wVCls_!Sa.x,B xz8/B\8/fih6'H}1K|H]Eyf◌@<[TH >1SBe/pU!˿1\A2B xzx_xz/mNN^D+н \W{c:r?Ľ /p[/YytڙdY ODvݱY4dO5մۚWBL6KxSbRj\fTOw?,!!YC xsBl!^{S|bWz)ucYxW"xb4`/p^Jٰ{s{ Dxb_iP0/?& ('\]"ezq+Օޒp^{pqKн{2Bɤh1/ՐExWyY.~@ { ʳVC xpr:_C xzX5:}Sp@^O䧚-ߑusJ^ɆF hg)>u2,,[=vQ4߻t\ '{ 6.C x /I>W{Pn]Ր#1W{W{!q1^"j  /bh Kv2|挓y)溱uѣ@qW [<crq^\JـeyxV.xXv'W{A{@'Hϸ*YvޞW#d*@A98F/;  {".+z[Һ*W x>m!.d>sFI<]Wb."뗶н JϫR|dYYkg1^Q/B ͻ=q^ &"CB b ̏+нj,YX$1^ y#MX^{jR˿1ALxV{!G.A5L^Rv^5 Qm+{&;6p7 ٽʧ)xAC.NU&k^tj>Zς|qxaDx'w/p^{© /p^sax/xAQՐ6鬓"x؆@)xŸJʞR&`e*L`QYygpOBtܻ!=q^ orwrܻ!z/pX3/Ր"4a{ ԗ-NxR|#;Mc`g)5^X:!**Upr\g'ӺtRcr$4ɩ5a])Z/pF 4PFgbO++V lvUJg}H:r^4r r&!j*Mi!SE\_/HE fR1Vo7Շ L`2X..k˗垟7#y,574ZC(i*ToUNa?q!-0sV\ZȎ_H-Kn+b!Ml0oaIZ^s$[t"3FbPla0^[+~-ZA')aW$Z)5l[P+H,@rFDņLF0l֣/~w Ad_!,n_T4HvmэXoRZ RrGFVKk9~]޹A=:}t[_✏V#F k<+t CQЯ6(K1O?fNG.f롞{źAn(M5|txݱ fŃm~+bȓ*AK?͹H9q1BdS;IgUۼ!a EnUs $) .˻^ZB2rouY]A؉7fL<-z7$*W2 ɛ$saE6Gޤ;>CRtB?7xz,#6%z/p^;dY 31hd{yF .r/utۭHr?|Rkh3j>[q(nY7ܘb /p]CD)xRv^5K2Gd<(b%pЫXU %ʂxF%=bWONBK xzL3Yxz/p1t /mͅ /V?x4gyIm H)K6|bύm@qlNm4.nId9f2ە|P˸f]ՐDLW{{.+н{,Q)d!p~s|4B8 WTb`3ཿmOqKA1 k {| $@`"ўuY*2xY@<~>bK“񭓺=>& wx-S ^ཊDv3+н{& -H1(؍{K !H_ h]Ր?\fp X,N E1/G%ZIsvFJ"LJHǥŚ16(? eA?pWu@-l {Cے?x9>@9fH~ B! 늎Na.雹wVCsՅXPzcr* fjyEҸ|WC--A*tc_DW;w# lz 9 ◅'8'y~s|,<-GZ_voXC>}Kh L@7;#L:G񩈢۔]NbcICS} Iv/E WVc'ie]“QcX“Qg>̟9#{* . 
`hHxuFOW{["1^ \hW{a^{p]ՐQ+K@h &/pC.>|;ذnǢ(N(R0N,w2+쌮4lHܤ,OQu\^k>;OV~\%8b.):BHף)_v%<7ъ%9K6|dobüzb8ׅ9h\)'e]Zܝ3/A8vpsx4ީALȕxGMdYyÒ{ 8nн{ƻgAP$ +XCp/e!M&10בO4AWlQ@٠^٘/!WƁ+yea;(ɖ?>߹HKQS1]ȣ-.Ñ\,%D܅'΢;0w%_5$VY޺pM&T_^ecOwrlؓ.{-:pc) R͟[-:qz7RrvjjyލZp^܀%5T.K0+!Dx>J)t(Wkvn9V=̷=kubz[Mee5$Hv~un'"Olp  igbqV?Ɨ{IiMVf  xlR2%p$٥"+K3ג,BbI*gi*!ٍA= $ =IQE/c]6O bFjoZXwl;hƫl.](CLV?Y4mVcqZ(Ch a]Fs*Rݛ@K_p\K`tb KPc&wɲgG';&3.r}1.!KG|Z43C6W"#;@ .#gCaa2SI [4o_|Bx0'V0s 2!)6mFdz Qd6+!#B%KY^)C(B{R1#6e2ǯ}0r:j6PF=G^ל 6srȤE oXEk ȿ o6^Gn\W7knd\z:h,@<ݽJ°xģ er7y+L+:(%*mxEk@{.c7)kАl|nI-^4 |ؼ;s ڰ"XMţNmîf* NV}!͂9O7 rR,F`7s<yOUfN`As&IP^К.f`ƪ/RGTidybGp% WTb1`UԵDŹ_21/p^wzYyV^z z/zXuS"Ԗ,˿\Iw!Ȉq]Jq7z v9;-Aݧ˗4sBaX\p|黊6|guM0)4 \}k(<)C{ p/6t}u4JU% 2)>uwJ|gar8 o.pv.)>uvrދ);Y)ؘ NM*Kzq+ՐzK}{{x \oy5o eꍁ$!6͜#VG\WmPlQڝFc AeXOT$5j*(ovLD !ֿ󧆁~X7x-yQ&b%OMJV٬}[}Ú@MY$[&&y'N]2bXmDwD&̦@pGm1ޱuߨn@qw!9 Pk:&q6^ཧ2Y~ /p[6r@4 ]:ŰngDu2X;:8> $:7 ?vrxz !;Xk>}X5F؄V#$l[f~\dڢ|RduܗsƸqy ]Z&bP4է޹id7xQz/i/p^u]3^ .1J`{^u$Ɓ~ ՚$G"nu%܈V“QZ[tۭHr?|Rkwk◌@<[ܻ PB^Z5a{5h 1j'UYG*ui,>⦆H>dp)oqD _sjʅ6I!6ht,@qiUe*S oJB5Fr\ 9lssU4ϢMN?Z;S#{< ԋąL@̬\y3+jY?e#VnNʺ5=.1f P~ حS}h 5է񭱅)k;/ػi^[*[◅' )J{\);/:9k~{a]ՐSSEuhi;t+aHWX#*?W5󡣤Rn<Z]$&O? c8 eRmegJ'IFFTih5KeڄChePxmUF8tio41rl?VOAjQuPBpi, v"LXGK5U?k >"XNHZ.< @NF2RJemW.;sOubf,"]1f%*Xx;hEAGݖnNI5yj QˉDŽ4)>CڋS5c'fE/ NƷ\4 x%>{Wkd95(CF9{cS0 ͢,x*!ki rTpDC ,,[ﵟ|8s)_Gв ~ w9}9R;%:).Ş3g ȊImf61SHȇI|oz JAn\@YGٺ,xn??r^j~Ń}~p9 +ge~a%T 7,w= kW⋂svPtR#dS&ʹY @q~.-Rv^4j"uik(R*E WSƢ( ')溓QOim .e0z/p^3'ڬ1^|/Q 1qzS҅_bJ"J鿄04oػ1s`̐Ы:@^e$0s-r}df嵫ZȢͻP&7g-[uq9a2AYߤz⛽uVMftmu]2иj~9Ht <*L([&Eg(iˇShYO7#~É=Cynv.)=)ò)۔Id=c;4mDwD/'NF.b-YU ̋Cre~&pGe>V;ѫO oՐ,BCV* Xn8A๑+x7˶6=. -B!߲Cx*;|cY6 ,/t{!.7=4yJVX)U&p"WB~$'>PdXQ)Vu>-Z@SS:qp]ˠ#nn5ËxW]-"Ky9k.b-M~Nʺ5Ëgz5)xRvI Pkl5_!?{ \@QX,fjLpmzN+9HuܮXsmD&{`X^SDW[-C"ƻ+zہaBt.|Q|S46 Fؖ@a# Tvm0\ >2[hH݋p;t D86'6HU+qkdSByEZ}PVwDy /-Xw=cegcƔ%~~/hQmyZ(Y_!T|RVvj)>u`1p~s|+WYݴF^0-) E'ΦEˊ/p^2op^{\ϱ% =2Q# ܶ|DAĜ7EZ\v4۔$nη˭r HXȻctk u~HµAHJ9h}jWQ 5 y? 
O8yukErJAgINN^VVs 4(27 F2M2SΘWx |Hڈ:}1XF@r`G֒$тe9@ LШ-p 1Cr?w)]|C;⋈,j%E^Ե$Pp#|( *Yk' ^B:&QKXIՓKhM2)gDC ,h ` KE1.yeGE O OIssjd+]e簢/Ds2(O Aǵ-51V/X,&lH5?뇊ڈ݆9Hu8X)ŋ8ѣB.pf%_*0(%,&0+1@XL'Ʋ DTg*3kƩnA'TL1paij]oE^!ƚL/ O"nu ZmI E'΢;-Qp2 x%>{.w@>:trkLɿsHt<sAkɲEz(0=3r&BmiN;( `jh0Q䗠NXָdyUfǛٷsyQ3S wTz'51L\FQܹr Z,ch«{& r؉[w6:ٛNu/H0Wy;ng0Ѳi%u0X=j-hsԟ*6 bkcb-WbvURf?K{MXJOY35f?ɥpI~̡\MC]sp$%39h/%Q4)>5vZCim-.QAY8$or}IWxɽ{ 8β/[ɁSJI5p0P%:H7ӑ'&:j!K=2z6:G!4[=4 6. tU?s ?|U4蹼-o]J%c!>Wj3:hXS8ޙ[Q Ik iDs^`>^eD&d3H 90aI60E x-#V?I9ŸL3Pd?(1J](Oɑdhas4^$zS?w bŴI~M+;DQm+/ OZ&vw[◅' UY劽Ij9;5x6).32Y~ /p]wEoB]6Ba+?Ƕb9Zm %V 6D*$j XVhդbڎ:%Phe}aDX4G蜪G?~ׂlT4H_"#bd<NI*TAOoV 4CdIe؄3w,$lՠxև?jBK СPaO >4D1/VDA'IlBȎ;leG<ɖ#dK Cx7p' OT$#PXj;ieK~m;`1pvfPE6h KX "uik(: @fIw)xRt]ݵ&◅')@qm> K2Gd<(b z+BåvB̵ b9HuQ֌ x=(IhOO{~'5gn[ZuҊs,\h6p$30$WB;$b6͞lWktS2$@pK7,նK*Bچ7(Y`HoQLQe@e SE H5?#*-n"`9P"E bE_8qᄜgkt/t/ NĤXEr*La;G6Uόe3“QZӐp#xSnN,䳓/QOSu'zC,v;ѫO oՐ,4 fHWtLH-;H4􇕱aӸX4^|@dwQkON˙g}8ɟZ욾u%Q]$1tQN 1oь  Hi1aP|7JxѮZc>عf_`1qHB9:H`q-Q|Yn [>G,|E%7852PnoVdg$YrH(?^ YiU' u>*Kw=R eK. MTjNT+UN cR]w%0r$5kpReA#BtcE*M7; [?-єnzU7g hŒJ;zt=)[v v2Ӱh|obDn{{ȴ@ һCKԨ*] !ű;fk]9ܙ۔=lv/,zΝ鰤OGa?07geqРIkrv] aiGtv sX5fp:bh_*`lat;оkb 3Y MP!a3wʝzѣS>:}@g8A${ccAP+|,)a /(] "åsɆ0:a]Km9%$ Nd-w20f:c%CVSD*}@Hh#["TNPdsYO!ᬏC PKc h 2Qڇ1-Xwd`/%1qR;WhO/E ZB:\kFwdhYy9 'pi2:YL)`/pT{Z!nr{/p^ъ`"ͅ42\W{œ| Yoʹ6>Yy^{ b+<;{si^ཿFe.nC xx&OWԹ61+L<_$Z(fAYyf8:^*y^n|"3h@<`h Oze l x:!_@,ZܦEK+н{~~P#?hB_/y(ވd{$&/p^ߘ29FE0vX)1pe\ʽ7%BH$4D+н{ /p^C;.Ĉ/tO1{ +܏NbWxR=9YhxHB8`ߠ?m lo.x°w>6)sdoO/p^{ /p^ MA<ZAQ7o!W{  ti(tR Ruv佒/l/UMu@y^߰3) Oŭ.D݂~jW{JBFk=Q|sv< /iHHX-XFh,ܥ% /p^{OƩ(Zz%1P}Hsb mmBL!$.F xzPz۩2x? 
xx!R@E?[wVK;\t x'/p^{ /p^འ"i>EjF"۷/p^ཤOY~]Րsc'g?1D+н`wVC xz/p^{uxŢҽB /p^{ /p^{ /p^{ /p^GHُ~ xz/p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{~7#Yy^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ /p^{ ~ڹ-1,,]mr(.2(a!P$@' {, <ŽtKYA@Dim)i >G҉5ǽ(d;h@+<=i]{Ad5{`y5ݦޮNUE4(MGga(Zw_bj *!4p@DAUN^RW*CN]9h(qgOV,l\^XLwZ$g}t\«f~Z#Ewb$OaLA=qуw` M.3ޠ!|Pr?v͓/\wVo㳷7(#fx6H(Z$KRǴ=n6gBhan L-e˪`:'ͽL>՞kTKK|Y?Ф42/վi u7V1yksIC"R*D8id<,ctszVlj"I-QmiS U #m\]0P? 9% _tDm5qњJ3J.d(1zzxn);b rωDǫ)]fx(:H& Sd[ޭfUX~rcK^SZ҅IQ$[Mۉ0 VjU~"FLNe4' a LTpOa{ g8vgEeg] 79(D#z0/Opƪ 35sx 9,x"_Qvu%SAOb&9yҶJQ?$sIBt! (yݸlmY;pbeJ_`[o{#`gX{TNit)|^v ]ШPLM?uϱ3V /p^4!ħ$JvFp|<#Ԕ6pLǘi}DK"dl^k(ʬ=Ʋޮڧ#.9B2AӊȞ:pB}^$!?Li>jcX61;Z[X6t2[.X# ksm}SXrL=g[N &^MHSyDݜKT1ZkpXXP/y[XNm^wvEj27hk.˦ݫQϼ_d3f $*Zjjr3Hamo^6-bbOݻF\QvGML[v*V BϹ (A}ѧ&t8n2}{9s@n8Nbfg]6BYG!pMϯJ0%w^[ a:Vj8PÛ;rx#W>y)s)PMLӓN -v7u8vz̰p^<O8zPqfXTyha< 6qgDύ@]Jr&+d/g v.l~/HCHCM1I'{رm{X55m Ӱ;VJY5|wVmug,|䮕Gk{i=Cusbw/2#:1T7Oldt+ŖI{:cUu >{1UG>@fustU]'|Pez랂+=CXźrp2)B6IY>}cVD͗ƛ3+ iWq-v]8 0pyY-NJ-"y99QA,B(ڸ]KOl)>JjtɸB`{ݐeN,4Oӳ+X&x򇟁 ΐmsҦAq1g<'Y_Ѧ8-ITPQ*nc\׻:wXGmR ;W+ gǵmPTsgf@km=`sl]&'>iͮX436nk ]k81HHjn+G2%y~XmV]%cx <;]qC}O) u+RnO@/}qqJGZe |N8$DHOXgqc{q/Bfi`p؜ңʏ}=/U3R/S ڨT~Ry?BPx)XG +G)YUh8iX5$q85$}G$A֫:B}* ckHl1c+y0tOAY=tO-#؝~_T~sіǎ8 qQorQrjD:tUkLL&K`(}dڶeF藑[wDёVgtҬqI3yM#)7S^|r o(V6L)nJoGv{vEDqG7ZPiLހ%_@$9ÿ-wYoAT<"Jn])dװ^A?#\^c{9#M8j~bܔ|Ҩ~-Y-=ꙉ¹:'g"csclyJ h`PN\Tb2Q^T_p[HMr!XâqIMx <$'[ &vTtWUm0CW3FatzUyuKgYZ\TcR1W^ᕬ>#ju7u\m |1][\h.J ;Z;45#ERKgԿЖ!^&f3G G~&Bm<7_nkSy0nAy j@%QiKΌ $vv ֺMH$%Å~^0ɓ(GߗfH~jǡ 8g Vs>+4cnǯBG Q7JsXV1.BoCtN"0.~Xd=x Vިzm@ZZd 3|%N uid1MUO "tmۥ_Bڻp͹EaN*CW*MY7Zmғ$-S0- 0d²R*e72HW76e;OϻȄ'%JY19HJi@K/ 9UTJЄ'ZS\+s*!J*=kz+&31pcq7b` gc K&{+#v-b/ѳr{k':r|8C0w/DS J7q*쑵C/)3Ga\Uq,$1n`E_!oTw YTsN7xD;t_[Zy5m_xe\"{E XjcOC\D9+f$Bfk|Ǵb@tq g$l)Db c5iVF-<''4T= 9b5T:[Ys 0QugR=}WbK2z]JX??70CxoylPg~ՄĀH5:7ɻswW卄0F[=eUrD49?B:~Focœ2U8G,^5hתER7Ͽmƅо%> %!E/1W.״M[,(J6odhicrkTs=-q}#O瓙B U$/qPTQZo{hLм-ݿpVwxgF2fq+',^oAXݚ|$⮇MKd7?<<ɩ^שHHa/sv.GD- Jj 8 4ة Kxh!d%cН*D/Q̉`S . 
b3(3X\<.2X DrKxfTV,ME:"BEWSSPvr{QrxiY!Pm.t\e<æD9k&- k}$4%˥ϻE{r |spV}[i#{櫞sL}≩q7¯2M^GVZuzLE{4l{{F;*Vǰਯ74in5gs]ȂCNЂpy36H)Q0g:uI_" FOneBs̘؜K'73;GcΦb<{;#M?ݟxX ,^[=VNc9#!IR!1R?ӄV6>H -BxؔZ2Brr2%vVeHgiB[y ΡJo_Y&ᖇn#31`TCOJ$P&rAʉ"D)UF|Z,?X;"hѱJP[nHF"I|hIsGP~+|! ` hOԵcbU'IL;bV.ƛKQ]hT{R 7#Pٚ մf5%܉#YB iI r͞8S| k;OQ$l.l0?iCh8NBQAqx"BI Fn>~-o'`XP?]NtMԹjԕڇrDjb}nbS؟ӡz ~am*#u$Q{o\s%Șׅir}6cekoC\C,*{QoNQ^ĵ)_}w 6;Y(xZ:5ϕve#]M ]ĻN ȒlyU-pHUsa}K@Bu[P$B=!cхY㖑dPi*3*UT*Xrr>6n jӔ9CިRIZ}|"?R 4YJ8Hxĉg֌0W'K@**Cc.H)l͵ZB{&c,ŠJ~9R7qfPj$Z0&o5vӀBLI ;#f3z3X6 S%9(*ԷT`cT=;`*MSd`? V~zwR{4_g+)`1%Tw0ΌGeoK~͞.!L,ی:"&պЙea h8.3箖g]d!dfݾ?PwිA<_:ΓP`%$iWj˨E_?ȒMeh$m~䁰YOoM*E$l(0*U? SYQe=&A[0 "J5ߴ261,q!So'ET=vL&riκz_zYq\Ng`{УP<=' UseI slX`XgF[^V b19ގ%P&ʂ½3+R 9ލk<5bV-mBɜ,?@F9#n3Q[Fz)B6(:OFjdzKZ!qyh,+F>r|v- MŮS{SVy% | Y5+s j MhMhH &O)X <{| fmgоN$|P|Gz̀uGb|*|rph;8j-ב(]54ʟ/6nG; Qiio|>p71zkizQvG97Eah"!ӪV-/1^,:{]cdj͜}YFZvw>&=vdO`e -^`+cjXp da r B)řT= a5u8+f6UnGq6h͉ޮwGQ3 *CN)tf^E5ق]DXeÁo370]#HOn~GmctӃv*[ձMdfۢI)T^$۾IeBk`YVj e}_fGT!%S(c h:i$k74TOC <@G7XsxMJo 5>1o~vp{8+"EAsH\JXǮK:Ң _~oB(vSd.Rt!+>pW)R6f\NE g8|K%wwK%wy2K4=XCKX*ϐDCF:\^IUwO)#Sb6нCGe)L)B6ƐP`;^=SB9>|ZѥMaۯTIӐhc[\v|K޿'W5 W-*ޛy@SBM Xt'e*stل|S=3#Xdp o!{SI!OaUP@';Ez5pDM+DiUGԑv isT.p9,Q)Ns7߹#oJ'5k'QYT3Dt&QHmH Cl{uS]jF g*9*qz׵TuN>gB.j/q16F;\ 0mgO|V6KǼ$?a2E4V 73焝A +ϑ,()(* ƥMF6ybe<bAU$AFH-5r`Y#_(VR#8s $M@R֮5oQf]]jTl664R'j `rF<z*DP'Q{Eblm!Y䏛muƺIbH7%h*OyML~)Vm:zx&XY0{2?`AO=hb^ hδUzJt~ꘂpS gkeoU1Ɩ3\S/a$#K719t JS9[S7^ R ה(]cXEt5+BcKΔ؜ܽpV|9)k4yx[Ϧ~7Nl K^c<|έOXa L@dU\'B{JEuHe ntB%O7$z?@";/F Lt&~}\WvQ ;9#u=hswW+X@N!P@k w~,_ R6I`|kr-7u"=,jvγ6,N7]V?d($'v]{{r>~D"!?hk5(]Ki&_K\]4ˊ  m.Z< p>*$wI le9}ϵx';b^,Ua]n^Һo!YR.sm#T˸g> 2:(W3DSH{Frb%{+YM  >AЬ>Mek>HutMZYش"H؝] oN0ЌOT?d ;8g3!_E*C! "h3KGڲ,@ wiJ7lS֌T&,"Xߞ ) ѣ^TECED0JgUGBF:Eh(A@ulf_S,/IBi^$? 
l?2vi*I5O"'d"x Ө1g#VkkHM"ÁGf'V ܇ ~ɀe3$?M3xw0N1A50C)˽ Ya dHD# Dl zʂsP2T_Rn2%/#}&A={Ti\^ġl4o34pcR:J+?/?@nJ0e1!j1o/أ0BP>BTG1*Ez4j |*0-'D^}a+t@/Y5e 3r} 9nU!> $ko*E1ʲv )FHĒXM9r sLSCE._mh<4KTLlFs䎿Е;,N9|.׽2:soj٬RN#f*~[ߗV?ӆ|[)C1\fG8Fʠw;sCwW,0 Jp-Z-YQL&A I#`̂M+eу:Vv!خwu8:^=ݒF?д :7"0yǣ3/ˇKf˲4yP6|Tg4 *AUHz]?z8G-H>s)鱙m\BDHӠ]C>khrક?+ EЀ2 {I=3X5 3_8q n 'ָFH_D \ͿC:z_J-˻+Z|ֱ, NKqfhTrvF0T,tVrqKmƳXkEWFu]/ѐg̸c(A?M3^jauf.ؕjjoZ"+wJ+g/3Ai/[]˥Ox-I?[hV!kJ:^z$'B,@̞άց$M?,ֳj"n`o 7lXt(`n)@#˯l=G3<1i'rO¥WGh, ]aF$h``I!zXqI=0>ZMlAüg6Ԩ. ߪgu9KW<2dv*6?SoYrƴOr!"Xi{7O?mG+E=isWHvUN]ְy12$8۫ײ>i\AJH^6qֵC!Pn t֧f}5Vż~h"A- C6{A ?"@oJ&›z?#5j+s77B`5alԯ\M %i~z|?@ ,6M̞p6k5TM\"`0 \ x y [|+Ő/ s)B l3QP{L#R\ܩXZӚAF{942m-)iDW;t{`EINfC1eTR"맚tW$|,̊h6y ?T˥SV,HjvNj5BlDJ.53 `TAv3G\m. nHC% 8sH\}Xϖ+GOy>1!MQ uSiLݏ=a"uJKUqonS CGB@#K!Qx!)5mT:{u*$V4b.@:PHp ^4**؈g`z4_Cs\Zf'7,-þ.-^w$ Vkۼcl>E#" )d'_eZ5h,g>G?(-Wokc8),]ݿ Yi:Mzo|7X _ zz֎U`~\tT0 J?$r!gɺWN =HÇ8Ϻ (v[. ͚ylxH')md\Yh2gʛy-lanAe`dhj5Vt  w؍غذ'%q'YQpKŸj AS|bK& ?)gx6 #=CgFfXZv.;º&tePiT[K0+u}c꼘%Tmp zUtq ]199@\PXhӇo#gs7x˲L9b֫ <qO/ωQ* ~xa% 0?KS))]gN$&JZ+ŵh^rSyptɬ5ܣ1 :'*%gδNLg:N;rJgr"gl[G9yM#q̯rɔ]Uqq%o< )~Zޏ{9T^~> /Nئ8$d}=-aJٝq/;zteҳDҐSp;7xԼb!=iܘણ:LK׫vbgWrʾې[~~'x?w/fːVp@b-7L9sj5KfuWQ¡3g|66+% ,v[3 IPNy쫉r-VYͨއoxA,s G٬:I;ٚlk)&I^}MV`JCQ>cr~1B+0FrB, xBݽՃ6ů 1g"#+?lPxƇن7:az.]"L-Z07.r1YVQ\i?;p E`^ fvg߷K $T @?Sci?ҙCΛ1g"Ql%^ߜ"xk~3y[Ecg;5zi*?WBQA_} nѺ7fsQU*Q(*ӗ);բ{:քii13Y4lM f~~*dbofo&Κ+s% z{izS0j}%faN* 79=`|{KUg;Z8Av,Ҕ"y␆S*Dmp@r"h >2Z6IU/9[}<tBOPm8`.|aLBOB^4WfQxgY=qC17MϏ?tfZP ;4CKAaRI8ZJZU0Ʉ 5?$~ъQԱ-!?8 O1VjNP rf&Τ% lX@!&3gYQ];)~&"^-F>6IR<:دw5qVz,d^" ?7ASoZœ{.Zlқ@24_ %j䊰; gCf%皬 XY61aD-C-uOnqb)CHw_d:]V4m)okGjakoNQC%X3sӲ~j:b1LGۂY(~hf۴_ƋpiZT\}R=[q'֞ΧiA_0|sr?'ۖ$kdPbARꗪRWGA,ߝ0nra[?[|k_`šm=zow)W!l c;?mykWw{hEDK- 6óva :K4zn yݠEoAߎMA ȋb1*9G&{jG .G`EY?)u˜G6keq]q5NxJۨAl:Mm^R dY?Ң{8("l"@AP!! 
j/CO\ ) I8@\0`f]}ͨjSdnH@u[2HFHk)0 a-( (.HOccl[#U@UغXsPPRo8v8ۧ h %>5X?=ޘ]x 2O B1CC',BXt`{9*zFȚNF&(_%7 C t$m>a4L4AoItSeT CV(+AO)؆X?ckkĵ&{6q.U$5~ӇS#7fc*vڗvl+,i~Qo۪ SR*c= {ij7J&⁓О`8sxS-B y:P)92"e=ٽ> lZ*y-'e;7we7T+Y& :C߷'pZEwR1O+Gxi9|gVsdXm%-x^<8u)dsffqڡj7!0~ $a 0ҮFbU3:6\b4}hkhL-yARy5k`b.#"\CvE)cEvdJ<35o+maȵ #QVwNt&#:i*(.L/+2FP^[p$6ͅy#LBpCR E4G_^$X2k炗&aO}ި#oZ-9N1(;j;Jqݪ.0S)yY ][Yx%g@;܍ f}\B^&N !L="-M6^gM8mwࡒk+(#5%qAǕyV4xcզEGEbUbN3RU>zo~ùQc>4/-Iq&Hz l$Ęgy YY>qǹ%!@ۻ5V#ʷjj ngvu>D>1ܽ 92_,@ƩT*2$4M統e^ڏ ^Y87M>Pͼ˱Zj/W^vIcscoZ>kȱ>MU#t"D?r+48gY*x9Xo6Z{-5&];2"<L%A0ge'EW1T~Ec7{;([HPG܀^CJ֥ gV k}UؑƔ 2 (R!Q#݌$)z#dua_e 0Og*mn% vz8tn!@ E@^aPQ`ˉ!DOt۠?XGz ($~$?4[}hVl006"JE57v"G~'DWWB}/3r,MWXlJȚdp22(V'!t9/rvpR䜈㶸HŧpS@1eLQ/CQN껈T7^@#29+I> gYK[޾ ,+E41"&#%tž=Ȉz' %lC!TokQQQwHZp8 t3 DxuJuZHpن.nN(ۭsO $it22 }uL?]yn`7]}(q4fA+>kv OÛ~(I[mC08# wh@k1Z/1`Ya;q٘{ r UWŗË" bK!+2_o\a $fK5/pOe8V%ȕ7v&:1Kbz: Jgf؃)>=mW&2!VzgX8Ɇ;0܀@lE{-R"e 5xB9QԺNnsK#қ[8T&(*".m;SKzmM] ]kmװ/ͳJÑqU$0{b/*7BTE5DPvx4{2Zv\^: ڗe%ܿFO"1+Cߖ/ڪv4j1)qKDrW>` {- 4q$@|jeP]j°aVQI`/J-VZ k)L>>NZʒ\-n(_2gAXyHDXG6'.N<^"*8{Đ%4|HaKIjgVDP;dnP(l4v۾=EBص@/ w')/v'͉ $\V!ߧYLNiJ +=u q~̀ =nEaasSO9[{^A'OaU _| o,(ˌLv+oKs`3ekzjЮD')=}LSw4dPڡBHcŸ_)GA8 ]AY~~:wYGZ%ڥGh rjJ@?]v.豽H?YDD/yg#T%tU (h_V}-$u.zB4D^\EzqM8. kY nxKdqW,r۶ hNxlFp?2P6G}ˊ'&3&DH ]$UՀY0jutEmeɋr?\օp ^q(6°ϐA4{q c":;OAL;$EzOn2'+xq-&|)U\5ȵVf;)l <$i]͝W濮z ıdpAQS4b֌j:cĔ\ TGWyc=OʯcT_"-a&rÛP;bOK\m!^Q&"ܢ4VOۚjHj3ScB+ >2ڭQJLE z)n(̕FBA~?سtmt6uAcх)a&V@٠I9|,УI` ٛwL7~e>LR(;K6w8DA{FpRi}Tzf.L&9y).9M[]j( _G=hgBšc.?~gQ<r13=rx3Ȣ B?T<1"Fm#(63GbQ%^z۲IQs8$uš 4ʬ"{ؽǍ±bY#c%tklY&Qɴpr_:`'YfHoj@,P> w5cm?jV)}CCM|iskQqB͈VNmx4c86W:VvӚp(MNf6l>SД*Y$mͶ)̏;|^"0H瘐>})/?ɌQEH1Eׅ]ڰvLp+%-~`v-d&|\d֩T7h,DgJK{%à ec*G% ҝ7K8XXDUEL{1aN]!By {^v*=\CzZ * ׈[ܦE@xostӽ1K¯2T'@ɛ "=tnӦ$XfFyCW xCb2 $;:ZnI)> x#|*:QG ^:f dbr<*E9iEڵ4~@R3T=䧽߾^(5b.؟ P iڜGЊ9TEG[]XLX+o ]? 
> i^ _ќPp;pWW4$=󀽲)(WB^j׵ WgprNi;WoDbm'"L +>cc-#>YtYʺ~(06NQ76={:n`<}i`BA?Bi A`1taҐF/’K, \]\&DDϧ OGO螘UZLj#<]4 h0T Հ;4r`vFlx1\wv?LL4"kQdF5O(IQCƨr".+G=92sςA~M+l>dcȈ]б`[|&J4ZO roȌ:wgnq·&%@y8V6YeaSˋS@/J0,Uvߖr'(zg֜N& HKF$;0E9)ڞ2=vQkH`z,bbFLIيD7-qXI'AJJu.$LV6Mx Y{YA@QdCnFVcBJ¶G@Mjf1yDE3*`:dTC+%x?6@6l( ;WH@@qtsUkyCxϩ卷bsϪAyOyBCm$>V )cq{IX^9 \솢B)O L_;&˼w"my%Mvb>S6'uNlbq4MB*L0e+Ѷ6{k2I=k d&Ov<_%YuT+yv[`P~?R\ TN2o.4{@OB (hWR%ϙLgF,k΍i􄭂6]a?~ڀ'+ G](9J$0*dh>9ZI9U_s8ov8Ui\ UR׏'DWXTUK'>KLj/~ekWN66R1fVtn>MtPBf UM cmYÌaU BjQf*sH}8aP|KvL߆ݠ@1!%RΥ ߺ9qSS!+}HYW@nۇyr?3&pPRPzuWuPkҕܒ=| H Z[+ ߽!k34׃T>pUnH᳾'y A˵,${_=KO4tM<+5 s1_~^|֕1$FCaѨI Ng Bg`Z+۔jiB<_HG\6=L:$`{RB1շoգΘZY(č:p{-筠 Lhr̊G>- A%ċtPno) ym6@m@V@SQ W<gL \grq@F띝B[ӳ2qč592(c G`'~[y8kHKD͋2^@ŝPj01{a y/]5hAHԌ߉"7"|h,bOc5ǸLOC-`(6sf{geBvݒ0 .ɠ<|6F"L޲9O<3>ߢF^C5mV1/`tQzXd7_s_´ p*#U~'hdE(kcUNr[%c4,҄c$$L]7?zn$ ULe{,—R-|i~ɔ&/W,lpC&*˴n1/h+9^ $BC>AH?K+arUcMA7c@\%e+3&dɜWm,/Bz> :h$lU< R]8 ' -u8 EB|f9 yиHE:_,A'EyOж)2kuן`;Y9AAf*v)sL$s%`ψ^r\Om uV8 ľ# ; ATW髽Sx_ Ou Q%;B4IwY91( ˌCܲ󳸤ET|ǥ79aZ]q$=ReށcsömI8Q Y-}۴#; ,pp;r)'ae;HR*)Xt÷&'VbTPZ>~xRMrH)h; kQäiIPe[@W>~)$g?;rv$5%gN6v\ήEsuϼDh!+Y`x^ "hi,kL<`gT28Bdq>u;"jt@$ (#"_3AIqg, {/lŠT4zupy>. 
6/>A$"CSeBwk>)7u`nS &\ʿI mP0,(B`@33l;ظޣTd ˦晙gL(5lkny8h43ol/w> T*_E[e֍/kN)d,Lx Pts_sB#jwJmc$GI <њH؈|_7@Jђ({0r~ j{@u-&4Xl7CK%[F?2|R%<h7,<$>ݛΈAs{!8  R &u-1ٰ*Ir&VA%l$;,rbE0d|d"͂ZJ%scF,}5ZG7~xؒI xzcBYGo^=X!PQ P8w 6&xp=*o{) fY/UOcQ~0^qB:yw{iP ՞[H֬K V DΈ0*ɲ:KqCnQt,fjh]ftTVoڇSВoee hRa?wZex_+5UG4N<Bï:hx gdҍ( ,Q֥RUUG,,_h##@*ӶCu)bo 4x7_pUµ#mgqX7{bEԙ 7H QqbmloW@OE-aKs * ҉m04~XZʖhQUZ> ӫ$nw6AMVǾjW2?!r8XY:C!ml?n$ŕ\/lJVreE@Ԕ٬Anhsdt%Or\IK8yųi{W+E3]hEsF?ntl [HwUИ;ϢzQɯh{(ea8K*hyEkt*s H6$ 8,3ǸK0lj!c2`.9g+J4Q|֨U:̪&l.1&ZrfMj;P!]^ձ")YX?>"Kz~/> J*d͓kל~RYoKK_9#]} ARvRL8dn@>Ax\qD]@-GfׄOҗ%N"+/DM>bhaAU@JUqL3wZ"6RaV)]gpox#AEϰphHjOOL=Tze!\vUV3 8Ʒ*!V ë8Ũҋ[ƘpW:̰c1ƣMh?VFg'rNm(gKmۍZ/(W,>d;/w6W8ҀgehD2/.+^n(K}Ps( -mה7`g8 DE@g4XU+_8=YZ5aZvN?HrC~HWՓ{7hUԌi]QTatIjHꄕ6?@ҁ8d- H_`Uxv/6JaGC|h7(* j^ƨsj  m|9R j-d{&240}wbG&SWک ռ5_x09 !҄Z</yϸu';?]=|o)#M.;|]DiFGC{KuF~ŷ3P*ŌS";[@ُK۟6ANiIeZ)e)oWhل+w#=kjl)q-" [ގ)< k!܍8᳽Rɷ?9k rI!|!Q0XTx?TW3}jb=>]}F ٱq_פr}^{.9Y@T |}Ul5QܶO`,ƃ/TVl$P^ġjg3|^^k4̍!#Y'CS}TMQuO?r\ ZkbS:O^T:зgHA&`(wGJT6 '%/Dj&U,И#熐bv6r dōiU#!* EpVt6X=~Fw P:9Ack2&j}ЈU߮pH:u4RRI ߪM.!1x;PE?2UVG6L,6a; v-b8D6c#8=(r; 0`\_xO4Ǫ<`n3ۑرJ~bϛâ3Wda`Z)3[t&d"G" 38U+^~5PD|d0]iAGzj^lڻO2Q8$ONT*QJW2Y΀G@0ܒ0 bF4%H^H_Oo1_9CNF8Tez"rє[.p#Oh/UƄh̿\/>7l@#4,c qc&zU0P7x;Aø %;vAx8Gkr{dо9?0]  ˞wE* p ,ix(1o@d9XP@>l}ɨј`~g0e; *u`&Ѐ~b 6y﶑Z9j_Q#0G^)V1m1W'\ES `hD-ɉ4H a7 !{CXݞ}5eWqY G5Ņ4Y sTmnxbԺ-Ny%P*ȄJTd44㐐/JlL9 VŐNHHnR3|]) ~ɓ]FL MWؐ8 [~{wޜ N\}byg%i1?'i>~nZ`&ț/w [؆һbo&je m!^)}vj "JZ ȿ0> ̫| R2A$ L܈3V"{8%RDx)r1Ntr (g.s}dw]ex.{#]. Pl]ǣ\q]BDy1gvjJ:dLj XPadBRsEYO"@F {Q"75/qb/I9uZg_7\ssBwsU[\ŭYfDlLUӻW3.|kR$xuJ+8ԲȜѾ`JR_z9`՞uۺ߸by7_[3ڄ- *9(꣣!a-.Ktt VKWDD0>e|2kSh̪?om #Q(s[bzںLȲRqzЪy;2pD^;o6$x@q|!:+UIEV泚hO4D?QޛbNE+ J@++\2NeuRjmy!}4WCw7MõdgS~)bÊkD4I,ARus.6useyMh7_%7LtTSfFgC?qlJ|(8( Vb3>yf>%izrKS)Va.&PKs\_ T_Ien;˖ZgGySq!]= 2M/?;Y"fBp Xe}bR0R#eaF4.2'__]QvyD[{ek`(8]a xoגI4r 9e׫7Z-rZ|>N=W5MjQQ7i֞ 8}sдbPfRp'uئƟ)sRi^%l˴Q*J{4zCueL>XHʇ $ q*GsfX<{~4F;1/ɉ4 8RvORNdM344+&"OЄ_ŒdIݟQٚ PrQTѩIen_<H蓳| 6Ia9Ty*r|'}XD D#a. 
H?ϊgv&>^ZK.'L!ǵoh {zYT+DXYڙ٨etp[|wds,1ϡ͘C0h+'h3r0\+ NVMƙh1fm_MbO:W#sVACts,kJ0 dF֤0CuZsAbŒmʒQ4C !UjX JZ/횶Zpz-! S~|Y𛦩OYb<2l\y[z=PWَ@]unE8y`{͋]ePr#_M^쉺 0O |pbYgjϳtff6z[,qt-!IVxqȾ]<4 ұZ#-/KP0FQtbd 7gP"No^u9fNX.7v:x/d+p+cX>hZEffMԠo_DDH Ϋ;!Enz`WĶJ"ŕDϳ5 + t<5׷Sd4(E!s@BBm/(+bՙY[ #/$O=!i,vJ 2P]`>*496G ̨1F*N5@LƊ6zDPP:nb-!E&m@D6!,\C.VP$Ǡ6EC  ?Ȥ&v`>0^L5$DIM^K!m8cVbf /iYOұ9-z#sL"x30f`{|yra_|(B@3X[p"N믭5ێ~_iPF>\ey "^`i4DΝ@~4=lc+6;g`~;6vq0D_r}bޱT;,7G:6P&@.۩j׉P4iTa Ӄs{$$'sÅ6%QѮ6N%f41ӌ -)K:EGk]&q珚e=֔$F75_Lf7\fׇM&<0ܥ'g\'kkmg\,CD#ن8Kr ĖAx{uWf+DvB_z;}#z)$xIdN nGvRzeODQ87r/תDtĆM0 a[k )JV$D :~mV(?d/t>i}[?22BKlVL_D1<_"ç+!d[R ֮ڌ~AHK =vmA7az^OeVRA]0Д=hny'&{}(x '^s+t鴌,d6*6['0}F?&aɵ?Nj<-&'\bYƄ/P~P #Kjϼ7$QEXͮ*mLsebTJgGɳKɁ/:;*zSŎI8ځnE ӹdhЫ&[bא)] x.D?*R4]MS<&CLЂU&pnE~;%=2$O@YT ]W :pVQ0w à'_jxY zb€=j @UžWʃ4ux3nt5imUw% iW(6>.#:<e8. ,㽤Bt&O)=a5//nU0wFtT _rٵ\Aڳ= l.yT2_hg F|!żGِ;rq&8kv ޻h<ÕKiu̯0gt*:k:MA (6٠s `ؓ(&#/;SLzW*Zd⠁f17O9j6XxC㏙C I)]DU[*B|jrD:8kȉi 2v?ƑHԆK5[N_^3KvAY _*'0oԑL:ӽM{)l"=܍[.p]a1? H9 U|1$.eV@@k"#I)@4㹋y"?2C4^{z'Z%21ZXrqTT( 2'y{}Ѥܚuyl3&'M>`e;.C/ݙ}U.T$Ǯw.-9e3Mtq d6`jYHU1Ƥd]rDlĊY_S5[SLjt`vU]hS-"D` 0{)%նsmer'sTi(Bt952ԷZh Oas-\륳c*thXD}bz#CZ055.YaFLY K4sP`5B^ͭjb& L5yyry'Loށfw=& ྶa-CQ-قuVvzRy |zو+jg{!f֣bN/Kقe&.:GXyа'6Aj…X+~,p$Iӧe!(Q`\Ĵ0 Tfd!YIYָZ!lj`jP5g㡜D @1#w|mt gFDt{")]FR)@XČ'c; B`MFwtf1Yy:O0n4O _5 }mmuÝb8!d;+/mX=dM&[o8M^۵@sUn.6ZH_.y_A@2Ui46ѩ5e~w@ ZJrMA'ϻ(6,B*}N^ Ꮤ[P~xcT80V6H):bwN}t!jđTZT͏ V=>ɟJ;KikmtYW,a3+p]S3894ix0Z×vnevWgSӌۯt;6aL`F7[P֩dmu2A>: HW.$?zFf|4OS,?D復>pO)>JH?Da{ZbL=A7qhn"j[[i?rQ<1J Iomeq) avv/4WDYvP/6~,^"|c#v &hNT_|8kܦQcyOÞKa*_q̇sh6B9A4p3\ٻ8Y};`-S9֖s{(|8i\Z壻?rZЉ6jj|\rpb?b=F0lC0%mv3j׷h;bd(fN씆8Pw@u$5m8P}jtc~=AkG%m9 $\u<<RRz: ˽̰Γ.m|?Ǥh4Y%mM~cd&Ad[=S]tkp 9V*,ufs"!V?RU !10OZq0@ϛT6è<w^y*n`d,z V'%Z`A?HF"` [A#xDW -w @( ,J@%49TPL:"<. 
V&6 0bj sb@ek  ]>jB 6- "[Ɔtf ~#-g Cޗ[Nm՞hwp5f ޥXqM=uPoo{|5,dS82ht(Z„MbI=6/$5fst?cdJ]gÓǕ͌."D\!XI 5Ը?K8P_ʃN@#j^<}9\cbk(N$ ֕7lA> ""RJ(ӯ\/B1X8,[a"\HqהhW|Ux!ն (*lne?+~W{$zkŎ9!ns[ƆZDJL7oĔMsdNE:!%UV|+(Id,]@ P |Trmv=SbQvIc0{h2ukYqw)<ˁm(9ґYBt0]tze8|&BCfӣdB6xy]y<.h^,[_]n)WB}YT`8W4Y)4ρE6W\D mM>Jbn=᫯Ji Jr廒[<7hU'a3PA$[/Kk!CKP>:&)D*|"28fR97[?јL0sMmzNTsrz`2'KAo,A 9=*RY=L NJYMGy (u_H"b56.g.PC |t/Q{ @ż 3tF^WK6@ZXU@]&.SofmL#7aJDGӊE>Rž2zDmỈ!Nb|c-W6Xpu`w)Ky"pҭ3e4nz?2?(SuGm;|&OO1vN? nu=R0;-Uo`qgz OIؘX~'7M^KLx`ŏO#N:iڰ|yScK*(XE7? 7iW1I",L(.q)x*6G%.B/fg3M vb*,OqfIKsbl*zNMg ^GS!T@=tnAē]kM%CHŸV,0t,ٞP>FW#d􀊸P9BoYu_Lx1tG !lC/ V&a 03 R(#M (c &:&я˘=A9FmTd>ޡhC_<됞\ e{#j[Ϻy_4=KnpPa߻89DZu8p; sNm3A;_mN x9ɀAS8f=5sj}{d8y كnihȮ!*Nsie{H i)9[/]c+Yd2JsVd!=j: r ޺ʠpAJW(rf |CzяE*= d}ѽb2e?<.90eG釉Ǭ}J0H\8D 2xwYD8Ӷc]gڃv s?Q܎smx.tQfڗgT@BPsKtW1un*ׄIۂls?KkԷj #]Ͼ"~!iN*'T t)!l{I8t=i KQDMuzWq~`["N⎧-}9HJBI ￿*:pډ?6pz- *M a}uG-gStd.㱀x!}/v Ι0s,A}:aj _AJŇ: P>y=I@vW|D<]a4[Ljfde" װi|]?` >5YFr+m SL#<8dz6ħ.y@yW畯wVXTp>7/$EeBP"Zhv+gAV`&K/u,/¨ZuQ!̍D8V5iD笠Aۯ0#@X ~Dq%PWWHKƊ#nJ@ u[UB  b ƿ`M5]`~ G@ ҈;nMB5p@!QwV~q|ikyFsP_tٍY@*I<u)X#3VM`}%jc\Tw33_S4 >'n :R-ݜ>7f,DR_:)< ˔n%ԓO62;9ipW|R Calg2lٴWޣմhwMB՞+S8kZP~/g +%q`^ja(**fԙ#LEa8 ;+Qm͍?2@ D4ύ.QH={rdP?6X/o/s>>7벗ԃs LLÏQR4#Ѻeyy9D-7icV25\j) /+@ K[]i1}T`(ʝ,j?HӝQ;5BMK+{W;>qK×k/b7%oStm-PucZP>+n9 ͈{Ga5t6 _m@ۇLkC> 0Z]ߥ,Qܲ:\NuZx؞o4W%HrvceA*H+Bo.Wf" yD)l67` c!v dN'>RO&zNA !#;9 FU3#D^ڋ< P+6,Q˟ Y 2v<:/&NӀh v{pe1ڼњn~mHՎ˪04+]YB0+6kq:u&|@!WCD6^rGjx h4K@C(;]5GX魖VI@װk WR_)o_0# <2s_LMqB{x2RSNO2p "I -峕Y[]6/dS^Vh@*^g NHlS 4~stYA&\NYra*H9yWM26[pĚnnLɞL|jc;t`@6>Ztc"VgZFU1n,M%<<HayF_p^dP~-EA xu^̐{QwZ2SZ7l`D—ɂ[ K!Xϑ~[{cOC=Ȫy c&Q/T6F3|^D\N}ZP9@Þ_gLFK0t!NǨ#e#/uU&D.bۋqU["f[BۅlM%XlO(ݎCM 5hA̡f@h -Q=V-WX .#q&LC t+/\=1Fs!RL|Tp906| J(%Iď%$T-BBE J=L9$}LÜ@%Х pkr48df'Vbg݄OؠG5:%Ñ&B6`=~+!el/% m!\z9nu :%[irS,$ZdZ; @*I^tD(c^ʭ[$ ZP7r! 
{ikripNM12LbCdfp3ոmX+tj^)H8`oN]oĠS8~yz`d'#08J *F_M(ٝz)fn282JA)V]u܁;tHc{ס8lK{U %?V'#4P{}٩I;UC(wkr q0,öm{xkS5,iH:3d#arO8M'veؘ,;p<p.^H hpStZ 1e+\RW*b{% AH ƒ3pG ΃2ѯPڜN]IH@N+|HiNW3=MNk{gΫ)Y[iuQAvȩ(U}GY$[VgL\_F&7y4pwT==(ĤFJt \[* }4|h)0;;E~Kw֟z#LQ,=nbس+grY(=|C L}iKY~-N(0LXe<}o1rTQױ¸&7?gxeB\NuZx5tW( q$c %TslW!%BrD ϩo z,~&I ?g]C . TD~n 蠮KF+v|0+N k0,B3O6AJˌT悗_q)>A:WNoj R$ <\ P-Z64rɦn3L_mi嬽Ɲ{>A2QoBRwW![5kf$o^ye7Fr ۠HTx)roQG•'=$ lvgu+l@\_JHQZRUUŝ-h`򳩵psu~PLndSV~LLzQ3^K@%ne u۸'3?^Y}*CpPJxG eKM^_Bt!{!! (-z-T^΢E ,ypQ& ؞ܩXQB>B酅0WP8#eq>';;qK N]mT}ً;C*`'W)~eߗm˵J.ȵJ;[gDx6o]qVQL1G{a>P}љS|>'.[xh?%5Y$FY||5Eı/>p잌 }leH @4cQz`8ϻI2Xe/k0V( 2,= *ĝ#C!ƸvF ?ptπ!##\bS Oha*4ĸ.R۔a{#, $aǦŪ4u|=?]p@y-V6U`|?3=V{\ǻ<f-//n!'[=>+,`XhS.ɘ87(*VY4yhCXI_qnDav}\OG&p"%,tf |>GPs$czI50.M~Y˿T]h{ѿX_Jd5 fEXB D:*X@:dxaZBR]9($;gX('l0$I*ӃF(8HӤZ:d^ff RfZv#f[5DI)κ|.B/&I_Ec<IǛ g"7* onը528vbnQ>zp-D"ːЭ>ڽ 7u E{J_U [$WM/o CZgp%,vJn4ϖTXrE\ DP(B)uJؔ>xl4Ĥ@**ELyt5ܛeޡt&Xo r#:Qd_K"ICZ|: #=⮙9NO9,1 a X [(#zGD]ß`H;Yz-l&CtѺ2ªX\4%6֞P/'斶N)p X5K=&p^h5]A+d(|F(QaLhE =S|BÏ2|NSJo Y0DzP8$Me<4#~'|/͕S^Fn1P &V03\ۂ?i3j>Ѱ!`-!4iƮ7-=1` A,KtYm=i+ˀ>l+N^ *9:d` ,'\- } QߛXF3qw-՛cIXLȓ *EUi" of==i^N.9&]Nq1Tkܺ +ģA3A6O~W::jq]}7wYԏ8^ Ro+]m&fu]))Z2rD*:ŀy=HTק )L'.19bH'QhYQ`8 ¹N3HVWZDb_s &ɴ*D36[Ei0:hBN[p ,1FyXV!rk! d7wIiJoĹ&L>qpF2^ >ЅEo+}nj[Df;]R:G{sڄsPh"=DbLZ~t뷴k󇊊SFg B@W($Xfȇ҈YmikL賞E.?& gK?IVSЍ|\a'ĜR}:v/+NIښ?&73a޹n>FjEv:z,I_c/ky>\7>rI9bj%hƏyJ%E R g=ր 3 MztB)ԛבbKJB A "|0i \)$т1 ߀길J4R"d;UxK0*BSCGOd-q޹)R&M~-Ӣ%[U[rW;L>pl.8rݠM\4DbDXNEج%HM0]=InD|(c`QN> /˘N̬ehiB+~!A!e}wQ!Nqt(}lG츀GXMXŊtRċ { e fjN{VTVD;3ڣcuDo=E`A}Jc:11RXyvzk UevNJ[_0*TY6 (\"zx~%5l ~u ̙0/l ֚mlȨ(r_:X=?V+ҟ"Z:۬H79FϴMeR i: qe _ 7ܨ#7C#o[WQcțn wX͐:u\Ԣ'^ nPEuga Os|S\pm~L"CI8*#ܣ*u~k>;ꭀ k.S\;4 1-P "@t̎Km6'ɴa|ũ%=SAX?>UY5["YކʿP5M}Z\"'#Dۣ4jZt`_5b! 
Ĩ{!-pn|M= q`T`8 I5#|ŠڥG &&N#564@!,JE !NˡޒlDh@1q@sYtՅ:'(.LMgGKmq|Ovg0%A8n6)TZ-4O_P<(q1 39=Oc {%_xs}\5  2禿+˱JruĬ_^ Y93^0eE9(IuuB|?mh{O(s>a +^sS:O/8ԁheUhk*=D0~N8GPXƟfUv4U\a`lݍANm[Ii!1(9weP%p L(k^dZUV|7Cmbjf j6N4+;釸X3Q _u_î$fDdCN ]׵|Xű>R{zeC[mm6xi8\>ls#O#չmL>h_cS fXN+10 tDt!A7Z k8;25KH}eJ3@;܈% u;ǵlDF$k>蓡ZT>[EwU%gf#q)͚xV__HܠxÚ~l( ]02Sx 6A2q]Y+P&-8E5X7 cw݄V .jacWsQrE}d=isO)HWv(|` )[x'hVwSAJd2$ig,K̴ V&Bӳ%zfK[̾zo!E9[h&B&fUgqvlBj<x^XI  ye]';0B_ %GuΒ NZA*3(H0톅nhZOX @ D a 'P C_C*D&Kt_LD.MWN(Ҟs,yOVn6 _I͞־R!ҹ3ى;1]6=s™h+e9^C=cxܷ b cȓELO<͞}13iDJ:Y pU,&2 s N(Wif._Hl+y[. Rqxs|O@:*:/u$ni{1޺f1VfFhyѣkIuWE;X>n~" ii\dIjZ:>{3<̊kQ%(yXt:28-{ Q۩JͬzCVsH4hW!F f/[Pԟ.fEZ#tyKǓ$-j97{J+M[~Ӿ' DҶ}Pê)iJy^o# 󹣢2 g4Sd K~1NoI3c ?g'әԞХ0>(@F>eZVjn,BԦuz3r!WctwgSj^e~nX@Ы k2.n[\;ٜ6 1q1GlogÖd Y|1 Ywgi7JQ'bY#kOM,(vB)s8Յ0Mnp}yTW)\:Vl8 %/' J? o!vm=Sլf\-)8Bf}Lf ͣ)Vo2wL8#ҕ^(it5)tR ͐B::H\jER$hw2J ahDPߺ" ̯2FzS47W7=)Dm\,a:"xAnIocՙ1̯*|+RA )Rj}R#fS>ܰuH%>Ɋ  C0s?{g6N N,jn_uT͙cyDD(T%7_;Һ n19\kFA!3d_pYfdz%J*m,5iHY=h|vI\{7fֳ!&{[ZL@άu?2(]qf c"nhI)fC(V):s$%2(Fbc {ԷJ/]/+%ƴ;i&euʟD(FQX,wmsikO5֫o3&u ma7qw3UwXsዧo$B&) 4.ZsR=$҄)Di{ {H }_f#FB#qH;jr[WQi3qCs";=XH{zInApTtTEw}XUV'y 51~PUrv-BWRV}j/ nW?<&Vz͋kQ 69HkӘ: SRGgV+޸X?@&uT ˴<]@FҘ|jNKEBZ ^p@I,sg<_,W#81헸o(!V[ˋVƣ)&$?LhNJUpI! R^w΂l0Nlk]n)A:t'xJ`paO9(t>+'FF  f`6%s6L> 1 =˥;J*TpܗZ)UE0KQLћ@ mJa^F_UwQ[ #GZ|@ʠ&E鉂DHL5dM١@xF@'t9C+k9A Hѫx?8[3V_AZ 21&%m{ĝjwxThy,|ש)\JU)؊HVNC+T0F&L,n0ìR(Ra7 jrG/ ׺`HX0닥[ -iPm rsrK%IܠB{# HĢQUc |ԌZL㮯ִ}{c6c1_ (9<տdg0Xj#j8D-J́TVr/NJH-N+![=_wZpk,I#.YX̤ zByXfXۉ%eRNijX dNo3&u;qwYOUwXs6Wr&\> =iNJ+>_LDoq}95]F/?c F3)'K= \"f1e( aJm΁i%6|J 1lr0}1G&o;8&҄5v9 A\LoOsɊ`Y:mA2if!oZU\} ;۷sE w9B. gHV(45d>lk]n)AJsU'xJ5\%c@(t>'r.GhTJg Y$v lOŽ) C]feȓuP\A 1<į@?Ұxy+v73W2"N,9>iҘGm_CכB169:,f@:Moq =1]v` 4c,biq›̿. 
}8=;:KK(/璸CSJ#.t?.NCۓrߚ߆IjTJhi?z'4xz~.e~E?#_-9?{Fn3k>80\PW谴QR*e5yG!D ZV'BFSo}F?4+F*FrpSv%QN+)n*ڌc3$ eOݿ7Yul{*/7J5;2LhXĞM(ꫮy*r@6;*'/ތRcVcw~~zzdBnyC>Tvnxr>^Xҏ僪Kw˹BZY vK 8*[:؄7K/Jjuhg2ܡHkHPs<!HIWrP t>$u'G4f0et8>6fU=άw%8H) tn6jV[OI:o+݊]$2$N^d1r{O;Pq!e=LtDmf/=xB(Ȉه}[%NrIGP^Sků# owvs]p0NFgRvc;˓%aJyFb׷][Co?.N=LX2:lQ܄2gz#$Mҳ'?gqhΐM`w놾,tYrX#ښG5nvYA G?|x9~l^*#JPKKDݒ/pYLZpBj#+;ٞ I >OY|lɈXт4O߃F&֨JOV1#R&}R0sA竰+?⽟Nhko3M?rFo,F\atALJȋS0kwl}<&Y|᝽]28GN%vr@8'x]I2z=#488O*nu>A+[DmNclH8g-ɆԾCZ-ř#fk51CSƍNy0)3FLf.&tBy~є c#/x{(30JqKtk1rB>6 @4NY vӷCnRq* Li%u(eQbF]+BV Ihшd#Ķbg|8 F ue+ /)pY~d(f3:@N|4|iF[!As^;sݩSyp@.k]W!Pa$p&M56EbiMÝ4la !dNӣ@+#^j ) :2˨xWYOXUFX̐=WFb ba׼!:%`]{Oԕ xm-`jT wQޞ]NIN걸є*I %"V<)0/"RNkY+:cA[$P̍ 9F6&-rFѕy'EAh :U;P& *dX*ݔ#z^, YF,^I$lIfX:+DFN3f~%{MHM%ZJh_VLi$'I|sU]UoJ²Y~p:w> ^ U"e!@VMFnŽ1wsB1z&--SeoQ_6[e-K[1 5VK@w92W8W>KJ߳W# 3/WygpG´\Xf;b#d9B&~mâ)_׬ ؀jIV4z#tpY]!iKfvux~X\ W~ dYpN?,\L;;׸~˂㓽&'xp-RFOlV'iqYST%=9>wk7\,07o1U3ڄ^ݱiВUBsp{dZʉhWъ=)jj);8~?%,EeC &ߦȢù:^c?nڈ alV]!c;jj!yٹiF Avfrɽ6#MMɎ-~u=$Ha 35E>UarеΫH7xvAž&Vo4)[)*Bv歚sKV䔬IHp:!ɿLhbצrZ#iԇhi;dgyLgȐ;,E/iv4Y$1} qdT'L!X=M͖:tc`& Ljm: UYح6}Hz-kG֌'\SRX!<́l&ʮK:ƃki9*Ueׅo~6+/nUQ*h%Pk}+Q8K3>}+ڡmy"k0^ggߨ恊w>r/evB 7yO!J$\}M'L6Djð8ct %A>}bT?#hvW! UB ;U&uҴYDFtƤa(Y.6+ {qqT/Xy!u@F6} > (/]T1!1^~ǰ!>"ϗ3lib$6(#?xDYLp0GY@v+)A ʂ'm}RLiɕWyeb*®;ϫvZ (1yͽA W{%-v7dS2'bm[P30p K<ww5dt MfR\!@2>-y S !#Bw sb ]06HkqRvc)qӃ54^䠨Cjt6ظ|j/6B[)daQ֥N:TIi@[xD! 5^CDo5GgR_Z?%o.XH0{5K!:i[F~*4MP)^(2j/TM! Ҭûz@D%bMW=FUaV%)6CvKfUY@U?g@\ y?8#dm L)Uˤ#u=`v{]qlr6$&#AK 5T8&ᶩO6b/9@&7~o=Y1m82AAKvMc͇6֒k u17N ΌgfT!ChILw6Q6`=c9}t;"1(]TFVW<B$\MY'svݽI;Rs@%H&WH xHPAjc FL]uw./G]4aEm<ӊhD-v/eS59n dd;zȵd˪nh1A A-dir}^FV*Գ5{|aja6%_DpDϑ'\ ^ۿ8pw[#!MV̴{7cbB+#D0T1櫟oz<r[Q^(4HEU LLm:lxtdܩX "wNK/41곌K 8h`es, H5Pd"WX=*!bPˤh La魻kd2%CdXfjĽh@g=lD铐vyjųHbi_Ok18l4qJK}x_ 6b1E-'" (e+bIo0+&nGnF)f8qmW0R;VRӪ.V^TOzX^6n<$Ö@a0r L0DMjbVmljN(nr^.@! 
c#Irz29W3q4V' wA|أ `ٴUL[}Md1 AmB491D+ 7W}WV D罇%ขC?~LQU3ntɕ9*s%W" ;[w1ۯ-ZgYZW c>/0AEiQjډ1U3⎼(J 7+EyT%}ed5+QuSQG^],={ц};אA J4ݘ |ZD@;l~9/N@ @326׫@P=\5 4L״&\KwbˈǼ֬zolrDd,G;3zH[pƗKTL&N/^wnSQHRF _U JU9t !Ҷ~$fKt_VPm` c]]K&U BT&'oMsWA6Cb> gX괽KϕN%M >S 3˼`+o 2<_^5(H %>dX]痯֨ 0yY}3lh<YT_Q4 E;YaRdJ3##Zv"_+`rDSrV-cBYw[uƂ4F.T~~`U(@&~x&cdNު#7* <Hfo$8`,΀ U+nyvV/M8`4eh%ă/-Y"7mr2~46!qP 9D!PeX,Q!@\Qאј3.^ͶwԾ 絈?0eV㎷VpS 7f _`$8s|:㜯!2@b ɠ bӌA. \ @!sFzXl )1i 4L;xUhev=_[D 7 ̼d)FzKqEX ~H(L9\*"Ǻ17!%70rF%y3n?OwgxE)hT|&PJ[ډUDSWs@J{1U@" sKݝEZΩy2JB7n`^bЧTXo{M2zM@baΚ$ΡK#6;qz|fxxFB$NZb7P8EW':G[p9Rυn霅YKL`DzAlgF?G\'z>tFe,ew7Hrx(wgd%Loe6G_InK Ld\)Ԁkε,߀F@9kE[ɖ(7bdhdon i͵:'ٔT?lHp˵C3Vmm6`:_z^hdfˑ6^Jm*&,2iw5zFEKNd8Rn'x /~ Mm/%:٬~^?~x xČ:5|";47O*xpƁxB`bJŇ].hܩM.t<îC.y { , 6%"K046Z}&'P j%6t \8ē5ePϷULȴ媑|hŒpغS9(͊|^F'o@<3= - ujŐ@o8vVNt< N,p%"+%wf}X2PJ ;?RHB p8>ePVH?7ݵ6AlI $@TXcQCj/ V/cn-=>wim8)Tڴ)\e!q tq.Vj4ٓ;Vѥ(t-q%x;/ 1.j:iy* D@SLukc\D4Tp ؀4fS:p>4K]pp&UN1S63A (qUe@CF %\ԙ $ "Ny?qχjS~]sL̷vU^z՞ wϲ0k7UK/[Z&U`Qx]$q|CIiE~cx q3 '9=*', ba.?1ExݔƻR{WBٝ]3M wuPJ"ck n}'xᷪNm6ct)#v2$zȒK+[d~1OǕ eoq Z[8W3I7O T0>Q9Քyͳ'IUj΀tcB,2g=A3l ?ꔗS܎T`G*Nav#)"œsSF/G5ʌ/ppEEPGoi0=k` 5\4{z5HQAO؍EA$O偸 :aYEma $s5Ef TzLb+ڵG%fxvtg|_ȔA Ԏ+@2yU0N,'&A2I^ۂM[z]kKr >Zޜ5 2i>̼Тl-zrެ֡M,*'1y- )KTygzMFg z.V۲~Pٴ2@"Dq@ `) @u^;@DFg)Pz)#'D +bm/B`gm3,+ xNoK&.9gY(( ,l#ӣ +s+[.% ę8e *»vmM%iʫ<"Pq!\t$)+AP t0n} 546ib!^ ׃vE5tleܥ,Q/$ ܼbf-f#3īSߡ̕++gg ^(*d#[GШVȝCZtSL61 s@@`"*̡ȧ3Gײ@u;[Zq *[F*,n _6kE.^ٛ]lK#ҬEL;飌@%-Dw#lD@02MO"/R?[g";mI=~IlʣkNJS:%K/+q sw|)]VSm"f틲jϒݿW4~P UEgZF~&S#m4t rC7>{/LqG~0|Nw+w{oK",UXAQyJP_x@7x` l'0Q@F8UmwۚX4#`6p%XαC u*KPkM)+;y6z^gǹU cHC4#@bi!r~T%@oCCeDfN4,Ax*ϋ2e&> vF鵾Ald@ b*w9ADe*0pBr?=Ǟh9DS#Iӓ@21_"W[xL.;ĬL`?:R o.hj_s߅ >|T!Mz.RZ?d c3 .28 e;k>`V[UfoD|okWolSHV*P 1eUp&oP|ď=PIN[(1Z{|psbN 8zvHT unC&b9wlj7ؚGR@3 %-Xɠa<$byfbQ9c{ Ϩ'=fc5%GK[U|^J$_T8˗I6&C#r3tٙT FF}ddfFv_7׮X)exeR"V}GŠ[`,G} XN+ M#*s'J1ŋB9%guV]b\HXVT.ry#puD;S &s֘2} w}4qUd6uyfpO,vr: ?\A|ȯBN9E(i/e{3H|HM{yYpI_k\ҴcpZA-K ԡJ7 sWmp 
K䣫1ʵ(ƃ?@d6bX(7X֌yA[z1Yƍ7}ddf\Ʊl}LAgu[ʦ(Q`qHÍgxaPBfp􄻙#%k&b(Pq;Hn#pWVWYIPcĞ*"Aw ^$QE<$V02ֿe'lr_ZS>E\iuqQDh .0?:q E~3|^Tl$H. L5R_\e;c&wW)葺 A@R"ITNbs_F%,,,M٘> (E6v2;K1,3&̇d4j+,b!F΄`1`7i]KoπߘzqZ)~Q  BMw¥pe(/i0}e^A{((B '> {4pbO|NOuVPA7 :#>7R@z`RNy f <QV\@bwu!8Hh3/fjזǸJ42}0ӣWN|8+}݊TQj`cưUڣBEGIoF7zjX1ڎH"khe~S9+q3y՟=*wV =Y_l=/[MQRO@fjzQy"ܠE=;p ',2YӖ2MRbK3+hD|PT\@@-v#Dt{\>eX\)_4/o_רTFtA- lE}fSjs-1@oZ޸&|52A*|wNv]Dĺpwy(JԓG P]k}:*ZuKNֲJ urǞו DۺOcӔoG?" .q ےLpnʣyggG)gXc27uJ*P1lPc:@7քi2Mz?2m)S( Ziݪ Pd1m lbv͞=ز9XD5(y@l UNjmt6)@Y9I;Q_ޙyn2I_a6wͮ,3Xe r՞U: {c I[ONytrZk}U'ӭT_3FY\?k3AFhaH]( qNWJ+6ٷM80.e`VyI>.NvY-w~Roa\raް3*~!-5vJ! ʛL40O DjRS_(>/bc/t:Oᗹ#Jb <2"s=}BxǃU=4S 'Jcv1ONY/z;hJ1//7Fs,,dƞ)ÜkR>ߘDu1J#\B w©)>(lC A E۠YlڷU1S*,Fhv4/]“AW f$ꗃ[ ]X SOUü~+AdOvL3ʜ"eR0@'q„:}]sAk"fEuJ3܎j$0\Zošj74NcCcp%>556ͰIkhC8# "v)ؕcZDu>hm|gC:U܆eTWaZASϵ{&q QZ@&K[>F-7%|TI\pGLuVɦ.TVb2v;YMtVe :ZU%h#Kr;7\#JhTI^1=ܬNЕBn%y(?G)s {hߛP#azHAc}|;$aj>SmKHpKq*Hw3 ),>7* uZz*ES',j1@ IR)}VYOˬ3̑h'YqIsBsĭ9 |w+k)/><ݨܵK¼I21(ʨsn^!'PVӣg"Z$As5zw5e63%c} Vbl<aKIZDKkq`8b,` tNtd$wǝ'П Ϳ*_|Eow[p59`_KaH&@S3fvCxCЯ%]`NE:ZSס#oHL"v\36 \Wղp-Z 굟 foqF[wkt )Yka3,QSr?),5PTfsb䴾Ih!I+p )+r_$hoCZ'lnqnQiq4Æ=ry@A~U[> B!<]M H5,[SjÀM1 n,O{4LjRe&L#{s*\\K^hx PqV$S`jhz{4t8GT"v~7X|JjU}x:wKA ?kujnIT&}-Drv8y 2R&S GR6v )e]wMKaqi4>6>U*r y杫8so3u քƗM4T͆t>\$kzrb\V.aZ}NU,Y:s:`rE!nKC D'*aե•J@*`sڬKvDgl`ͧc;-T`8J"d3N'3odknQ9CݤYLjG~ʿn_HCm.Idw-'Ti<4AxkANp=Hm4۽}ޢ&bRgmDƖg歸n *cæW MfﴻP$Pآ<Λ1 :!򧊳Y$__ y%`::.(܅]ۥ\ ZQ #q ,͟aop>/<h;iE6݌ިcCFKlR}T GiqmԉiQE;AtX"~ ciԣ>Bꖂ )pj&t-~~"MCN֚|ɧC |l̙<|p`x%YD ik/g9>;_q5S%$"=JA6rgiZ-1rӜZ'Zn'l2Vz7b욳䷤P5@vq5g=*;z J rQ_(s.9'\$lkZؾgKiniH]/<͠4WYaX`Vl,5"Q_?366dg[`m*-@ghD&_AGnj|i5w^͏Ac~ _߉{FN$f-KM`K9SD8+L0MPaç4NsvپK~8- 9}4v'.lV];>)vQʥ("9n_yj|ϵj;L̼@HnrŲRKr/2(CS<*{c ڧc59I:R,Hjv0tuq#Hr7uQˣ0(4Vh W5ߡI/T̴^ Y'@.LO IRHj^\K.2`ك(̩Cic -6~OTIN32}I--jĚZ_]wۓ` ?L2؏3Q᨜UE) 4}T5?1}(Rw"!͐$gk;!Nq;8-&[/D] !Ho/Iz .iM>8]FSS9^\TCU(X~TXꦹA|ʄjJPU"lQ$W$_Pm2GԎTiٷZ{^ۊ{j^Li;\Zs,MT9&"h4l[O&d\Zg%וl@2NTي=n yb ns 
4B$@[c(tuU#,(/kLV2\ZU'M:q*Vv('e;S@+e[e=Wcve%fP'@>.`r|E'3Mo_Psudmc_&591euzbtvA46\ql`⎧-}6a2t!pT){ NM@Cyob֫ \_^U~Zi3U9b` wlb@8ȜhXz)lm Ilf ID{Ar{ gÙgyEPdHb Uu~౧v]oc^'A[4,¨%ːnkv ;,oSBLZ%# Rbv&M vjMv9׊u<{B6ة=DҺ> +:Eu 2nybS?@`G*h/i&ye bpKٌT j]0#[;| $I$]^@&)>_QhqlQCvxCaU.t7gU, 1*:e ?hGEw  TP2XEr$cw'C!J04漓{ yHP$)?;"ޠ~sz=6zDI̻PKH  {gΚ|M %+y[NOJ׈i y@A-*|tATyt˲`CK7!<Њ t7 n@+Cx(%j͟ g`XTL#L;de K*6>̀pl d7_e?z2oQ#kO25WN/N_|jCut Ӻ8Pe+uIjz&UJ=Q@>.po55&Qŧ\xP&C/ Q'e  emd@B*d `A)F]?0{adShEL8z?߅5[y? tQ6\=nu$IFo=x6rb`-UrzJ~J3q8Lܰ[OX0惑ugEڋ-DTnr4H70<;ud|caɟ>0uCy'R"_x&f\T.saЬ,-I$|˥RaDqZGF_:O'1?/U?Ԏ5l!yB=tG[;K t˞i66:/ita~¥#ZWp/%%L6Z&<0c m|{AC ebq}_v'45@IA%˷y6'%?}(닫Tv;" 䣯s׏w0C 'f]]2/R0(%n渮꽾JdFRח11Bq=2$RV sչSp!Ķ ţ[W HR +v(F9{.V* » ppՎhM'5qb/E^*PR;TjYE,DMK-gGWX.Z7fWw ;-Dr×I&dgz,hι6cA9d:Ly#]g8 3$ =xۼnE4v#W&W.S`ݳ_;&:O$B:6Q_KL=p"+4akmNo *O&5fb<( :*V݊M]`}x] d֚$0kw~0|)v@Q_sNpou0c ٓ)7YXcv:]{٠EVVqA`"X'3 QLuON(q '0gy7~>, \h(\GsUJiz`wY(1w"c\TB̷AmAٵ Q޾5oh͛w.z˼[U.~/!Kxdлj=YԷ<] Փۋ_#kZD'OwɁ8PLewMϮ[jx1I͋$+s9Pyi{ٶ>,~ULu ӐiH$wmχ6C"PumW`n83j +{%&vYKξN$n.[eO^ZOʂ爜e"`{T\w4F ~;u潜5dvX'g{9mA`l^^8ӫdUJ <;^oaY<6iOR65t*F?/S!Bj0[Ȏu8GbAqPmdDGoae>Ahv Ȣ# "h0bb-QCk CkeK}vh~fPTaySU9njK$B?k郚O%QHv<0gݢr{_h"h#:< w { ֡DnL lliYk2v,ׅgK,X~(|YOWəދ:U= ].7[ OBS6|Kg4goy pCU UX!.)525BqE? hmv+$䓆B^pc;hV-L4(m>8~5tSt~ߝN_l~$2 |eHVmY|Y Cq-`ή }T@8"WcDnyP!T`. 4' b TCңT@~ns"C&HѸL=h1:]b3r>J%(H^Ҳh-hwGBTAhgi!hМKw}ChD[Us5opB}W^j ^"fpM{,%1x? ˋf!FsE\r/2@0ݷUB4VR%>y H( u l>#?G4 S1.U;}P@bB. 
@ [/6\YN0`O'> `WZG?gť할RS_If&&V*.sȈ vE+Nj)B,f1ETgfx\T MFw9B'SEGQCO[:6H.&9hBKAX3={w_d2TETn"T+8Se[]pq!QjZK j*"4oq>'O{|O'0ڻ3};R4Q2INLG&t]۹i-$IT#~68( lVp_כE32-emlVm بN2֢Ds16 +M(ؒLi!@){VQ h%J{Ul!/yFPWivRǯQ\3ī!cC!qYx$`B6@j;/d+_ 3*J5/vU]A[(9o~| WrJbH}]6_90u% =fe:@ j'ݞ=+F# Oiv֪LL{/ۤhXոhfaJLNmtT Z64F@Xwr /u:]ܿޢ$uJe2:esYTŒ80nM%}yJiS!wOVv$#R_U=\,h@kmH=LKIII葑È6tM"gPZ00#?WbnRƆD\;0o{N'pR\Djй^b;w{TuܦQ)D^DіJΗ 4r^pjѵӘtYA#_XDصZaDrQ# AQ|j$t &GU geZ*ɔs#]CU]uόb0:r+)Uu$, >fߦn@,6v}'^qւMRw7/01] !_`/0% T=M[DCv6?-~*jcF<\0Ab)XCY[<QfFwcAGH+?ŷI.$C|4O$ ٚ^}>V<#J2Ep܄Q ^Y[>^"bmC}ڱ-/c{&>Hrvw=E1T;TOn*ibhUF`ZS.-.  Ă[,xn5whhG{`E  җig]7֭ j\`-IJ)2P6աkuiUZ=ykQRq|_\ܬl/hJάaq̳bIsi0OB+K=3sc3s_SB0l |jk~mI`º)ۍO@/}ި gEiQWD/Gw#Ӷ/yuX/EX TR5Ici8\ogPҼhGVN}6n D@moC~Pw۪oS?S9 XIel蘯-fDVhXpFrz@?O6ArS#B}XI) Ɓ2<ߍxOA Ytާ6d>;7}$[ `lK\Tԟ8B> X}M96=MXsw 0^N :ɢDTv<cQ^x>#(Sʇtob9LIkގ b1AVMf:k77dڃ' ƞkj^͕byl)ѢzGR g6U ;Y{_Ȫ.k8:$BTwtM#Qi@]7m`~nP66\c*m5ßT,E`6lGDΙqT_L+0FVrb\'HA7[RLA`!5G߾X|sW*zv(#>j"WV;8x/GoN%e/1J>`wY(1w"#CQ+)T%6&O+{oc"YX[ q*'#|XR --_&F TH}^FoiXpML{P>~ h"|HkAaO*I;i4g=DOҊI[;WpꟚ_!|e%9Wq"~(.K<"nXTK@tcmw.u1&>AMJz%bhb*/NjT(v ǭq,u>_XTf{6oOt|pH @o},&Bd]Vu5VE?.D)O +6_u׸, R`lP<2˒X=r0:AU};2֖/yMA8jf^JQ#z4f]T* jL)lUޛ I:XD.B̟%\~Ib{Rv.|J#N؊ܾV>^[9%| -fn7}dwacwY`Qr/R| };Ћ&;n~fn|BTr#u r0dzVԄR:qQNR 땰'z.xd;*9D獇;"M6!8;|+`КeLkVArD`ok@c6J H5Su_(o3x{2Hk¶ګ?:{8l]b8 fq-Uu; .3H:aY n UIzqE?$ct-)<#V#EuʳUߣmESb2͑$-x-ȣ.1Ą< zqGVEȘA):8I 0H{}1XZ&2ԕ,e 0HwH[0Nb+a6M1zL`DbhNqYdPQ~-d1Lꍴq2j]TQ^3j'i,3J^_^|CI^4d$X>WD=z:[1*BaTVrnÕ?^ 3a3.G Za3/a LGrXB@޼vxhl=ƠaD+G>n1dL1$N2!ƑH 7:}0;= RaX54zb>AxHԜ IʉxAa[=aoq>JbR| C۶7[frQe12{=YіPͤ= W0[^`ڧ(+;l~퉥&xuE:u_ojj7OôvҝDlPܿiO" =)NK<&1riF0 UА*{aT:,,?% %JxSn$$? 
"a]CiL{ 'Qg5VGkZ# `pgD@pg|% )z>#5K^G&2_N$a `G~5`߾y=eXL y3Uc%ЍUzCdL4B6On b8 шQe]2>V~e6aPTo}F0x/mVcͽ nrCʾY-RĤH"^*j$viNrOۛRnG2#uV ':J&<=>l;k:W͊8ݙb4*0?O=f#݆wX OKD^UZ+//y,f^nrM"no.k@4_gYT;fox!&[3~pֆ HD2̀\ mڞ p#(L>FBSb[iAgc?F̸e97۝p#Zi} hy"20^?/CQSpL>ݮeRiiEdYjnmQKq-7Qyu:@kaÛxc6 5bE%jeٚsC R>0 /Oh$ `$ҪتΣ' $s16O(e7ͬ@*%rtVwznqI=rjQ?/p0\|&WTՐ|iVDr{Dp%s33v[  ?{-;p)B=h`UUn jewIf^ER!ZT1_e\|Ҟ2*kb}<,Ȇ fznD亰I.9̤lڥEԝvKjcln iUrxDW̠1}%lû}Ϣ2| m:_^72]K>"~^I\z7 tcSZ ];\0K-@і¯*1>@!kJzpv=/fkS%UTjLj&F?c[i^eHo*bЩ<޺V3%4o{Up=FV(+6TBq-. `(ZMWKhq'~Mg|:쬭Q{=`zGtbM<*}VCSlPZVKg T¶Iy0OSuV=l^}+sYTEc!ZPU T͢ߖW#=vr;^ua0*#kZvAn/VHo/&khaEn0H\+ŐcKM<^2lnL n%ŀ(c7mB/[U5(E+:*v| a:90TrQJl^`ψMU.Q0Yp2ңT=LĶ%hCk}0؇狜 ka|#)b4Z%0ayf&R-SyUƨrߊhFwB̪|)K){(nMiBȆIF1P<L|JhV+Y\yc_iwnvF5%c;s=`qQ1ޮ15|"^v>)O&W)K ` dj.FnpШ:?wgf'fMcӢަE/!N1J-lrKW-3@(e{:Ljۆ$9&ݷgo*'1'BVsBkk1CƳ4ob'ФE:BJ?Ikmj˲0v~}я S:jWhB:BI|SF[j45BљQVE 2!])js:tJ=IwX~-ҰxP) ,9;!H;zp<,O$3[Ek]u:HgS_k0{SmpM,]ܴoB4kEu HseLAh =aaF)Y*WK2,7 h4}LE 8b~U4 ]Oa\(*붨U?{'͐Ԁ|ce2u1 ~zSI9`BUxp(HXM>x tħibc¾g0I@h^I&׊x 7l?)X{Pt]BO9 "_:` 7}8.6BS X?,5}$?Xda`<;۲m7Z2vO-d2} }Hug9Vx2$&Nߠ {>$:%xZ)SBv [a\?P7.+}\'^,2㌖\P"z΍}fRc'AW948oA {Maa g6'a ٨EQZ}՛rzQ(4.Veb[ZdQة} 6V@o-Q4P 6?Q}eeVU6~}St·BXI4슂I/ɤ.\mhi7 vW׷@eaկJC\hipi4#h&8EwSQ/R0kSO-㺓1.#|0+]':R{{5`_גa9O|T Iw n1Z/#)~G{хv6Bṉz>òA¡o 8.-9/@sH~0.rGw :eʳqR6|ix*+W\\(GǏFDY<2 "g"sTm`0fZyp _E|ji!>Xf1$PN0};J_22fiv6n{IƔD"c@:[bZpsawu [gZ1V9nf#;*6Q e*sAK r+7}oA2wF>SeԚZE%W&؅mc#QJho _%*؝ZSw7*I(D`κFAc/"P/Y1?\zK3 1>`/|+-o tim|Z{rzm¥x ꔆ8Vž0 YivԇZ"PXȆ'?l +AU/)Ƕ(Dj(_&Inc}> $rY^LHMWI Q{蜉z9]$霤,N6R2od%wME)hzӌ]=oul.mA6!qMH t=IAM'rLBpjr6#pFezJJDZh6c|G:V4>>%MS[x~zf~~´^a_w(1:5j;΍O/]+%IsBd=YżW4CtZ~ց*Jz\ᦅX؞#fy\6oOh^0yflܖcO30{<Q nV4Tvg|k#Ce]5 Z, (yL~|*#D/mE'iw 2xfLdu=>庇WH*CB"~B!Mͦ~B 7hRYoO*tѾ`iZ7tϔ PsxSJi$n*ns'e%Ӳ v%A܊tL4ǐ)2^!vwH;RܰLgۖNQqZ/\ĕL]“>%`q-<&je{C CKr}՗_INMSnhEFɁ F5u&*Z9Yd̈́(ywc^n]jf SAiWzہ!1yX*P`3 xpM+,4ᐗ75-*ē[^\UIϦɫA9 _Cl(/M[RmytU=O7Ma }y,bg?x5$,v5Cz''iinpC$:I[03ɨwp_J<~ vI?t{Fy  MɑFS/zZľ KK YH*XdDy~mHycIv,=+e/Bq46B$xfoփXVXg׊w1n+lMW''G2͔Zo_w-} Q[Rl˅!gG 
GDѬC7BJuaN8uF(7ZjOU밤lrq9 4#)oc[VWp@5gQ3LLiDIB6U9/gj4I QNS:8lD}ˏp$$/n;lD3>Y`Dhk =gr:O2x' Ф*Tc8^Yj-^]{u&\PIO8eyBpZk%8 |mgn߀Qo,!Hɶ c^ai2 P@/'2fA-Om'N>Yd2?I0d}& e~(nS(3$jqM+F*j]yU ~)вLFԗZ‘RۏCp.X?(W`cw~#6ԚSVfg>]*iXdteGLm ̕9xѕaHNi*&rٳDT5 J#ׄ1GBȔo#HO) Y1BqEgn9^@[`l0"E&NI!5#@ WQhMQSTF E۬T8̸ 5dz/A(ul ©0<C_Rk.7 z {m7M_<8TS2xCbն,S/#ђ~d&a}wE8渉Sg- +Tb2d_h2Gne"* mIbV0kfCL &kŕP@5doV7LӬ? ,'+ 9Jf 4rځ%-p:/vFj/^nI}7AZKF}8!D*.N0ekP3 O'tVCĈtH[*aV.黰T/-~˫Ry- d !iڲ !=g #)Z&Š2tSWy-zѪb&&J['Z|u$oƩooQ@Co鬿Mx/Zw2hVJ|xCv3cԑILY#u?c|E$DA#s:Zs^݄l  ZʬP]E9 X'k[["VȵwaW}ZQ6*k&1; ȵ. x>- @dv&\XmLФhD"vPÕ>Jtڍ JBu`2|jT~P70vKBĊFQKeoE򦊄u 3E \Rr۠#đW֨ş;0s>^0Tj J 9l?D%8þ ѪW y_<ݕ`/~ 31"@@?9|prٟ^?܍/sČ8Z5|PFr+2q|ͱC! *}6ÄW+=nm\>ȽdGi0,IWEa[9UۂR&Ԍ*NJl 50FD{TopM+'|y n?t٧xif5RM{YAL]_$3+*$GEKSBFGYPSW݈*},8^omڳ yz\R]szBu=[T}3H;ݎ0%ݕ@޺Oi%f1Wvsp?0g,]A-Ndˈa 9[["Vȵo X;пG5'WF[k/H^7q26RYz#AE?yItf|uW4P2ĥ?h @=8+>LpD"<8~/k>Ahw!XzsbhLc9@˳e$F,qGEi"i"MҩJti "NFOSD5xՎJ HPCqFQ#nHØ:'wy܌$v^lzNXuM?tv1l$ܙ/OV C50DzC |r7a]܌"IGrٌ. H.]ϦvS ma?3\,YXeS[:URXvek9LFo(x.XnzM6qG[`4F#j Jٓ pW/}YeWӱ_@he#j.90E %t b#QӘ!b!K\/*fWVE_RITk~''$ Ӄɟ  IzC}opB}~Y|烃+yqj2Xj:*{Ķ>'hխ=v &a<D Y(:T TuVk*g&N:f[m>80Ng y[6Ejü@\Ћ؃j9?b%^ӗL'{ycSZ)Q}x2pyM[!TBxך؍$/a/%OWRH~O\"èٻu ;8B_17 jˎ ʾTci`)//÷i53) qNmx&+fhxd_Սr87I"WtkQ$xcOOZkA!q.,]xj A%TN'뛱t FZF"n9~- Mu=Q>BƖ1b h4,T0X$x$C/vB>a{m;(˙k~j'AY{"Tk=ޱ =^+Tx4 }\YvxW@ ^d?J6 48Q&)9jhhwa8ȕS۽zCL[)><#Jp9!mxBq\! QR@Tɢ氍S! 
(+՟%ss{Ef/N Pԛ7=4Ѯl`Dv˝ lȽ*<"kKf%IU-l~DFhCtԮ0\:As;'D`І w[ϲ4j im;-ioanNsNm7Ysš|{߯<۞yTB)U+A&R,r7Qn)% {ZԊRTѷKS/> h cEWG;=X鿭BcȂ!oX)Wj8No#oaC4Uyo[פ|BŐ5wѴTrWqf A&DJINLINLa'l䯨瀍sR1^4*\ PޱO¬mbg{ OW$PXI2KE;7"=!M6#葈/m /fID;]qI8hS;j5e|+С0cn)t{_ Q73 џ4 s&TISR6Sjõs\LuŦKn]jhO8C&lI*Op*w4MɖUDfME>@/1Cc_R5~(RDDJiouK^#Ue/tO'\?fp; 0WQ}eieu)K< t9%BUNryYwIէ4X؈7WNcyGvja{DKs.{*0¢6(y)lDS=,: ҷKzڼ%T3\v(Ϛ?Ky WAh>d񺔽Ckd:料'mĚRzNnn)o;D!YJ[9w|z;)튦:XV(jjbF1n9MIhF.gݒ1G6zL@ʶL`n K1,$IޤڄHm괺9 (dk5j2tU* $ ;ɡ!_n+,,p6Aϕځh^O9Z sxngv@c)Y& ٚ!W~` }jhB{m,2?L?NMpS2Sٻ@-xm.DUK{~Lq tKT( M|OY)9vsO!RjEWbC(AfIh{i`fU͋x(Ia7mL_1jRy'XIW2_ )b!jʈ{x7g[q֜NgBxREsGZ̃: ‘$yTy}e4دۣz*ܚ_`sGHIrbeѿ+%e6)W2ZvC,o8ф9y>H*&#kgki͏|0,6 VllrJlG Q "=&}nweh"]$3d_wdy@Di^ }f]^qgnA:KZ;Foq1:Z@m*mWI0~ҳ4|WYRKbP&Dz|DTw[cDKseG/iYj j9B!y̒ݎQ3LhYI2‡nq᠘; uQ|_tk˘Rv̀45PN&E 3Xv `E5OJjͯt~!6PZ0=7pv|;m1v|)tX &pG|x1\ѵ߮4`OZjKn[_: F(=-XN.klz j2\ XYpy4q"+gnn=G7 4<>+/[b'/́w^ q]̘֯El%Ʀ_K'1L[!QyDT}j#]1MqwdVh=la7p&ȝ7g'+3 +n8( \%<xDa_5`{L(8ȘyӻVD.٪~F$$TC4 i"09Z U5T+E~sm%ASsI4ڗbm1gQBNIq[E!N1MC0v3~ЈeHާ>" mCFE-#98tU"22yo tFŬͩcV?nr{\;aģR`WKJm_@-:!"%?rb،E]+ƎQN{shUMx\p|i^sm%:6押tTi;c]~`>~Tݒ) So$m 8|F:97UgY[L5}Ͻր&#o70t"(u6<'`q0̒+w"B2-]7 -pb ~wJ|,DV@ֶ&D2 U Dbd^P˵S MmERєU5twܶ$'Iv:` @ *AꗺO`"ozJL\E }/Cto "]Ty;vsh@Q lxHRAPX XA(Gx.qgؠY? H35f+Frv_P|*`hż U pB V_/}`A}P/rhƸw6s{:i*)-BdXY,_Z\mKlgfiz;> ?~0K9m6hsaRZl0ԫ% ؐ )Oze2Xp \zɱ$ <-fJU|QdwHDb">6[ w ѴLh9sy}p y>aNic бr2+I=NXG\[T\M Doc q^kXty7 yMb!8+1bЍH|^5{iH_ׄX#v 8@#=K;\g- "AaO^F' 4M('9qmVM6T>L֜VH_eσ^5D@/V:iinpC$P$eTNZ葈/S>s׾&m\n"p~tCw~5Wj=#?<`ě;F^uJ\Gޛ$>Ahl˂&P_Q? 
,S$CѾXF ^LGzrI&'Ӳ: Eeo_T `[q8_ O\]@Ė2h>,PP~0ȓbxN i]㳻Y@C8 Q?Ewפ|R dȺQ,7ۄvI`+ a@"PLU{w96f7ܝ3(caL UǗtҊr| n"-PVUA spHqB&xS* bdgэw\P{@"s -s&G_Lm!.0jUĖV;c1aƖψo(xR'4\ՂCst4Il̡gg1)f8PnG}W!۷SxLcZ0eHz#6һ)hTkv*74H iHa[_gYM9" e) yK M½XgN5&?gՑk31&ⶺc䋣`7̏GOi7gq[/v8za v⤍"xħ`>5/v~7iϧ(YdÎOQQ]LbӔyWnDq-|)l7hX*uEh )XCvVPGWYD #i~A^%M۲-ۅRDfeQCO[SEQ(;RjPyHqȖ ٟ'kQ)xdIt AQgDh /XwP[U&_%t6V*h ªU# D um_\>5 -L(c;#ݧ8;cY?.`od,hz|) k<;OSy_įX4C6ެy}n.lsqj|69hgDd΅8t9_3 zK7dDfy<PY:LE}@{pȇ5eo Um߽g7޽>7o'CmYEVՐ˨lӷZcgۿt|Ng @;.iRaDZHDԶd]pq!Q肌5[νJp"ϕq`fu~o rsD6KgV%,@z=z>k>-LBW!wPeh1mEzOMxws첰ށGՌv0a-K<8mtK_B$BV# Lq t$VdJy?FX|B`8n<6[Z(h+K#g8_4)1 R!GBsŠuPdD=^M2yC5fWL7Ю0_!Bn n8dZVN~]g"o޾V2^ȲŒ[# 9(?3ŨhIJOI4'e6WgՑ/a=FPt>@.9c)P ^܃ 4 ŴW 9o6籄^uPH밬y`+"bÞ|l!/v]FL݈({x񛵿<`zk~Ű^uiBmR75- f@AhP$e9 ]dR'?Gԭ6\_c .caǔ{'2ªATP} g*'׶2D6b(P7Jx\V'qj$rzr[tU]P (76Sɲ3Zu"Up2)FUi?ϹJ 8d%˹2dȒ* (JgC3aǦg|Tnh]z%|VDm)]|`Kr)hx[/_4Mv&v:ϔg/ R `"9'L#R?3׉>&@P8!ɔ# 咰#q&mAXI`aA!آ#jVEBQ ,Le- r<F*%6G rl+v)_SE^ؚ&siuP}T6IJ)" @b2IتJ2v9UT34y{*  P8rhrʟMR_-|!99.*_򛃢ѩx^J9`A%B:sx 3}ӄlӱ6ܜ4gob%j[\DahTK!$WX!" . bIt AQbe|!ǸB? V%P__ ev-2VJ." =I p{O6ÙPS4BX6HUk]28M(8$`ax:j䐟⪛@lV I1-+~!+0l@ %ƇkhU>Gk>?nhEF;?f[u&*PDfDP.7NU54 o~w遒HI aE_0-}}5]/ڛ«_kT)$GLk㝇a@3_uxXlMނs{e~L]ZAT怒rus hl>;W+McAW=-?특hۍ7eWjW@ΙYlv*Qhǐdl$nFhJh}N7f|.9W@@I> -NdрEo-i0@4Zc$MpI/vpUT5ϛ.2>99M_v;i;{+xBATPN&EƧ!7 ?Sh^.sМOj9-cCq_g$dWGhx u Mg{ czU $ySF%/G_DF ; jua)wn=]̋~ L`'=o5)Չ:Rꇛq#&$M3w0:mMܥ@@XI+8/3#HWx }[ە"] ṃBLVJVmaa-|y2$jwh7MU&E}<_ '@R6Pw2AпA7/^s7R0Q10w~5Wj=#?[(f;ذ`e@ AyɖGR. 
(7¢.fw&o]uDsX4UC:U=.[Z6ð`ck~p^0Tj J 9l?RREr"O)O[)_٩?\P w[%ę%:6YK[?-byjtܳ "x˯uoR;4HCRLvSFڿWg5NyV ^R)U;5 /xv%^ \IgyPT&f^%;aZ=~5yDe_'C:jL L* @uFPIkTWe\1RIVzM,1?@:'7tNl-}jX[3|Iτ0T[ Z1رj_우k8;d[öe}l>L,r5:_LV*0Eո~/<.dZٶ'Bh^SObK1ߒz0*mzlp.;<>Sy͙UךnOQ#^ZKCڈ"-^Pnڙc;/Zs,nZ>H+@M *&hf>j nEHӕs6tR/@bWo =@>&o'dapgݘ/)ɔlq|VJ ucwY('*Ȧb>> EWםs7??v37>!bH!gA=bYhRje/׬.gK7iz } WeQo#AHƢE;`/Em"^׉TYs7{^:0@{{ZYr@ -@fZ"S\i6 p*S"8oIǻ8,2kZXjԽ&|jzXuY 88XOp.Q4¬eN [u{ 7V^eZNRT}P1ө1#6Q Lʔ_ioz2oˤJM*C:3A1[3D1g{i=r!2 4^f} @M*n31!i-Ka6KX*P.1G=d>FPeJ~ ; ῟MӒϟۂg@JGQzM&j{H+//:~a/sTZIXd?:`*0Fڃ"]yaAWE! [`ϭɝ,;{nj7MRV{Spa'&/i7@Xe\2'/Jj\-zѪj1!}Df}kRA%B9F 5A x#EbzRjDZ9A LUW7E!~%&$ F4a+vSBB77h;G.}Z2ɣk+kdZ+xT`Xa&tF#]l^T:NHTxʏnzg 8O"5Ho5oQt&$gFjj ,NؽA~%EJ`DDVj1w ߀aj]pyغq%ɫāpJ\6Lp%#Q^68ڊ2Jl ش6*²WRNГml6EԪXͺtk{(*AQ .艖C|ZU15kF&勻M+LZJ۫Ox'Dr,3%1NX(^eTI:KFܚTtZ[&6 Q:/I @=QEVyh^$h*aא~w$:9=l|.E XQB5d` JL5>b&4,[e8.rc*EO@pR硧{v4F['( Oi ?>=c,Ϣ.fgڋ03HfnF*;4s+BL'&zs08m8 ]T+Ęee,2S vaDјHyQH=2)ܝJ:#$o!%?u|Q{em&lLΗb=M5/A-/e-c,8Zz?q}8-;K ֖/yɟ!DY'={T-@OV6N1b1ET[6-⼡'ݵ3|`g4}JS8% 613OD~a圙A&!%`(Pk7폅Ⱦ0Ϝ+DLGKi]^}zfa2*rB"6M5wٸc0"hZ(tVTνyn $AC]%j0tu~buacB"X@N`3G Jy`1: fqXfzx|QR P4?(NBbd)H'%z_F *4Y7?XKAl^ wNm8[#8 PhDZȢ?ZȢ6Sx4|ęՔ^c_*ʢ*krڨ4*螹g|* r8Pk!r__3ƥXi?T|8%&X CP ͎vGRµ.tே(nY_-|PIq籄^D6^. >}7AbH]{H;%@O<+)UˁQ'jr%4D|ahW2 KMf. ܾuE;a#=x0SZ> |M!okӮHD-lSa)U׬"n1YK]{0L1 [VqJi@DvȏIy= y]Vfhd*H_18:BfB77h;G.}Z2ɣk+kdZ+x ɠy7+ vhДəm&\P$e9G[["VȵwaW}ZQ S=3sw֤ɈбmXw(L+ظ>I`z׃-W; z8cgA_E'$MsbSὋ0?Ej\8PК! TNLWm=DyubExmc&rg_oT>A'G-W0&]Fg7M)#n _$UPPKq] ,T6^>YBe?p꡵Z$ j\2+j)B =Z-`ۻ vSjJ8 t]},XSA-9#HTd2BNXq(F#{ԗ @_q-,Ƣ2PI` ǟo{}HF~ IL-}&Geͪ7b9R GقL(S438c+OSS[{Lݒs/ȪC,r!U(׊fjP4BC )/Yk8H082.[Hf2U?yF5a69X>ϠW~gB9|!NV)/_,PM4C5d__ 𽍰C+ݎu1k0.(sN/A).cZ2)B@+ a|ppD CN)fkI0 5mܲ $0! 
ƋI ^iYg8ӡDa"ɛᶖ.[3keP&H9=ٻ?*G$@?޴Nx^%ݓ9j 7t|~AhgiSZT}P,sZ(ppS?S9 XI"y+s`ڂ%cJENGu VNǬlOځ%-sHkdZvS 9MU'^!rA!3SA?Ihgz||2Y]*p@܈^VΜ[|~*BԻ/PNvrCǗO#m4Ikmc&y0uBmC>3w&{:9:6*076B^8izw_=/}pw⩘NJTSb*h A/.، ^4BA4aϖ@ hD4OAǚ㫓Q#յUd;9Ϛ ;ׄ,]f!ߊjMSO *L^UvxVDdf@a` 5gEH&8tM2߿^wP'5,sy4[xuZY=ށٍtY Z!MC@`%֫ ZozU}t0ŮTA&tfc>^$p }"rr>%K ͮ[͸G[/#iƦ_K'1LKRj۹^NX:1Ä烽}^Շ`bm7kH2ȅ R={ʌE/Ztt y7Z~!wl)b EbF6m/*6\@`Lܠ{[j1ו+%_ޜQB5d` y8=j9=҅4xzemW}_/= =uz+'0Ξ[f"6޶Vdzz\_٨dmYľAv4NK5mܴ"ٍp5a9yCbWVa-pwFØE% p-/a:>E^V!b}j(0aQt`-RI(Hj%$cMD̢M< I 2PZw`PМJ_R_\O\oO&We vSjJa{MP.rH@v (|mg6ftHo ~NH8hXZ b姎jȿygE38S._YFoĻV򌈩v]jVկ}&}{9! vwl+mV.豑29L,+T fOhe U!~Q(jHJ6liX%:6押tTi;c]g[N{fX(G#vjtj$OF|XXLw6U۠~2\| fL1a1͇ar>L}JM.姪/)Ѫ5|_Ɠ+Qv46Mزh L3x#oA@ 8YFH$c 1\ D րNG kº栨 qDapdBI_nkm4Y˪]nok8{UڌhQb E#]Ab{3ȿRYB Cw\8&IfBoUMz#t26[ޅrv6(y;hWᄎ@0w&$?wA.ٶW"⩐t'%vxi V7q>'`ʁӲO:5Vjy3I/RrbAOmXt1̽0Pjb8M|wŗGNmCW?I0ĝXf2 y9,PUڌkA4^{k+DWݍc_GA3nsƁԴvyU d86J&]kN ,QQꓙbdxUVNTBDP8 <]z$} !WQgsd,]gn8ȵvz\0x.7]zq >k;7g^,`:p12/N]Qv|^ j|eS}hW^j ɕcljV- 1 %B_S9ZƋ4N " o.apбuX֚yk oh“soHUuf+KQ?ߌuU똽A;Q2׶~, 5rHM")1G ;iQz12U>kD:DJW<_xkφqdaМJ8o?Vl~ɗmt.ng6qj7t@%haBPF;s" #AKeᥗ۴z~c=3 g o& Vh $%)-P*C|{:ۂ#2#A(h3rc@?K|{MVufUI(iq.NAJ0DbGjl?V4qVqHĴ50{P]F͂BҴgZm21x\?Ih/f7# ZZw,h[ 3&EC[K6e.o= _fĵ]PYd_I`[Z6)Gsf ˶VUݓK@rΜMp9٪h)]/;ZrhՄd%N1a tP1 `ERcm|%esak}Ŋ?}U0,o]OvW_VJܓA,eܳui,_Zg&/펙xSrߟU {7OR/ ,ݦ6 3Zu"U ɂ I mǐJvV>23^}KЂ`юd6N'+ޛRżhVn0e 'G#пhPg[px:zQIVI>Y8iMl`uxMu18^SL\*p_,~z{ydžՑ~" +>s׍9ޤbG^< R SXL/&ٺ< ߙ0hnߛa2CW'\_Hb"ږb~Hp]~וPI"iF!Z(WO?b_?m 'Jb MU_\T)Lۤߍrt5>klnX^u7nGQq;ں"~e -tw3LWϟMy 쁧.fpG ˨ܑ雸tNAh2y Z(m܍`KM,"&KҸeyjx;W`oʳJ NnMIhL^RMߕ4'ptD_HGڇھUD%.b$Bu61[y#7}U0oN%qMHu}'fAuY{P({//tmCS7`3 IKF\ 0T("K"p?0XmN,9}Y OJKF橱.E2i@ƯRJXAܮd+Sbsmni* _S‘_,p4ЁObIY'\~&.)mmK6P |;cx[ȜAv/=n1UGBCz‘\Z V;Io5ZlMeדJz\ͪ1֗< cl>ި_-ܵi{2Ļ[~eAuˇP\:)a{9}2x;~Xǚ'JH> [!*c;ё ǽdHe+Q9xd݆%R@}K8:藃'2ªATP} ]y>6^6`&* 4x\V'qj$rzryTW[E4PPЮV!﫾xUv(_,~{'cL\/qȒ* (JgRe h7՗ǠsM؍|v`*D_8>%߲  quLdzܳF͂h_,Y7#d )IHnD%=.Vdv\CcCĴ"WJ僴}73~-zѪ`Դ q/!uXE>)?kx^;FS/+8꡵C],IW&8͵?hQľD@_/_X#O 6LlzEQ(;RkR~EהThn<eEJ餇tj{F}Ԣ=Bx֚[*;tuPZ_b\mn.1k˘/>CĎwV2bO55 
{+cMxws첰YGՌv0aXL&\0"iNJf?0iJKl-LD{g,qXeR KDY,Jݽ&=(&3W{o'JfRNC)aϣtM bC"a_gO\4rmλB >E3omŕ3qp|"ζ¶Kp"nd (D (( lVp_כE/X>/f7#?!w؊@;.ڏ0O- PJH5ge6 ux˱uEm˦L-]y6 wg~nt ' τm'= <'jia gR}Ko7N@bᬊOݷAPHё_xu2^O>`QCek7Nތ!5!݋dUWmDL OmR_EU!@]uJ$d*r#T TJZ+=^@i1M,w ZňpDТM2: I@9i,RX}hV Bi;3m UFq 铸;V<5c'폅ȿT+ "|*7{a+y: vsv- Y /Xw쭬t ^<.I5v-BY؝q'Dвmx c jz <ƯEi$u 2(tmgn߀Quea(݀l5&=.Ud?0E-hAX >'knA@ EDZڜB҈XtI Ja,pB#IK/Oᶕv0,Tg;3^w  }-:YzC;/4wʶ ;ŗԘ %sq+_ŗ``Fɸڀ"rО*xmE+{H]pښД9$JȨ|*c; \'-2v눧FKu93e@Ob Rb +0=/f7# +ӹgE2Q)Q.̑nH=׬"3~o !ŔI5DbMO=m|1ߓ,v".EkC#~K-J@=OK|85 LM*hY4ZZ3cl,])#k8&PJ@~!)[4 {%p_N% nMS[ٿ*֟/l;' wr~}=2Rvo+u/ӆFd6KU(xN"]܉p|n͟ρsv47M&۠ C+_Yo[k-ns8&>LnFLl{uiZs^h¯˽D](&+fh,bOm'NO82^f`fO 1I$"$Q! 9K\ $ϺͿQ3}Mz/}AC ^jt,@ pj+^Fq2߸&]Ec#, (cc`iuJIC5Tιe- Bx.Ѿ#p頽`^;DR{1{jȿygE38S.Yᕢ 9M_J?FVRDT?lr~ NV0?&Po@RjFO8el9H¤v"f$7eހ-u³:Q H Fs]oF!Qۂ#3jq v4z.0X2v*gS h6$nd4x%{I4ȿ{s.WnJ֗z,\FE_K?8G؜ab4p7s9%*W &&/F\gg9u>YBe;rG PޠbܷZᯉ8))ոvP>F b5#q_*Ǫc|f*cmκfaӿ vB6(#P&`g^>ƘݿInc}>;+`6^bI Q{3ud7++\p f/DFi+h_ a/xumjBcOWXLNBIB}k}9gODA~* Fezj9?OebfmSx.kS `p:ԓ_UR<9ktt OZs"$2&OtM!КL }.>^CؘrGp*lm41إZ\fnK 9&aBisDJ03Bq< }}\kx|psiɔyT*Ek7I4+A:ʚ*e9YEQ)DPb,nx3~%n#'teaͰih v/KVV׬.g<ծ`58MDՂe(?CU/zv k)> G,vO.Kgm[;zxa!J3"[TZf׬/?Eeh?|1zBq+)|6iM$f> FʺAtW?val `< n6 6dLXs rNPB'`ԋ* GY+oiIoKv{j`*Xg-&( ^{n$Zޫc"K+)IJ_/}`t _Xdwj@p^ Q]㫌lMm:ЁqzHzظ'r;pͲV(inPƝG#SUڕ|0[ֺ j׋Ts)yѣ5=e秈THnִ% ow2=Bn2^+*J.QXM\uC18^S+enKtpd=L2} iExw Mt-.\{Tz!c\~M@NJ]nL{I#"SF#D/l.߄jM_d|d߸ PcfEG5E%O H_)PK'+wDT3y5v5mIX濡ͣy\V7 MArx9`_GLЕ)æ75+dk~''KJh tQ;kRA%B9m(?-2,_Uۏ;șxPQ:)]E̹N8Q >CaTM]>6P"tOI=?7-uw0ZꓜȤ y;>'v@zf5c"d+_ 3Eϧ(E[=3sw֤ɈбmXw(L+ظ>IKݎO'ۤ*ţp2q|Pљo |n)z" ;ʽ .84[fRGm&`ޔ[yB c*`X?zڡU~`>':b>jN"Fqo,_4 *zsJkjg1ɑ(Gl^KƝn<f ]l7E`DMU /xvP;%dyZ 72Faϙ#I8/Te=PI5/šn}Yf|Y6Aƞ-m ϞXT&>Տ XX:g xpcvx*bvη* "SZP7$ lA55MW)pra=f`IRD/Yž( )bJISl p klx~ۀ Y=* :`׍vlځjvic j& ZżW$0z-cMTe.L 1%*{H| h s&-WjJcP$e$:a,83={0(tb>> DBLS띕=mۄrI]@O>0^\ǥQ`oŠuPY9P#kMhM6FߩTtsC"4!8;|*1z͜Fj(_l퇌B䦠5kOA[%g"LN6MC \f5Pfƒ3 Y2{Xj]}mc*Ըrը#Aho *vX",PZȢ?ZȢ6Sx4|ęՔ^c_*ʢ)P^w_8M uBeH8bp&V_ᶖۿz{d/3B>&7ZkK?*P%냏q D,kOɨrKe|ʰw僅UȁR&-FzZ3\: 
3IZfҪ i\{ZK朲%qGd$Rv#y1hl .Gw_, EIAԟOYAGPZ-N6RX7ugEM⎴ @G`\k~u7.=-h&Q `E5O:>IW*6 RI?TlޟO1ޮ*?Wk785u)զ0qdIqL۷ 0qT(&Uo9UVp+&&)Ds'k,~Mʘ{}|ء4"GMX^2eP_*`un Ϭ5IāL@i"9Tcwww#RɆ?}ۿȵ`3SA?I]tHoz8[-.\{Tz!c\~M@NJ]o. /IW%5!LZ0-^We~))hЕ`R0²Xvkθ!W\Y- f@B',[dՍrB-zr%hP rzOgzƧ 8}}5;% 0zoF>P&%Jsy9:?sM~Q_>j2g0 F4ko(O{Mx ǣ] վՁzⲦˍ!=k)o<ݢMIhs&Q0QB5d` JL5>b&4,[e8.rcc[|AM AU}@xh1U ӺGOJ^w\-Դ* *٨|zbdu Я6d=6g$a-ϞO҅pP4] |I3k|lj!mDm5C8:+)d.17Bq+)} JZ\UO!|f3JcO6ojŇ#6F3"g9wM=o \v#B-(^`EV(ǓS>Z螺ڀTj @_q-,Ƣ2PI` ǟ$%И|qᩔvԺć|D0O~'Zd}틚Iɰdߏ[(m{2h>ATz[_-6Wz~pGՄZ]d_FϸP}Q$G "k^3J 咯PmR#Zjv~4˯?'nƑThvC6.tV\7VIv46Mز&|Md(Ƨ!@ ,~PZwpT+UEkMNm{ʃ3/ FA7iؒᚙkV"]ݜ!,WeߔSmT2j2g6h%^{ -88)Չ)Չ,$ ԧ<Ɖ^ VV lǨ5WH (tO`Zr`v5ݾ7r#DHTMƱtu,SJѯԙh^1Q!p4p#~WeW<'ϗ.cKVRD !<-kSn}wIǃ9I^۵Iy>>^"eCF pNTx0x_k9MHKcJ^ w}q>Hw[m;؊&V.>_.En,PlϪ3dַ)E$AXeM,Q)^'9Cl7CO}bRv&s/Ggu:@jTD4.#'#ػBљ\/9?BtR >jit-l2\)nx d A›uC-NC zf볉RyCQǜV‏,D v=IꕴO!4[A줈m[U)=\m*-39}D*z>V_}syA |Ow-eY(2#i=y}>F =Z>_wιD2q֠ISCcud֠r1v΍h:XIW&,TŽvW0FE41`168:V>ӻU F|d fEm ~-ԐzMuP`Ahgi|A YK-xcsu+%y,/ұVs%y0 ìlV X(k˦. F xpM+,<1דJy`Qq EZ jȟNUAqV'0m 4 $/i]`ϜkUdN-J :$/Li` O- 9S(EuTF`T}p:ݾȹ*Kw#!iCkuP|%(YɁ F5*vJ6گz$>B(.<] *) %x;k';zT T o5!B7&8[LUb~<8NJAwDc35yg`o#ߴ *s鰎+nj>BŦN_Ԛjz vf]lLUBRwM lo7RcNk'H iF!&^cH082Bx8+)L%yA%)y\ q*^_Uڸ}ϹdӒYM=OLاS}ݤ"~ϡ~=e1T72PMI\K.FSX5}yݗ=3q-uz&(yRj]*P6ח1i6 7e|9_QYGEu}Okce7 ,6Qiq'(2Ū7'0X2 "?T@nOX1=6C/8*SdK$]ώW=WgtK=#ѐFj|M=c" _MزPZˌ2LynRAV1evS'&Z'e7$0v->A.ٶGWxRZێ*~&kydžՑ~"?lȻ+ ](=$aP˖>YNޑЖ+gN61NDb9qw3%>[,>@OI4/[},;]qqm|G=5k\$8~AU};.kf5xtQC6^Q>hz+bk1쪺hn0@|v( 1Q+3" -_p&eWl2|4~ Z?rYpr|R\ .vᧅQ>,Z ߲]+4bA 5wGPVWo"jB[{ ma1\S&e ݱ37U xJUȻ!Aa#;$R<+clj2v|@$rHjdž^X>^:j"54[>\|̵FƂ:vvIɛ~bocjdv7`4/,'xW*5V~:R^w_dr.#&7)},^4ua"6 %t-ۖoKa6as=ġzdڗ%?'3F_G{5Z#>%M{=3u?}xaZx˳ hU42nM8RGnmBr 6|DWQk2qo%<5wgES`w@w4dv\CcCĴ"^,ݣAP9WbH[]d6>:ғb*h9sy<q[xM8d4f0/7\X$V&W3Axkߵؒ,BcK~Fxi1K/÷i53) 4{LD*&ţ֥ĭQ 8=ZIݺ3}7N%k⇖SStYHm.~Xvc,F\`+B3S-)=6";"_N?]fY|3pmW@ Uͨ+2}Nc|CO[H{w L)Tw_\ׁ T LL6_Uڸ}fލԷ؈D+dD&Fzm!}?Nx!N}x(y.ƃwe_Ϧ/y f*`v `ݿInctX L'_'fBězc6HJb<FE.R4j*" 6G:o FÏGA Э"yepgO("34XnZ@h ڰ^ rb/svFZ1n7e slYhwz&?aqi.| 
TKko*$OXAyѮlnbGQ|ʳ\L+9} #$}t*uo< x;畋j4/MZJovZN';rm.<3CeG׭SZk]nr㴫LBv=n{E vWHTT]/KT[D(&*^M/}B.\TUO&a;񼚉z!as=At?*/<kM? H47:]ͨx!ZO븟Z|ஏWr^YqWQ6(dmu ru9ViF*dWy4ʒv.gݒ~с=ig[_: F(86~R'j~<}5ga5eUm"^[ڽufB%;(df+Fs):ԣiZLsoUڒ)mޖ%Sz7udC |I_[)!{gh19rhif,CVyopJz7g'"-fw{K6_꓾;­$>jx`],RU.^KTd~8B>ng΁70Ʊ,ϕ4m%Яr=M %E[mϟ&R}\XW߅ڙ8ԉHx)gz'v=Vu@be\BЖ (Ui- W`vړ WՓn*AE7' _i!&,hP,8;0RSr;MZ\ޙc GNqmؗ66;Ho Ȕܗ =i,Ԩ;fVN: \39,;֟2(2^k!2j sd)Ҿΰ jMaXsE^&5umzOܦ-,TmZj<Ahl˂zC;ٞP9 ^Τ*_b=T>_t UZvC(,]S bu}ax O2I.R;(Φm z-b&dR#rWy`qĵ.!g/fOA(/.`L$-!dMFqNf'ym+=Ԝ.ndS3wH{Kλ_О*AǬ>`/FQŴDo@o0=E1 n,[)cBl#q>IՓ%,F]4Np|9/2Zyd?H6Bzϙ.uLo]_g,]/ yItf[TA}E:;4#mi)%WHqѐT<\:HJFVѕXI| #@=a!fdrŻ=U :T~WVm;tlPJkx^cϭmalzBFY9їi!H¡ 5}jK+sY12) `N.Q;͔YgծR.>9 A◫io# avJ:V Z?1ǃwV*1He’9Fc _$U(]DlS~jﳗ{;wnӟ$Y| K°i+kc^RMl/hKEIgz"sFZQ!x88 x"0 ٍSRrkdZ+P9rlcb'@핦Ŗm[cQfj Kg[ )kGBkiAZӵ.b-8#<%˦fTyE[҇5@}Lh{ Jz.@) #Ks׾$0M+&&y[:txד&5w~5Wj \)k)Ef*8R9d"M;,=(Q^V&ِcDY)Ҋ 8fe|)tvX/s[Yb|/l0ЅpX[K'sTdWy4ʒs@"Pk,*Fp) T-7s!kB˾W@P*,)7SUW!HA>\>b{]+Rg0Joefi 3>W@A;H72A0J)TE>նs5^_u! ,֤oSVºķ^&- l4b2B,5~k]V0/+!燐\EaqqS۩ =,&"u%m@5sR \R<]~h㦿d+<4 ~)~ fio5e6C,91Oz=1żjpҴ~ELINgOngb;*G#Uk?ʨ&ɔ@=s <9g>h~y)$4ƢHu" 9~^OpʉkٌaZjAĸ.䀲2ϐbJa&ƃ_ڰ1HQ=5@+T-4%2/TIN)hLE[ssH11clC~UlSVUFg\UzoOjW@UP枑1Eԗu3Ja^v#@2` Eb,t 6"@؋b*l GO`>3Ef{,Q*xt!򭴛Fy{4JIT~H'Dw$j/qY,O@Rdm{Kd_"n[Y}jCu5q.}?ixFkO+/x Z^Ԡe0\e4/SޡU!՚?Bl ǖRYOϛw6eȞ&[cSl/nN=+f䇊Em (%Syoƌh3ff6ķ" $ݚ v bQfTwt<|o^Hx s?SxX$[kFXI|qtkc0H"?oYP?`<`>N [`[ `_Nb:z./^0okVTy{tJ_.mA#]Cqҹ9؜e9 9g*w'!&6AJ2p3ochUIU*}Ą,GtqhktLX`w\qdZ 1:UNp_#S ߙ\;95҉U;Fk86Ő+мtf!3k *ι-()߷d+LNDsW&lC&i }co3c7í6jh)Q$~)k6 \*ٕOpTMA>I}̴8;}>̵8xD UDd.Y7L>_dj/FBJ_Q1hr }dv;hoPU& -d%b9hzF#("BxTۂKL˦7$6YB-pmbY\>SGģ #;Qvr9\lId!8ЈPu|S;*3N6Ly3nx}I):#9Ժ*P M<&SW>u-Ocʀu 8VN4y9nI]5AF̣ã_l ZVkYe>U[b9DAT&=HQ P绽te&eϸ:ǝpVmvbS<립[HpbAiZTOO9+bcY A+|O#e~凟b!qh_i.JKrZ~$wߥC+Gm*M$~ * "d!{zunɫx*#XWZxL0KxĭsCm..;4FK܅-4E R𞕑[:je 5jl췓27jNZ2>{B(yɣ^as|N/#F+%fpsbzIM<==As>5@8Ց\E4) 1q"_x: t80B~;5'߯!(^:5̸([s=?,^a>f3e (%qfWƶ8Hy^?S 6ReK_;7J|iz4@(/6|}-#}5`ԪUoL;cDO C4f e --Z6,Pc%mokBGD ųjQH64Y88vWP\;&c6("lҔF2:}wA?po܎Ӽxѧk.JhMsҡ>[%k?n](̙07ƕk 
\5%vR1,%X週u17j-zARC~8\%MX7`]*!g[wd{uSc>mKOPos\0; lB~-@,~jثRR€gdW!J v;ZN ̪Z+ԧ_qP W`2b@%!}lҵ{Qf"/4 Zn8roυV~mI Wau{E|5&!0Ȍke 5_zg! ~Bs3UЭȡo9XaTt2KSU& KeO2b8b7>ZYLwM䁺YGYʂY`s?s&cF)wTX[2ut&[-XA bŎ#u@O7̞@%o#v!(M=?ɼ!gv%H$9)dX6vPCdĦ/!7ē3ܣx\@3,:vD6)9,xzSCiժ?JiX{_] !쵨,~OΛ>8ˠcwime΂6`: fUԔ8m@nRSCQUV=՞Տ*m~p[9|0DrKE-_ = h#ٺJ.(q#W!Q~'4udN [756c^)+`?f&a"$^I40[#vۑjО-^K\Jt{A-`KV*KWv`Wt U)!m.BS)m]V% $tED;~.}(%mˉFD? 7I=E׼"i^pWg%D-JƜ0Fe;W1%JZYC]7GFC&/HX|-޶.ts'B$K( s;H2$WVz葡(*e^ܭi /~c"AkuE? eƬ6[{#l#T B^ XSǨմ@sB-rb4:fSDmoǷp:$7n>828\m K n ^L,/c1tSZ?1ro5q!i4IAy$-%z|EGqW1 }͓$6%[O:HyԚ['a{ dOisSu|ρowk,">ߓ"y>oȮ(y0r svYw=JD7< I]d7RF@*2nuX`YӼa{Ģw^k,CY75\ Htu:--QSj X'd[t \qSȳh*?ēU) `;:'62 >, 6 -?7鐻jp.E"W;*ݹB K8q"/]RB8Pl?iv8۝,g{Rzsx4X~LGbꯛCbww Vl.KDSi}kc`ҩvSx`c;Qk & T>7St#u!&WEG!6;kE[*ݳJd \MKR}$|??Tx̴֛~VPq6T:BR~~yMtf8~Z{>`A3 ]on?-kvoݵ @d@\ 4)/ ӬylRP"+|@ߵ[&)^L>" u$GYŪҠ:&+O Cy&AA%v[Tڙ-KڳZA:2Ob+$GHaحA &S{iƩg4IހJT#Zэ~Ft漧P96Qa6.E͵N u6*dmAUDDdYI곙H\ȲS5#{1Z,bϮx3fEB `0j>T-te,n qsϕ/tː>hiTO;z%(MdF( TY.V J~4׮QdD.X3S?שH`/צ1GK ԁ B*$bJ]羥}9ٰn_fvPfCo}s!4(UVǸPf_nk$`2Ng+^n^w4e/LKR<ƣʕY sPf3Kz^Fu69xts'XY+~ nI[%*qaR"l79ri N~:yP N x %s{lAB>x(JY]^zU ލ]+Wra酈@h =6J6ޞ& heYIݯNXbU/cI/.Y->!{᱄jL4Ks"-Ɛ}6mCaputWbYH./ JuqQj'WXb@t/;1O[2h1j(ݥ-:x&aØ&T}`-O$PΦLG}dqPW&gU>y|:RRl+)k?5HV?>Zt] $_ŲĦq5ɝ } trlQd Ñ̪Ď"]̌?OOmvA+Ԥ!#by$<[Px[Ր\ff:0Vi:c(WBתþlUٴOqf5z9;XA&26l%‹ڥP7wkrS輇 +0:}@@,QJcwac`8xyH]n.LYH'T8y \#6O-F%+mL^xc0zgIR@yG9ʺ|iyf"j;aq2Q&*kR7ZsYw<?2< ~2/ogQoB-o~b}Wdr0s4Yt7Ua87;w;艴0 ݝa8ў(  X~$b+2 AJ}xd #g9ׇNÏj˞ikD_@Fا8 vjQѵ(GْZ1'3vQ O@$E"$xB#R+j"qTy6;Zp-,{g_l2Ι; ĺФ%G8Je5@P8n5yLYI|ʩ@wbgN|*a؞ Dg x!BH](oed 9eXtOa68oHA 1ĀJ+թ*s[I:>Jhvs |)7r˹-^WA+f pJ\;c PsbZ8]ޥ 3W峸Fit/6W\Cckʯ>4/ E.4PqPPb[hzY!a\$_>WX8[3ͨ" ]ӧ1`;6+V~]̊a0_w^Ǜ@xOYMa xYY z£KR+!v R/nYRļ@ @| թuf5vܥ4d Mx`Nߝ֤yOSY-~\ڦǧK}',"|swE \GEi爎}H߳xW29.(ش1.obI4j q#ۛ~.&Y ^swV,o90=u~"1l%;')5hIC"P㘑@\P ;,&*P+=dSJ^}uU m~g9eE`pGS*ڗKw 3+9׿m#V~e[ ׎T^y9ҡV8aIda^eV{r~e CG+PP R>ɏx4e' 'ꝠqLbn`Z1Zƕ@LeNQq3p5(.9ূJyiOQU7,y)Qx`pmDMt !!Xi_nMc5d0([.$`de=cSB24.o?Xpln2ʧTV'Zq3Urr 󴻯Z9֜a m>]? Ҍta? 
;=h9-]qvi\Zg/ /@[_Ud!#cCU'_[ kPr]o6]t8::_)ac4zmWg+vU C|O%!J(<$fP6h8~Rp -1*Ox<08PBS[| >VDNƴ>{EynD^4ޟJu؆~r | W_Tqba3 'qg *˟r&y8]Z텫=qHʱ)Yqt-ҹ5&c-b^Rsπ'ƨLz ZT^0 ‹EnyZr(ȹgھNc;y lapqvW7#3*{!/)leMvwS螃?[WfPJ M@!m׎ߧ_{w!ݬu8k7AC,c"d.5ʁ),k 2LA`d[DFd;}4BH҄(x ;1fCqP?&7HоWsbtYѯqOTwmN6K3 N'Cv)Nv} b$k'!Մ_ KAR* jHhNEu?>G_i<鮦^S.l~+d]n`+PCy`5-1CXjf|9FZH2Mr{"Ŵ!9>ﮭa8Tfćο aB5O& WY5֫s }J@iX 0JHuďE_kM϶*0GpKղ}|U\;9=?&BīˢOgJ%&=Xv d @fr61뫘H IٵTtF{=w>4F  ρ! !o(IlGvăʿuo؟`sEǼī#lm)PI`]sd6tR9]eJ6Q 7wq4C#(q J63B8iW\vLW c5@`4`Hm!Nlj4/X|cJ էɗxKJ)1-R c_bI8 'qƋ@G\]"m$%)C %E7 {i0 em ;V*z3;G#% )A@ABvBc9@wdR3T=$8_^*CgHpZ2#mӊp"CL =sgz׋ J~ڕdPH^ DV@בI`hOYmsBvP4V0ovN}).xxMD7`s |TV_g ?p6;e2=Qb ltΒPx/E/$JG9A7%M~Zq&`'{ s{Q͟Ʀwt)EYAs=JS+.p<`Iosu1YEN6E?N9Km4Imx@CrJ`V*Yb!]xdADy@PaZ[sf{vIyo;]f B_*{("^۸X,rFYoaV~9/I{Y)OS? YdX,b z(\/#q9:r"jief\6=_^l$؉Y^ԑv P;_mt ^ @QG`7mJ噿}E3vLm`e4SEX &Q!==mu[wEFrZsA޸埩o7EX7RԖ!3XjV |p *i[ս^D$RJ=ycPo|Vl?ո?#*lț駈&CG}:E5*"^F@$ǿ3e!WXؾ!KAPُM3a>go(Ӑns%њ-]@֚::/c2p'NeVqebz]k%G_=tPyND5j8jv]5NŖTN8 Py~jI&*[ԙVjex'iQV<q&BsptXsiut12H ^؎EB7DccOٳⳕ(媅bڭf {f|}EVY+!`+R >XuʒYɕSm/䮼4@T*x~ t>PPр9mՎ>fOzLEA.Jr"m/޽E.I&ǠN2 &>,A-[<3Q󯮿l0Ӈ4j/P7Esq$A]Yf{2]9_Ҷ;.GPN\j}a5XvdJv/ dL&1#翿</#[WP?bpSl<⮆><x/@39MXcCSfMAm~0}u^%N^#q+R2E{Z#R(;N&-h&yUjGzB7.z:~q QB0h_r- @FW% >L=a֒ߋMv`$l/W)c{{=iW *)*>8Rs7 QsF.^Ո4fzxjJm. 
-1hQoOP4HqV3'F"A \sfΦSeׄ΀?njAd5~Id x5cMo97 @^CdtnVz jq5 9ߛ@Y |aq_<ֿϛ҆.:1kouB(k[E: 0F% ӹz+N1W8|s=9SE(Y'n{/٧kO0\Ư{I^8|_Sa`$Kisg;8#6<8Hi3ďNO[#-<J;(&]b@ l:nC_<+hZ0JK;(~ XE'@R/,\:=msD|T缲cԚŸ [>E`\1J=f+=&Y!IVn^G!;?ԘZ3DBv_L!0n"ơe۰˪!DLzOӖwE/2qH29ݓdLms{@iݨ$+h״nX7Tl8`5\=݇ b8&pPʬЉ;SyWCBszݛR^'^ljgyg5(F#`OuhtmB=I}9gf* f}g۝' 'a%h\3UL 0@190;A~d| Ty簹2^`0voE=D07az9f:PfWlnCqVox?skgҤ$;$w-atDMVY;J@jϔS Fe`XXtU,`.o5n0ʣNTJea06#gn2v\,vh6^ofddY l`JZdCè=QLh'V'nⶔ=ikEbƽ(E-kйZ}MRK}G] ʷmZ~ m&NZSVUgO9F?_YFqEs gOb+Dj8!hr7O_=J;=)tWY|LH J)ȅt$ TK6vo\ qFxۛDNwܹLnlĻ@&j"HG %="@l1wͽZqTx}}+%gA$Fg:o- +zy9;VIyKo~ElfeZӆlrZNT<qoRjt$h8 (`wu`gꀪb3EV u0_  e:IPw>oݝg#݅;r\]|FϊA-hk`RցNXKRNSyJv>v?u.&$w0^Jxs=t@avoXQS&@Qۻ/iqzUY0S1b9!ك>qAj'ukǐFͬ iL ;iJRdYNC׳bGҼ!@4t2UEDlAn`kaf+e;lFVnGRrVw]nPP0kem5@j30'Q'm=-+d7 Ֆ5 N4q~5vhkzxc-0.9.1/docs/man/000077500000000000000000000000001515574030100140315ustar00rootroot00000000000000zxc-0.9.1/docs/man/zxc.1.md000066400000000000000000000125041515574030100153200ustar00rootroot00000000000000# zxc(1) ## NAME **zxc** - High-performance asymmetric lossless compression ## SYNOPSIS **zxc** [*OPTIONS*] [*INPUT-FILE*] [*OUTPUT-FILE*] ## DESCRIPTION **zxc** is a command-line interface for the ZXC compression library, a high-performance lossless compression algorithm optimized for maximum decompression throughput. **zxc** is designed for the *"Write Once, Read Many"* paradigm. It trades compression speed to generate a bitstream specifically structured to maximize decompression speed, effectively offloading complexity from the decoder to the encoder. It aims to provide very high decompression speeds across modern architectures while maintaining competitive compression ratios. ZXC is particularly suited for scenarios such as Game Assets, Firmware or App Bundles where data is compressed once on a build server and decompressed millions of times on user devices. By default, **zxc** compresses a single *INPUT-FILE*. 
If no *OUTPUT-FILE* is provided, **zxc** will automatically append the `.xc` extension to the input filename. If no *INPUT-FILE* is provided, **zxc** will read from standard input (`stdin`) and write to standard output (`stdout`). ## STANDARD MODES **-z**, **--compress** : Compress FILE. This is the default mode if no mode is specified. **-d**, **--decompress** : Decompress FILE. **-l**, **--list** : List archive information, including compressed size, uncompressed size, compression ratio, and checksum method. **-t**, **--test** : Test the integrity of a compressed FILE. It decodes the file and verifies its checksum (if present) without writing any output. **-b**, **--bench** [*N*] : Benchmark in-memory performance. Loads the input file entirely into RAM and measures raw algorithm throughput (default duration is 5 seconds). ## OPTIONS **-m**, **--multiple** : Process multiple files at once. When specified, all subsequent non-option arguments are treated as input files. For each input file, a corresponding `.xc` file is created (or decompressed into its original name). Output cannot be written to standard output (`stdout`) when this mode is enabled. **-r**, **--recursive** : Recursively process directories. When specified, any directory listed as an argument will be traversed, and all regular files within it will be processed (compressed or decompressed). This option implicitly enables `--multiple` mode. **-1**..**-5** : Set the compression level from 1 (fastest compression) to 5 (highest density). - **-1, -2 (Fast):** Optimized for real-time assets or when compression speed is a priority. - **-3 (Default):** Balanced middle-ground offering efficient compression and superior ratio to fast codecs. - **-4, -5 (Compact):** Best choice for distribution, archival or firmware. **-T**, **--threads** *N* : Set the number of threads to use for compression and decompression. A value of `0` means auto-detection based on the number of available CPU cores. 
**-B**, **--block-size** *SIZE* : Set the compression block size. *SIZE* must be a power of two between **4K** and **2M** (e.g. `4K`, `64K`, `256K`, `1M`). Smaller blocks reduce memory usage and improve random-access decompression; larger blocks generally yield better compression ratios. The default is **256K**. This option is only meaningful during compression; the block size is stored in the archive header and automatically used during decompression. **-C**, **--checksum** : Enable block hashing during compression using the rapidhash algorithm. Recommended for data integrity validation. Checksum verification is automatically performed during extraction when enabled. **-N**, **--no-checksum** : Explicitly disable checksum generation. **-k**, **--keep** : Keep the input file after compression or decompression. (Currently, the input file is preserved by default, but this flag ensures compatibility with future changes). **-f**, **--force** : Force overwrite of the *OUTPUT-FILE* if it already exists. **-c**, **--stdout** : Force writing to standard output (`stdout`), even if it is the console. **-v**, **--verbose** : Enable verbose logging mode. Outputs more detailed information during operations. **-q**, **--quiet** : Enable quiet mode, suppressing all non-error output (such as progress bars or real-time statistics). **-j**, **--json** : Output results in JSON format. This is particularly useful for scripting, benchmarking, and the `--list` mode. ## SPECIAL OPTIONS **-V**, **--version** : Display the version of the zxc library and the compiled architecture information, then exit. **-h**, **--help** : Display a help message and exit. 
## EXAMPLES **Compress a file:** zxc data.txt **Compress a file with high density (Level 5):** zxc -5 data.bin **Decompress a file:** zxc -d data.txt.xc **Compress multiple files independently:** zxc -m file1.txt file2.txt file3.txt **Compress all files in a directory recursively:** zxc -r ./my_folder **Decompress all files in a directory recursively:** zxc -d -r ./my_folder **Decompress a file to standard output:** zxc -dc data.txt.xc > data.txt **List archive information:** zxc -l data.txt.xc **Compress with a custom block size (64 KB — squashfs-style):** zxc -B 64K data.bin data.xc **Compress with maximum block size (2 MB):** zxc -5 -B 2M data.bin data.xc **Run a benchmark for 10 seconds:** zxc -b 10 data.txt ## AUTHORS Written by Bertrand Lebonnois & contributors. ## LICENSE BSD 3-Clause License. zxc-0.9.1/include/000077500000000000000000000000001515574030100137515ustar00rootroot00000000000000zxc-0.9.1/include/zxc.h000066400000000000000000000006251515574030100147310ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ #ifndef ZXC_H #define ZXC_H #include "zxc_buffer.h" // IWYU pragma: keep #include "zxc_constants.h" // IWYU pragma: keep #include "zxc_error.h" // IWYU pragma: keep #include "zxc_stream.h" // IWYU pragma: keep #endif // ZXC_Hzxc-0.9.1/include/zxc_buffer.h000066400000000000000000000210531515574030100162600ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_buffer.h * @brief Buffer-based (single-shot) compression and decompression API. * * This header exposes the simplest way to use ZXC: pass an entire input buffer * and receive the result in a single output buffer. All functions in this * header are single-threaded and blocking. 
* * @par Typical usage * @code * // Compress * size_t bound = zxc_compress_bound(src_size); * void *dst = malloc(bound); * zxc_compress_opts_t opts = { .level = ZXC_LEVEL_DEFAULT, .checksum = 1 }; * int64_t csize = zxc_compress(src, src_size, dst, bound, &opts); * * // Decompress * uint64_t orig = zxc_get_decompressed_size(dst, csize); * void *out = malloc(orig); * zxc_decompress_opts_t dopts = { .checksum = 1 }; * int64_t dsize = zxc_decompress(dst, csize, out, orig, &dopts); * @endcode * * @see zxc_stream.h for the streaming (multi-threaded) API. * @see zxc_sans_io.h for the low-level sans-I/O building blocks. */ #ifndef ZXC_BUFFER_H #define ZXC_BUFFER_H #include #include #include "zxc_export.h" #include "zxc_stream.h" /* zxc_compress_opts_t, zxc_decompress_opts_t */ #ifdef __cplusplus extern "C" { #endif /** * @defgroup buffer_api Buffer API * @brief Single-shot, buffer-based compression and decompression. * @{ */ /** * @brief Calculates the maximum theoretical compressed size for a given input. * * Useful for allocating output buffers before compression. * Accounts for file headers, block headers, and potential expansion * of incompressible data. * * @param[in] input_size Size of the input data in bytes. * * @return Maximum required buffer size in bytes. */ ZXC_EXPORT uint64_t zxc_compress_bound(const size_t input_size); /** * @brief Compresses a data buffer using the ZXC algorithm. * * This version uses standard size_t types and void pointers. * It executes in a single thread (blocking operation). * It writes the ZXC file header followed by compressed blocks. * * @param[in] src Pointer to the source buffer. * @param[in] src_size Size of the source data in bytes. * @param[out] dst Pointer to the destination buffer. * @param[in] dst_capacity Maximum capacity of the destination buffer. * @param[in] opts Compression options (NULL uses all defaults). * Only @c level, @c block_size, and @c checksum are used. 
* * @return The number of bytes written to dst (>0 on success), * or a negative zxc_error_t code (e.g., ZXC_ERROR_DST_TOO_SMALL) on failure. */ ZXC_EXPORT int64_t zxc_compress(const void* src, const size_t src_size, void* dst, const size_t dst_capacity, const zxc_compress_opts_t* opts); /** * @brief Decompresses a ZXC compressed buffer. * * This version uses standard size_t types and void pointers. * It executes in a single thread (blocking operation). * It expects a valid ZXC file header followed by compressed blocks. * * @param[in] src Pointer to the source buffer containing compressed data. * @param[in] src_size Size of the compressed data in bytes. * @param[out] dst Pointer to the destination buffer. * @param[in] dst_capacity Capacity of the destination buffer. * @param[in] opts Decompression options (NULL uses all defaults). * Only @c checksum is used. * * @return The number of bytes written to dst (>0 on success), * or a negative zxc_error_t code (e.g., ZXC_ERROR_CORRUPT_DATA) on failure. */ ZXC_EXPORT int64_t zxc_decompress(const void* src, const size_t src_size, void* dst, const size_t dst_capacity, const zxc_decompress_opts_t* opts); /** * @brief Returns the decompressed size stored in a ZXC compressed buffer. * * This function reads the file footer to extract the original uncompressed size * without performing any decompression. Useful for allocating output buffers. * * @param[in] src Pointer to the compressed data buffer. * @param[in] src_size Size of the compressed data in bytes. * * @return The original uncompressed size in bytes, or 0 if the buffer is invalid * or too small to contain a valid ZXC archive. 
*/ ZXC_EXPORT uint64_t zxc_get_decompressed_size(const void* src, const size_t src_size); /* ========================================================================= */ /* Reusable Context API (opaque, heap-allocated) */ /* ========================================================================= */ /** * @defgroup context_api Reusable Context API * @brief Opaque, reusable compression / decompression contexts. * * This API eliminates per-call allocation overhead by letting callers retain * a context across multiple operations. The internal layout is hidden behind * an opaque pointer. * * @{ */ /* --- Compression context ------------------------------------------------- */ /** @brief Opaque compression context (forward-declared). */ typedef struct zxc_cctx_s zxc_cctx; /** * @brief Creates a new reusable compression context. * * When @p opts is non-NULL the context pre-allocates all internal buffers * using the supplied level, block_size, and checksum_enabled settings. * When @p opts is NULL, allocation is deferred to the first call to * zxc_compress_cctx(). * * The returned context must be freed with zxc_free_cctx(). * * @param[in] opts Compression options for eager init, or NULL for lazy init. * @return Pointer to the new context, or @c NULL on allocation failure. */ ZXC_EXPORT zxc_cctx* zxc_create_cctx(const zxc_compress_opts_t* opts); /** * @brief Frees a compression context and all associated resources. * * It is safe to pass @c NULL; the call is a no-op in that case. * * @param[in] cctx Context to free. */ ZXC_EXPORT void zxc_free_cctx(zxc_cctx* cctx); /** * @brief Compresses data using a reusable context. * * Identical to zxc_compress() but reuses the internal buffers from @p cctx, * avoiding per-call malloc/free overhead. The context automatically * re-initializes when block_size or level changes between calls. * * Options are **sticky**: settings passed via @p opts are remembered and * reused on subsequent calls where @p opts is NULL. 
The initial sticky * values come from the @p opts passed to zxc_create_cctx(). * * @param[in,out] cctx Reusable compression context. * @param[in] src Source data. * @param[in] src_size Source data size in bytes. * @param[out] dst Destination buffer. * @param[in] dst_capacity Capacity of the destination buffer. * @param[in] opts Compression options, or NULL to reuse * settings from create / last call. * * @return Compressed size in bytes (> 0) on success, * or a negative @ref zxc_error_t code on failure. */ ZXC_EXPORT int64_t zxc_compress_cctx(zxc_cctx* cctx, const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_compress_opts_t* opts); /* --- Decompression context ----------------------------------------------- */ /** @brief Opaque decompression context (forward-declared). */ typedef struct zxc_dctx_s zxc_dctx; /** * @brief Creates a new reusable decompression context. * * @return Pointer to the new context, or @c NULL on allocation failure. */ ZXC_EXPORT zxc_dctx* zxc_create_dctx(void); /** * @brief Frees a decompression context and all associated resources. * * It is safe to pass @c NULL. * * @param[in] dctx Context to free. */ ZXC_EXPORT void zxc_free_dctx(zxc_dctx* dctx); /** * @brief Decompresses data using a reusable context. * * Identical to zxc_decompress() but reuses buffers from @p dctx. * * @param[in,out] dctx Reusable decompression context. * @param[in] src Compressed data. * @param[in] src_size Compressed data size in bytes. * @param[out] dst Destination buffer. * @param[in] dst_capacity Capacity of the destination buffer. * @param[in] opts Decompression options (NULL for defaults). * * @return Decompressed size in bytes (> 0) on success, * or a negative @ref zxc_error_t code on failure. 
*/ ZXC_EXPORT int64_t zxc_decompress_dctx(zxc_dctx* dctx, const void* src, size_t src_size, void* dst, size_t dst_capacity, const zxc_decompress_opts_t* opts); /** @} */ /* end of context_api */ /** @} */ /* end of buffer_api */ #ifdef __cplusplus } #endif #endif // ZXC_BUFFER_Hzxc-0.9.1/include/zxc_constants.h000066400000000000000000000054621515574030100170310ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_constants.h * @brief Public constants: library version and compression levels. * * Include this header to query the library version at compile time or to * reference the predefined compression-level constants used throughout the API. */ #ifndef ZXC_CONSTANTS_H #define ZXC_CONSTANTS_H /** * @defgroup version Library Version * @brief Compile-time version information. * @{ */ /** @brief Major version number. */ #define ZXC_VERSION_MAJOR 0 /** @brief Minor version number. */ #define ZXC_VERSION_MINOR 9 /** @brief Patch version number. */ #define ZXC_VERSION_PATCH 1 /** @cond INTERNAL */ #define ZXC_STR_HELPER(x) #x #define ZXC_STR(x) ZXC_STR_HELPER(x) /** @endcond */ /** * @brief Human-readable version string (e.g. "0.7.2"). */ #define ZXC_LIB_VERSION_STR \ ZXC_STR(ZXC_VERSION_MAJOR) \ "." ZXC_STR(ZXC_VERSION_MINOR) "." ZXC_STR(ZXC_VERSION_PATCH) /** @} */ /* end of version */ /** * @defgroup block_size Block Size * @brief Block size constraints for compression. * * Block size must be a power of two in range * [@ref ZXC_BLOCK_SIZE_MIN, @ref ZXC_BLOCK_SIZE_MAX]. * Pass 0 to any API to use @ref ZXC_BLOCK_SIZE_DEFAULT. * @{ */ /** @brief log2(ZXC_BLOCK_SIZE_MIN) - exponent code for minimum block size. */ #define ZXC_BLOCK_SIZE_MIN_LOG2 12 /** @brief log2(ZXC_BLOCK_SIZE_MAX) - exponent code for maximum block size. */ #define ZXC_BLOCK_SIZE_MAX_LOG2 21 /** @brief Default block size (256 KB). 
*/ #define ZXC_BLOCK_SIZE_DEFAULT (256 * 1024) /** @brief Minimum allowed block size (4 KB = 2^12). */ #define ZXC_BLOCK_SIZE_MIN (1U << ZXC_BLOCK_SIZE_MIN_LOG2) /** @brief Maximum allowed block size (2 MB = 2^21). */ #define ZXC_BLOCK_SIZE_MAX (1U << ZXC_BLOCK_SIZE_MAX_LOG2) /** @} */ /* end of block_size */ /** * @defgroup levels Compression Levels * @brief Predefined compression levels for the ZXC library. * * Higher levels trade encoding speed for better compression ratio. * All levels produce data that can be decompressed at the same speed. * @{ */ /** * @brief Enumeration of ZXC compression levels. * * Use one of these constants as the @p level parameter of * zxc_compress() or zxc_stream_compress(). */ typedef enum { ZXC_LEVEL_FASTEST = 1, /**< Fastest compression, best for real-time applications. */ ZXC_LEVEL_FAST = 2, /**< Fast compression, good for real-time applications. */ ZXC_LEVEL_DEFAULT = 3, /**< Recommended: ratio > LZ4, decode speed > LZ4. */ ZXC_LEVEL_BALANCED = 4, /**< Good ratio, good decode speed. */ ZXC_LEVEL_COMPACT = 5 /**< High density. Best for storage/firmware/assets. */ } zxc_compression_level_t; /** @} */ /* end of levels */ #endif // ZXC_CONSTANTS_H zxc-0.9.1/include/zxc_error.h000066400000000000000000000052061515574030100161420ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_error.h * @brief Error codes and error-name lookup for the ZXC library. * * Every public function that can fail returns a value from @ref zxc_error_t. * A return value < 0 indicates an error; use zxc_error_name() to convert * any code to a human-readable string. */ #ifndef ZXC_ERROR_H #define ZXC_ERROR_H #include "zxc_export.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup error Error Handling * @brief Error codes returned by ZXC library functions. 
* @{ */ /** * @brief Error codes returned by ZXC library functions. * * All error codes are negative integers. Functions that return int or int64_t * will return these codes on failure. Check with `result < 0` for errors. * * Use zxc_error_name() to get a human-readable string for any error code. */ typedef enum { ZXC_OK = 0, /**< Success (no error). */ /* Memory errors */ ZXC_ERROR_MEMORY = -1, /**< Memory allocation failure. */ /* Buffer/capacity errors */ ZXC_ERROR_DST_TOO_SMALL = -2, /**< Destination buffer too small. */ ZXC_ERROR_SRC_TOO_SMALL = -3, /**< Source buffer too small or truncated input. */ /* Format/header errors */ ZXC_ERROR_BAD_MAGIC = -4, /**< Invalid magic word in file header. */ ZXC_ERROR_BAD_VERSION = -5, /**< Unsupported file format version. */ ZXC_ERROR_BAD_HEADER = -6, /**< Corrupted or invalid header (CRC mismatch). */ ZXC_ERROR_BAD_CHECKSUM = -7, /**< Block or global checksum verification failed. */ /* Data integrity errors */ ZXC_ERROR_CORRUPT_DATA = -8, /**< Corrupted compressed data. */ ZXC_ERROR_BAD_OFFSET = -9, /**< Invalid match offset during decompression. */ ZXC_ERROR_OVERFLOW = -10, /**< Buffer overflow detected during processing. */ /* I/O errors */ ZXC_ERROR_IO = -11, /**< Read/write/seek failure on file. */ ZXC_ERROR_NULL_INPUT = -12, /**< Required input pointer is NULL. */ /* Block errors */ ZXC_ERROR_BAD_BLOCK_TYPE = -13, /**< Unknown or unexpected block type. */ ZXC_ERROR_BAD_BLOCK_SIZE = -14, /**< Invalid block size. */ } zxc_error_t; /** * @brief Returns a human-readable name for the given error code. * * @param[in] code An error code from zxc_error_t (or any integer). * @return A constant string such as "ZXC_OK" or "ZXC_ERROR_MEMORY". * Returns "ZXC_UNKNOWN_ERROR" for unrecognized codes. 
*/ ZXC_EXPORT const char* zxc_error_name(const int code); /** @} */ /* end of error */ #ifdef __cplusplus } #endif #endif /* ZXC_ERROR_H */ zxc-0.9.1/include/zxc_export.h000066400000000000000000000057111515574030100163330ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_export.h * @brief Platform-specific symbol visibility macros. * * This header defines the `ZXC_EXPORT`, `ZXC_NO_EXPORT`, and `ZXC_DEPRECATED` * macros that control which symbols are exported from the shared library. * * - Define @c ZXC_STATIC_DEFINE when building or consuming ZXC as a **static** * library to disable import/export annotations. * - When building the shared library the CMake target defines * @c zxc_lib_EXPORTS automatically, selecting `dllexport` / `visibility("default")`. * - When consuming the shared library neither macro is defined, so the header * selects `dllimport` / `visibility("default")`. */ #ifndef ZXC_EXPORT_H #define ZXC_EXPORT_H /** * @defgroup export Symbol Visibility * @brief Macros controlling DLL export/import and deprecation attributes. * @{ */ #ifdef ZXC_STATIC_DEFINE /** * @def ZXC_EXPORT * @brief Marks a symbol as part of the public shared-library API. * * Expands to nothing when building a static library (@c ZXC_STATIC_DEFINE), * to `__declspec(dllexport)` or `__declspec(dllimport)` on Windows, or * to `__attribute__((visibility("default")))` on GCC/Clang. */ #define ZXC_EXPORT /** * @def ZXC_NO_EXPORT * @brief Marks a symbol as hidden (not exported from the shared library). * * Expands to nothing for static builds or Windows, and to * `__attribute__((visibility("hidden")))` on GCC/Clang. 
*/ #define ZXC_NO_EXPORT #else /* shared library */ #ifndef ZXC_EXPORT #ifdef zxc_lib_EXPORTS /* Building the library */ #ifdef _WIN32 #define ZXC_EXPORT __declspec(dllexport) #else #define ZXC_EXPORT __attribute__((visibility("default"))) #endif #else /* Consuming the library */ #ifdef _WIN32 #define ZXC_EXPORT __declspec(dllimport) #else #define ZXC_EXPORT __attribute__((visibility("default"))) #endif #endif #endif #ifndef ZXC_NO_EXPORT #ifdef _WIN32 #define ZXC_NO_EXPORT #else #define ZXC_NO_EXPORT __attribute__((visibility("hidden"))) #endif #endif #endif /* ZXC_STATIC_DEFINE */ #ifndef ZXC_DEPRECATED /** * @def ZXC_DEPRECATED * @brief Marks a symbol as deprecated. * * The compiler will emit a warning when a deprecated symbol is referenced. * Expands to `__declspec(deprecated)` on MSVC or * `__attribute__((__deprecated__))` on GCC/Clang. */ #ifdef _WIN32 #define ZXC_DEPRECATED __declspec(deprecated) #else #define ZXC_DEPRECATED __attribute__((__deprecated__)) #endif #endif /** * @def ZXC_DEPRECATED_EXPORT * @brief Combines `ZXC_EXPORT` with `ZXC_DEPRECATED`. */ #ifndef ZXC_DEPRECATED_EXPORT #define ZXC_DEPRECATED_EXPORT ZXC_EXPORT ZXC_DEPRECATED #endif /** * @def ZXC_DEPRECATED_NO_EXPORT * @brief Combines `ZXC_NO_EXPORT` with `ZXC_DEPRECATED`. */ #ifndef ZXC_DEPRECATED_NO_EXPORT #define ZXC_DEPRECATED_NO_EXPORT ZXC_NO_EXPORT ZXC_DEPRECATED #endif /** @} */ /* end of export */ #endif /* ZXC_EXPORT_H */ zxc-0.9.1/include/zxc_sans_io.h000066400000000000000000000254461515574030100164540ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_sans_io.h * @brief Low-level, sans-I/O compression primitives. * * This header exposes the building blocks used by the higher-level buffer and * streaming APIs. 
It is intended for callers who need to drive compression or * decompression themselves - for example, to integrate ZXC into a custom I/O * pipeline or an event loop. * * A typical usage pattern is: * -# Allocate and initialise a context with zxc_cctx_init(). * -# Write the file header with zxc_write_file_header(). * -# Compress or decompress individual blocks via the chunk wrappers. * -# Write the footer with zxc_write_file_footer(). * -# Release the context with zxc_cctx_free(). * * @see zxc_buffer.h for the simple one-shot API. * @see zxc_stream.h for the multi-threaded streaming API. */ #ifndef ZXC_SANS_IO_H #define ZXC_SANS_IO_H #include #include #include "zxc_export.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup sans_io Sans-IO API * @brief Low-level primitives for building custom compression drivers. * @{ */ /** * @struct zxc_cctx_t * @brief Compression Context structure. * * This structure holds the state and buffers required for the compression * process. It is designed to be reused across multiple blocks or calls to avoid * the overhead of repeated memory allocations. * * **Key Fields:** * - `hash_table`: Stores indices of 4-byte sequences. Size is `2 * * ZXC_LZ_HASH_SIZE` to reduce collisions (load factor < 0.5). * - `chain_table`: Handles collisions by storing the *previous* occurrence of a * hash. This forms a linked list for each hash bucket, allowing us to * traverse history. * - `epoch`: Used for "Lazy Hash Table Invalidation". Instead of * `ZXC_MEMSET`ing the entire hash table (which is slow) for every block, we * store `(epoch << 16) | offset`. If the stored epoch doesn't match the current * `ctx->epoch`, the entry is considered invalid/empty. * * hash_table Pointer to the hash table used for LZ77 match finding. * chain_table Pointer to the chain table for collision resolution. * memory_block Pointer to the single allocation block containing all buffers. * epoch Current epoch counter for lazy hash table invalidation. 
* buf_extras Pointer to the buffer for extra lengths (LL >= 15 or ML >= 15). * buf_offsets Pointer to the buffer for offsets. * buf_tokens Pointer to the buffer for token sequences. * literals Pointer to the buffer for raw literal bytes. * lit_buffer Pointer to a scratch buffer for literal processing (e.g., * RLE decoding). * lit_buffer_cap Current capacity of the literal scratch buffer. * checksum_enabled Flag indicating if checksums should be computed. * compression_level The configured compression level. */ typedef struct { /* Hot zone: random access / high frequency. * Kept at the start to ensure they reside in the first cache line (64 bytes). */ uint32_t* hash_table; /**< Hash table for LZ77 match finding. */ uint16_t* chain_table; /**< Chain table for collision resolution. */ void* memory_block; /**< Single allocation block owner. */ uint32_t epoch; /**< Current epoch for lazy hash table invalidation. */ /* Warm zone: sequential access per sequence. */ uint32_t* buf_sequences; /**< Buffer for sequence records (packed: LL(8)|ML(8)|Offset(16)). */ uint8_t* buf_tokens; /**< Buffer for token sequences. */ uint16_t* buf_offsets; /**< Buffer for offsets. */ uint8_t* buf_extras; /**< Buffer for extra lengths (vbytes for LL/ML). */ uint8_t* literals; /**< Buffer for literal bytes. */ /* Cold zone: configuration / scratch / resizeable. */ uint8_t* lit_buffer; /**< Scratch buffer for literals (RLE). */ size_t lit_buffer_cap; /**< Current capacity of the scratch buffer. */ uint8_t* work_buf; /**< Padded scratch buffer for buffer-API decompression. */ size_t work_buf_cap; /**< Capacity of the work buffer. */ int checksum_enabled; /**< 1 if checksum calculation/verification is enabled. */ int compression_level; /**< Compression level. */ /* Block-size derived parameters (computed once at init). */ size_t chunk_size; /**< Effective block size in bytes. */ uint32_t offset_bits; /**< log2(chunk_size) - governs epoch_mark shift. 
*/ uint32_t offset_mask; /**< (1U << offset_bits) - 1 */ uint32_t max_epoch; /**< 1U << (32 - offset_bits) */ } zxc_cctx_t; /** * @brief Initializes a ZXC compression context. * * Sets up the internal state required for compression operations, allocating * necessary buffers based on the chunk size and compression level. * * @param[out] ctx Pointer to the ZXC compression context structure to initialize. * @param[in] chunk_size The size of the data chunk to be compressed. This * determines the allocation size for various internal buffers. * @param[in] mode The operation mode (1 for compression, 0 for decompression). * @param[in] level The desired compression level to be stored in the context. * @param[in] checksum_enabled 1 to enable checksums, 0 to disable. * @return ZXC_OK on success, or a negative zxc_error_t code (e.g., ZXC_ERROR_MEMORY) if memory * allocation fails. */ ZXC_EXPORT int zxc_cctx_init(zxc_cctx_t* ctx, const size_t chunk_size, const int mode, const int level, const int checksum_enabled); /** * @brief Frees resources associated with a ZXC compression context. * * This function releases all internal buffers and tables associated with the * given ZXC compression context structure. It does not free the context pointer * itself, only its members. * * @param[in,out] ctx Pointer to the compression context to clean up. */ ZXC_EXPORT void zxc_cctx_free(zxc_cctx_t* ctx); /** * @brief Writes the standard ZXC file header to a destination buffer. * * This function stores the magic word (little-endian) and the version number * into the provided buffer. It ensures the buffer has sufficient capacity * before writing. * * @param[out] dst The destination buffer where the header will be written. * @param[in] dst_capacity The total capacity of the destination buffer in bytes. * @param[in] has_checksum Flag indicating whether the checksum bit should be set. 
* @return The number of bytes written (ZXC_FILE_HEADER_SIZE) on success, * or ZXC_ERROR_DST_TOO_SMALL if the destination capacity is insufficient. */ ZXC_EXPORT int zxc_write_file_header(uint8_t* dst, const size_t dst_capacity, const size_t chunk_size, const int has_checksum); /** * @brief Validates and reads the ZXC file header from a source buffer. * * This function checks if the provided source buffer is large enough to contain * a ZXC file header and verifies that the magic word and version number match * the expected ZXC format specifications. * * @param[in] src Pointer to the source buffer containing the file data. * @param[in] src_size Size of the source buffer in bytes. * @param[out] out_block_size Optional pointer to receive the recommended block size. * @param[out] out_has_checksum Optional pointer to receive the checksum flag. * @return ZXC_OK on success, or a negative error code (e.g., ZXC_ERROR_SRC_TOO_SMALL, * ZXC_ERROR_BAD_MAGIC, ZXC_ERROR_BAD_VERSION). */ ZXC_EXPORT int zxc_read_file_header(const uint8_t* src, const size_t src_size, size_t* out_block_size, int* out_has_checksum); /** * @struct zxc_block_header_t * @brief Represents the on-disk header structure for a ZXC block (8 bytes). * * This structure contains metadata required to parse and decompress a block. * Note: raw_size is not stored in the header; decoders derive it from Section * Descriptors within the compressed payload. */ typedef struct { uint8_t block_type; /**< Block type (see @ref zxc_block_type_t). */ uint8_t block_flags; /**< Flags (e.g., checksum presence). */ uint8_t reserved; /**< Reserved for future protocol extensions. */ uint8_t header_crc; /**< Header integrity checksum (1 byte). */ uint32_t comp_size; /**< Compressed size excluding this header. */ } zxc_block_header_t; /** * @brief Encodes a block header into the destination buffer. * * This function serializes the contents of a `zxc_block_header_t` structure * into a byte array in little-endian format. 
It ensures the destination buffer * has sufficient capacity before writing. * * @param[out] dst Pointer to the destination buffer where the header will be * written. * @param[in] dst_capacity The total size of the destination buffer in bytes. * @param[in] bh Pointer to the source block header structure containing the data to * write. * * @return The number of bytes written (ZXC_BLOCK_HEADER_SIZE) on success, * or ZXC_ERROR_DST_TOO_SMALL if the destination buffer capacity is insufficient. */ ZXC_EXPORT int zxc_write_block_header(uint8_t* dst, const size_t dst_capacity, const zxc_block_header_t* bh); /** * @brief Reads and parses a ZXC block header from a source buffer. * * This function extracts the block type, flags, reserved fields, compressed * size, and raw size from the first `ZXC_BLOCK_HEADER_SIZE` bytes of the source * buffer. It handles endianness conversion for multi-byte fields (Little * Endian). * * @param[in] src Pointer to the source buffer containing the block data. * @param[in] src_size The size of the source buffer in bytes. * @param[out] bh Pointer to a `zxc_block_header_t` structure where the parsed * header information will be stored. * * @return ZXC_OK on success, or ZXC_ERROR_SRC_TOO_SMALL if the source buffer is smaller * than the required block header size. */ ZXC_EXPORT int zxc_read_block_header(const uint8_t* src, const size_t src_size, zxc_block_header_t* bh); /** * @brief Writes the ZXC file footer. * * The footer stores the original uncompressed size and an optional global * checksum. It is always @c ZXC_FILE_FOOTER_SIZE (12) bytes. * * @param[out] dst Destination buffer. * @param[in] dst_capacity Capacity of destination buffer. * @param[in] src_size Original uncompressed size of the data. * @param[in] global_hash Global checksum hash (if enabled). * @param[in] checksum_enabled Flag indicating if checksum is enabled. * @return Number of bytes written (12) on success, or ZXC_ERROR_DST_TOO_SMALL on failure. 
*/ ZXC_EXPORT int zxc_write_file_footer(uint8_t* dst, const size_t dst_capacity, const uint64_t src_size, const uint32_t global_hash, const int checksum_enabled); /** @} */ /* end of sans_io */ #ifdef __cplusplus } #endif #endif // ZXC_SANS_IO_Hzxc-0.9.1/include/zxc_stream.h000066400000000000000000000126631515574030100163110ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_stream.h * @brief Multi-threaded streaming compression and decompression API. * * This header provides the streaming driver that reads from a @c FILE* input and * writes compressed (or decompressed) output to a @c FILE*. Internally the * driver uses an asynchronous Producer-Consumer pipeline via a ring buffer to * separate I/O from CPU-intensive work: * * 1. **Reader thread** - reads chunks from `f_in`. * 2. **Worker threads** - compress/decompress chunks in parallel. * 3. **Writer thread** - orders the results and writes them to `f_out`. * * @see zxc_buffer.h for the simple one-shot buffer API. * @see zxc_sans_io.h for low-level sans-I/O building blocks. */ #ifndef ZXC_STREAM_H #define ZXC_STREAM_H #include #include #include #include "zxc_export.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup stream_api Streaming API * @brief Multi-threaded, FILE*-based compression and decompression. * @{ */ /** * @brief Progress callback function type. * * This callback is invoked periodically during compression/decompression to report * progress. It is called from the writer thread after each block is processed. * * @param[in] bytes_processed Total input bytes processed so far. * @param[in] bytes_total Total input bytes to process (0 if unknown, e.g., stdin). * @param[in] user_data User-provided context pointer (passed through from API call). * * @note The callback should be fast and non-blocking. Avoid heavy I/O or mutex locks. 
*/ typedef void (*zxc_progress_callback_t)(uint64_t bytes_processed, uint64_t bytes_total, const void* user_data); /** * @brief Options for streaming compression. * * Zero-initialise for safe defaults: level 0 maps to ZXC_LEVEL_DEFAULT (3), * block_size 0 maps to ZXC_BLOCK_SIZE_DEFAULT (256 KB), n_threads 0 means * auto-detect, and all other fields are disabled. * * @code * zxc_compress_opts_t opts = { .level = ZXC_LEVEL_COMPACT }; * zxc_stream_compress(f_in, f_out, &opts); * @endcode */ typedef struct { int n_threads; /**< Worker thread count (0 = auto-detect CPU cores). */ int level; /**< Compression level 1-5 (0 = default, ZXC_LEVEL_DEFAULT). */ size_t block_size; /**< Block size in bytes (0 = default 256 KB). Must be power of 2, [4KB - 2MB]. */ int checksum_enabled; /**< 1 to enable per-block and global checksums, 0 to disable. */ zxc_progress_callback_t progress_cb; /**< Optional progress callback (NULL to disable). */ void* user_data; /**< User context pointer passed to progress_cb. */ } zxc_compress_opts_t; /** * @brief Options for streaming decompression. * * Zero-initialise for safe defaults. * * @code * zxc_decompress_opts_t opts = { .checksum = 1 }; * zxc_stream_decompress(f_in, f_out, &opts); * @endcode */ typedef struct { int n_threads; /**< Worker thread count (0 = auto-detect CPU cores). */ int checksum_enabled; /**< 1 to verify per-block and global checksums, 0 to skip. */ zxc_progress_callback_t progress_cb; /**< Optional progress callback (NULL to disable). */ void* user_data; /**< User context pointer passed to progress_cb. */ } zxc_decompress_opts_t; /** * @brief Compresses data from an input stream to an output stream. * * This function sets up a multi-threaded pipeline: * 1. Reader Thread: Reads chunks from f_in. * 2. Worker Threads: Compress chunks in parallel (LZ77 + Bitpacking). * 3. Writer Thread: Orders the processed chunks and writes them to f_out. * * @param[in] f_in Input file stream (must be opened in "rb" mode). 
* @param[out] f_out Output file stream (must be opened in "wb" mode). * @param[in] opts Compression options (NULL uses all defaults). * * @return Total compressed bytes written, or a negative zxc_error_t code (e.g., * ZXC_ERROR_IO) if an error occurred. */ ZXC_EXPORT int64_t zxc_stream_compress(FILE* f_in, FILE* f_out, const zxc_compress_opts_t* opts); /** * @brief Decompresses data from an input stream to an output stream. * * Uses the same pipeline architecture as compression to maximize throughput. * * @param[in] f_in Input file stream (must be opened in "rb" mode). * @param[out] f_out Output file stream (must be opened in "wb" mode). * @param[in] opts Decompression options (NULL uses all defaults). * * @return Total decompressed bytes written, or a negative zxc_error_t code (e.g., * ZXC_ERROR_BAD_HEADER) if an error occurred. */ ZXC_EXPORT int64_t zxc_stream_decompress(FILE* f_in, FILE* f_out, const zxc_decompress_opts_t* opts); /** * @brief Returns the decompressed size stored in a ZXC compressed file. * * This function reads the file footer to extract the original uncompressed size * without performing any decompression. The file position is restored after reading. * * @param[in] f_in Input file stream (must be opened in "rb" mode). * * @return The original uncompressed size in bytes, or a negative zxc_error_t code (e.g., * ZXC_ERROR_BAD_MAGIC) if the file is invalid or an I/O error occurred. */ ZXC_EXPORT int64_t zxc_stream_get_decompressed_size(FILE* f_in); /** @} */ /* end of stream_api */ #ifdef __cplusplus } #endif #endif // ZXC_STREAM_Hzxc-0.9.1/src/000077500000000000000000000000001515574030100131155ustar00rootroot00000000000000zxc-0.9.1/src/cli/000077500000000000000000000000001515574030100136645ustar00rootroot00000000000000zxc-0.9.1/src/cli/main.c000066400000000000000000001434501515574030100147630ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
* SPDX-License-Identifier: BSD-3-Clause */ /** * @file main.c * @brief Command Line Interface (CLI) entry point for the ZXC compression tool. * * This file handles argument parsing, file I/O setup, platform-specific * compatibility layers (specifically for Windows), and the execution of * compression, decompression, or benchmarking modes. */ #include #include #include #include #include #include "../../include/zxc_buffer.h" #include "../../include/zxc_constants.h" #include "../../include/zxc_stream.h" #include "../lib/zxc_internal.h" #if defined(_WIN32) #define ZXC_OS "windows" #elif defined(__APPLE__) #define ZXC_OS "darwin" #elif defined(__linux__) #define ZXC_OS "linux" #else #define ZXC_OS "unknown" #endif #if defined(__x86_64__) || defined(_M_AMD64) #define ZXC_ARCH "x86_64" #elif defined(__aarch64__) || defined(_M_ARM64) #define ZXC_ARCH "arm64" #else #define ZXC_ARCH "unknown" #endif #ifdef _WIN32 // Windows Implementation #include #include #include #include // Map POSIX macros to MSVC equivalents #define F_OK 0 #define access _access #define isatty _isatty #define fileno _fileno #define unlink _unlink #define fseeko _fseeki64 #define ftello _ftelli64 /** * @brief Returns the current monotonic time in seconds using Windows * Performance Counter. * @return double Time in seconds. */ static double zxc_now(void) { LARGE_INTEGER frequency; LARGE_INTEGER count; QueryPerformanceFrequency(&frequency); QueryPerformanceCounter(&count); return (double)count.QuadPart / frequency.QuadPart; } struct option { const char* name; int has_arg; int* flag; int val; }; #define no_argument 0 #define required_argument 1 #define optional_argument 2 char* optarg = NULL; int optind = 1; int optopt = 0; /** * @brief Minimal implementation of getopt_long for Windows. * Handles long options (--option) and short options (-o). 
*/ static int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex) { if (optind >= argc) return -1; char* curr = argv[optind]; if (curr[0] == '-' && curr[1] == '-') { char* name_end = strchr(curr + 2, '='); const size_t name_len = name_end ? (size_t)(name_end - (curr + 2)) : strlen(curr + 2); const struct option* p = longopts; while (p && p->name) { const size_t opt_len = strlen(p->name); if (name_len == opt_len && strncmp(curr + 2, p->name, name_len) == 0) { optind++; if (p->has_arg == required_argument) { if (name_end) optarg = name_end + 1; else if (optind < argc) optarg = argv[optind++]; else return '?'; } else if (p->has_arg == optional_argument) { if (name_end) optarg = name_end + 1; else optarg = NULL; } if (p->flag) { *p->flag = p->val; return 0; } return p->val; } p++; } return '?'; } if (curr[0] == '-') { char c = curr[1]; optind++; const char* os = strchr(optstring, c); if (!os) return '?'; if (os[1] == ':') { if (os[2] == ':') { // Optional argument (::) if (curr[2] != '\0') optarg = curr + 2; else optarg = NULL; } else { // Required argument (:) if (curr[2] != '\0') optarg = curr + 2; else if (optind < argc) optarg = argv[optind++]; else return '?'; } } return c; } return -1; } #else // POSIX / Linux / macOS Implementation #include #include #include #include #include #include #include #include #include #include #include #include /** * @brief Returns the current monotonic time in seconds using clock_gettime. * @return double Time in seconds. */ static double zxc_now(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec + ts.tv_nsec / 1e9; } #endif /** * @brief Validates and resolves the input file path to prevent directory traversal * and ensure it is a regular file. * * @param[in] path The raw input path from command line. * @param[out] resolved_buffer Buffer to store resolved path (needs sufficient size). * @param[in] buffer_size Size of the resolved_buffer. 
* @return 0 on success, -1 on error. */ static int zxc_validate_input_path(const char* path, char* resolved_buffer, size_t buffer_size) { #ifdef _WIN32 if (!_fullpath(resolved_buffer, path, buffer_size)) { return -1; } DWORD attr = GetFileAttributesA(resolved_buffer); if (attr == INVALID_FILE_ATTRIBUTES || (attr & FILE_ATTRIBUTE_DIRECTORY)) { // Not a valid file or is a directory errno = (attr == INVALID_FILE_ATTRIBUTES) ? ENOENT : EISDIR; return -1; } return 0; #else char* const res = realpath(path, NULL); if (!res) { // realpath failed (e.g. file does not exist) return -1; } struct stat st; if (stat(res, &st) != 0) { free(res); return -1; } if (!S_ISREG(st.st_mode)) { free(res); errno = EISDIR; // Generic error for non-regular file return -1; } const size_t len = strlen(res); if (len >= buffer_size) { free(res); errno = ENAMETOOLONG; return -1; } memcpy(resolved_buffer, res, len + 1); free(res); return 0; #endif } /** * @brief Validates and resolves the output file path. * * @param[in] path The raw output path. * @param[out] resolved_buffer Buffer to store resolved path. * @param[in] buffer_size Size of the resolved_buffer. * @return 0 on success, -1 on error. 
 */
/* Output counterpart of zxc_validate_input_path: the parent directory must
 * exist and be a directory; the file itself may or may not exist yet. */
static int zxc_validate_output_path(const char* path, char* resolved_buffer, size_t buffer_size) {
#ifdef _WIN32
    if (!_fullpath(resolved_buffer, path, buffer_size)) return -1;
    DWORD attr = GetFileAttributesA(resolved_buffer);
    if (attr != INVALID_FILE_ATTRIBUTES && (attr & FILE_ATTRIBUTE_DIRECTORY)) {
        errno = EISDIR;
        return -1;
    }
    return 0;
#else
    // POSIX output path validation
    /* Two copies of `path` are needed because dirname() and basename() are
     * each allowed to modify their argument in place. */
    char* const temp_path = strdup(path);
    if (!temp_path) return -1;
    char* const temp_path2 = strdup(path);
    if (!temp_path2) {
        free(temp_path);
        return -1;
    }
    // Split into dir and base
    char* const dir = dirname(temp_path);  // Note: dirname may modify string or return static
    /* NOTE(review): this is the POSIX (libgen.h) basename; the GNU variant
     * behaves differently with trailing slashes — confirm which one the
     * include order selects on glibc. */
    const char* const base = basename(temp_path2);
    char* const resolved_dir = realpath(dir, NULL);
    if (!resolved_dir) {
        // Parent directory must exist
        free(temp_path);
        free(temp_path2);
        return -1;
    }
    struct stat st;
    if (stat(resolved_dir, &st) != 0 || !S_ISDIR(st.st_mode)) {
        free(resolved_dir);
        free(temp_path);
        free(temp_path2);
        errno = EISDIR;
        return -1;
    }
    // Reconstruct valid path: resolved_dir / base
    // Ensure we don't overflow buffer
    const int written = snprintf(resolved_buffer, buffer_size, "%s/%s", resolved_dir, base);
    free(resolved_dir);
    free(temp_path);
    free(temp_path2);
    if (written < 0 || (size_t)written >= buffer_size) {
        errno = ENAMETOOLONG;
        return -1;
    }
    return 0;
#endif
}

// CLI Logging Helpers
static int g_quiet = 0;    // -q: suppress all diagnostic output on stderr
static int g_verbose = 0;  // -v: enable extra diagnostic output

/**
 * @brief Standard logging function. Respects the global quiet flag.
 */
static void zxc_log(const char* fmt, ...) {
    if (g_quiet) return;
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}

/**
 * @brief Verbose logging function. Only prints if verbose is enabled and quiet
 * is disabled.
 */
static void zxc_log_v(const char* fmt, ...) {
    if (!g_verbose || g_quiet) return;
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}

// OS-specific helpers for directory checks
#ifdef _WIN32
/* Returns nonzero when `path` names an existing directory. */
static int zxc_is_directory(const char* path) {
    DWORD attr = GetFileAttributesA(path);
    return (attr != INVALID_FILE_ATTRIBUTES && (attr & FILE_ATTRIBUTE_DIRECTORY));
}
#else
/* Returns nonzero when `path` names an existing directory (stat follows
 * symlinks, so a symlink to a directory also qualifies). */
static int zxc_is_directory(const char* path) {
    struct stat st;
    if (stat(path, &st) == 0) {
        return S_ISDIR(st.st_mode);
    }
    return 0;
}
#endif

/* Operating mode selected by command-line flags. */
typedef enum {
    MODE_COMPRESS,
    MODE_DECOMPRESS,
    MODE_BENCHMARK,
    MODE_INTEGRITY,
    MODE_LIST
} zxc_mode_t;

enum { OPT_VERSION = 1000, OPT_HELP };

// Forward declaration for recursive mode
static int process_single_file(const char* in_path, const char* out_path_override, zxc_mode_t mode,
                               int num_threads, int keep_input, int force, int to_stdout,
                               int checksum, int level, size_t block_size, int json_output);

// Forward declaration for processing directory
static int process_directory(const char* dir_path, zxc_mode_t mode, int num_threads, int keep_input,
                             int force, int to_stdout, int checksum, int level, size_t block_size,
                             int json_output);

// OS-specific implementation of directory processing
/* Recursively walks `dir_path`, dispatching regular files to
 * process_single_file and subdirectories back into itself. Per-entry errors
 * are OR-ed into the return value so one failure does not stop the walk.
 * NOTE(review): on POSIX, stat() follows symlinks, so a symlinked directory
 * cycle would recurse without bound — consider lstat() here. */
static int process_directory(const char* dir_path, zxc_mode_t mode, int num_threads, int keep_input,
                             int force, int to_stdout, int checksum, int level, size_t block_size,
                             int json_output) {
    int overall_ret = 0;
#ifdef _WIN32
    char search_path[MAX_PATH];
    snprintf(search_path, sizeof(search_path), "%s\\*", dir_path);
    WIN32_FIND_DATAA find_data;
    HANDLE hFind = FindFirstFileA(search_path, &find_data);
    if (hFind == INVALID_HANDLE_VALUE) {
        zxc_log("Error opening directory '%s'\n", dir_path);
        return 1;
    }
    do {
        if (strcmp(find_data.cFileName, ".") == 0 || strcmp(find_data.cFileName, "..") == 0) {
            continue;
        }
        char full_path[MAX_PATH];
        snprintf(full_path, sizeof(full_path), "%s\\%s", dir_path, find_data.cFileName);
        if (find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
            overall_ret |= process_directory(full_path, mode, num_threads, keep_input, force,
                                             to_stdout, checksum, level, block_size, json_output);
        } else {
            // Check if it ends with .xc to skip it if compressing to avoid double compression
            if (mode == MODE_COMPRESS) {
                const size_t len = strlen(full_path);
                if (len >= 3 && strcmp(full_path + len - 3, ".xc") == 0) {
                    continue;  // Skip already compressed files in recursive compression
                }
            }
            overall_ret |= process_single_file(full_path, NULL, mode, num_threads, keep_input,
                                               force, to_stdout, checksum, level, block_size,
                                               json_output);
        }
    } while (FindNextFileA(hFind, &find_data) != 0);
    FindClose(hFind);
#else
    DIR* const dir = opendir(dir_path);
    if (!dir) {
        zxc_log("Error opening directory '%s': %s\n", dir_path, strerror(errno));
        return 1;
    }
    const struct dirent* entry;
    while ((entry = readdir(dir)) != NULL) {
        if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
            continue;
        }
        /* dir + '/' + name + NUL */
        const size_t path_len = strlen(dir_path) + 1 + strlen(entry->d_name) + 1;
        char* const full_path = malloc(path_len);
        if (!full_path) {
            zxc_log("Error allocating memory for path in directory '%s'\n", dir_path);
            continue;
        }
        const int n = snprintf(full_path, path_len, "%s/%s", dir_path, entry->d_name);
        if (n < 0 || (size_t)n >= path_len) {
            zxc_log("Error: path too long in directory '%s'\n", dir_path);
            free(full_path);
            continue;
        }
        struct stat st;
        if (stat(full_path, &st) == 0) {
            if (S_ISDIR(st.st_mode)) {
                overall_ret |= process_directory(full_path, mode, num_threads, keep_input, force,
                                                 to_stdout, checksum, level, block_size,
                                                 json_output);
            } else if (S_ISREG(st.st_mode)) {
                // Check if it ends with .xc to skip it if compressing to avoid double compression
                if (mode == MODE_COMPRESS) {
                    const size_t len = strlen(full_path);
                    if (len >= 3 && strcmp(full_path + len - 3, ".xc") == 0) {
                        free(full_path);
                        continue;  // Skip already compressed files in recursive compression
                    }
                }
                overall_ret |= process_single_file(full_path, NULL, mode, num_threads, keep_input,
                                                   force, to_stdout, checksum, level, block_size,
                                                   json_output);
            }
        }
        free(full_path);
    }
    closedir(dir);
#endif
    return overall_ret;
}

/* Prints CLI usage to stdout.
 * NOTE(review): the usage line prints empty brackets "[] []" — the
 * placeholders (presumably <mode> / <file>) appear to have been lost in
 * extraction; confirm against the upstream source before shipping. */
void print_help(const char* app) {
    printf("Usage: %s [] []...\n\n", app);
    printf(
        "Standard Modes:\n"
        " -z, --compress Compress FILE {default}\n"
        " -d, --decompress Decompress FILE (or stdin -> stdout)\n"
        " -l, --list List archive information\n"
        " -t, --test Test compressed FILE integrity\n"
        " -b, --bench [N] Benchmark in-memory (N=seconds, default 5)\n\n"
        "Batch Processing:\n"
        " -m, --multiple Multiple input files\n"
        " -r, --recursive Operate recursively on directories\n\n"
        "Special Options:\n"
        " -V, --version Show version information\n"
        " -h, --help Show this help message\n\n"
        "Options:\n"
        " -1..-5 Compression level {3}\n"
        " -B, --block-size Block size: 4K..2M, power of 2 {256K}\n"
        " -T, --threads N Number of threads (0=auto)\n"
        " -C, --checksum Enable checksum\n"
        " -N, --no-checksum Disable checksum\n"
        " -k, --keep Keep input file\n"
        " -f, --force Force overwrite\n"
        " -c, --stdout Write to stdout\n"
        " -v, --verbose Verbose mode\n"
        " -q, --quiet Quiet mode\n"
        " -j, --json JSON output (benchmark mode)\n");
}

/* Prints version plus an arch/OS(/kernel-release) triple built from uname()
 * where available. */
void print_version(void) {
    char sys_info[256];
#ifdef _WIN32
    snprintf(sys_info, sizeof(sys_info), "%s-%s", ZXC_ARCH, ZXC_OS);
#else
    struct utsname buffer;
    if (uname(&buffer) == 0)
        snprintf(sys_info, sizeof(sys_info), "%s-%s-%s", ZXC_ARCH, ZXC_OS, buffer.release);
    else
        snprintf(sys_info, sizeof(sys_info), "%s-%s", ZXC_ARCH, ZXC_OS);
#endif
    printf("zxc v%s (%s) by Bertrand Lebonnois & al.\nBSD 3-Clause License\n", ZXC_LIB_VERSION_STR,
           sys_info);
}

/**
 * @brief Formats a byte size into human-readable TB/GB/MB/KB/B format (Base 1000).
 */
/* Writes e.g. "1.5 KB" into `buf`; decimal (SI, base-1000) units, one
 * fractional digit, plain "N B" below 1000 bytes. */
static void format_size_decimal(uint64_t bytes, char* buf, size_t buf_size) {
    const double TB = 1000.0 * 1000.0 * 1000.0 * 1000.0;
    const double GB = 1000.0 * 1000.0 * 1000.0;
    const double MB = 1000.0 * 1000.0;
    const double KB = 1000.0;
    if ((double)bytes >= TB)
        snprintf(buf, buf_size, "%.1f TB", (double)bytes / TB);
    else if ((double)bytes >= GB)
        snprintf(buf, buf_size, "%.1f GB", (double)bytes / GB);
    else if ((double)bytes >= MB)
        snprintf(buf, buf_size, "%.1f MB", (double)bytes / MB);
    else if ((double)bytes >= KB)
        snprintf(buf, buf_size, "%.1f KB", (double)bytes / KB);
    else
        snprintf(buf, buf_size, "%llu B", (unsigned long long)bytes);
}

/**
 * @brief Progress context for CLI progress bar display.
 */
typedef struct {
    double start_time;       // zxc_now() timestamp when the operation started
    const char* operation;   // "Compressing" or "Decompressing"
    uint64_t total_size;     // Pre-determined total size (0 if unknown)
} progress_ctx_t;

/**
 * @brief Progress callback for CLI progress bar.
 *
 * Displays a real-time progress bar during compression/decompression.
 * Shows percentage, processed/total size, and throughput speed.
 *
 * Format: [==========> ] 45% | 4.5 GB/10.0 GB | 156 MB/s
 */
static void cli_progress_callback(uint64_t bytes_processed, uint64_t bytes_total,
                                  const void* user_data) {
    const progress_ctx_t* pctx = (const progress_ctx_t*)user_data;
    if (!pctx) return;
    // Use pre-determined total size from context (not the parameter)
    const uint64_t total = pctx->total_size;
    const double now = zxc_now();
    const double elapsed = now - pctx->start_time;
    // Calculate throughput speed
    double speed_mbps = 0.0;
    if (elapsed > 0.1)  // Avoid division by zero for very fast operations
        speed_mbps = (double)bytes_processed / (1000.0 * 1000.0) / elapsed;
    // Clear line and move cursor to beginning (ANSI erase-line sequence)
    fprintf(stderr, "\r\033[K");
    if (total > 0) {
        // Known size: show percentage bar
        int percent = (int)((bytes_processed * 100) / total);
        if (percent > 100) percent = 100;
        const int bar_width = 20;
        int filled = (percent * bar_width) / 100;
        fprintf(stderr, "%s [", pctx->operation);
        for (int i = 0; i < bar_width; i++) {
            if (i < filled)
                fprintf(stderr, "=");
            else if (i == filled)
                fprintf(stderr, ">");
            else
                fprintf(stderr, " ");
        }
        fprintf(stderr, "] %d%% | ", percent);
        char proc_str[32], total_str[32];
        format_size_decimal(bytes_processed, proc_str, sizeof(proc_str));
        format_size_decimal(total, total_str, sizeof(total_str));
        fprintf(stderr, "%s/%s | %.1f MB/s", proc_str, total_str, speed_mbps);
    } else {
        // Unknown size (stdin): just show bytes processed
        char proc_str[32];
        format_size_decimal(bytes_processed, proc_str, sizeof(proc_str));
        fprintf(stderr, "%s [Processing...] %s | %.1f MB/s", pctx->operation, proc_str, speed_mbps);
    }
    fflush(stderr);
}

/**
 * @brief Lists the contents of a ZXC archive.
 *
 * Reads the file header and footer to display:
 * - Compressed size
 * - Uncompressed size
 * - Compression ratio
 * - Checksum method
 * - Filename
 *
 * In verbose mode, displays additional header information.
 *
 * @param[in] path Path to the ZXC archive file.
 * @param[in] json_output If 1, output JSON format.
 * @return 0 on success, 1 on error. */
static int zxc_list_archive(const char* path, int json_output) {
    char resolved_path[4096];
    if (zxc_validate_input_path(path, resolved_path, sizeof(resolved_path)) != 0) {
        fprintf(stderr, "Error: Invalid input file '%s': %s\n", path, strerror(errno));
        return 1;
    }
    FILE* f = fopen(resolved_path, "rb");
    if (!f) {
        fprintf(stderr, "Error: Cannot open '%s': %s\n", path, strerror(errno));
        return 1;
    }
    // Get file size
    if (fseeko(f, 0, SEEK_END) != 0) {
        fclose(f);
        fprintf(stderr, "Error: Cannot seek in file\n");
        return 1;
    }
    const long long file_size = ftello(f);
    // Use public API to get decompressed size
    const int64_t uncompressed_size = zxc_stream_get_decompressed_size(f);
    if (uncompressed_size < 0) {
        fclose(f);
        fprintf(stderr, "Error: Not a valid ZXC archive\n");
        return 1;
    }
    // Read header for format info (rewind after API call)
    uint8_t header[ZXC_FILE_HEADER_SIZE];
    if (fseeko(f, 0, SEEK_SET) != 0 ||
        fread(header, 1, ZXC_FILE_HEADER_SIZE, f) != ZXC_FILE_HEADER_SIZE) {
        fclose(f);
        fprintf(stderr, "Error: Cannot read file header\n");
        return 1;
    }
    // Extract header fields
    const uint8_t format_version = header[4];
    const size_t block_units = header[5] ? header[5] : 64;  // Default 64 units = 256KB
    // Read footer for checksum info
    // NOTE(review): file_size comes straight from ftello; a file shorter than
    // ZXC_FILE_FOOTER_SIZE would make this seek offset negative — presumably
    // zxc_stream_get_decompressed_size already rejects such files; confirm.
    uint8_t footer[ZXC_FILE_FOOTER_SIZE];
    if (fseeko(f, file_size - ZXC_FILE_FOOTER_SIZE, SEEK_SET) != 0 ||
        fread(footer, 1, ZXC_FILE_FOOTER_SIZE, f) != ZXC_FILE_FOOTER_SIZE) {
        fclose(f);
        fprintf(stderr, "Error: Cannot read file footer\n");
        return 1;
    }
    fclose(f);
    // Parse checksum (if non-zero, checksum was enabled); little-endian u32 at
    // footer offset 8.
    const uint32_t stored_checksum = footer[8] | ((uint32_t)footer[9] << 8) |
                                     ((uint32_t)footer[10] << 16) | ((uint32_t)footer[11] << 24);
    const char* checksum_method = (stored_checksum != 0) ? "RapidHash" : "-";
    // Calculate ratio (uncompressed / compressed, e.g., 2.5 means 2.5x compression)
    const double ratio = (file_size > 0) ? ((double)uncompressed_size / (double)file_size) : 0.0;
    // Format sizes
    char comp_str[32], uncomp_str[32];
    format_size_decimal((uint64_t)file_size, comp_str, sizeof(comp_str));
    format_size_decimal((uint64_t)uncompressed_size, uncomp_str, sizeof(uncomp_str));
    if (json_output) {
        // JSON mode
        printf(
            "{\n"
            " \"filename\": \"%s\",\n"
            " \"compressed_size_bytes\": %lld,\n"
            " \"uncompressed_size_bytes\": %lld,\n"
            " \"compression_ratio\": %.3f,\n"
            " \"format_version\": %u,\n"
            " \"block_size_kb\": %zu,\n"
            " \"checksum_method\": \"%s\",\n"
            " \"checksum_value\": \"0x%08X\"\n"
            "}\n",
            path, (long long)file_size, (long long)uncompressed_size, ratio, format_version,
            block_units * 4, (stored_checksum != 0) ? "RapidHash" : "none", stored_checksum);
    } else if (g_verbose) {
        // Verbose mode: detailed vertical layout
        printf(
            "\nFile: %s\n"
            "-----------------------\n"
            "Block Format: %u\n"
            "Block Units: %zu (x 4KB)\n"
            "Checksum Method: %s\n",
            path, format_version, block_units, (stored_checksum != 0) ? "RapidHash" : "None");
        if (stored_checksum != 0) printf("Checksum Value: 0x%08X\n", stored_checksum);
        printf(
            "-----------------------\n"
            "Comp. Size: %s\n"
            "Uncomp. Size: %s\n"
            "Ratio: %.2f\n",
            comp_str, uncomp_str, ratio);
    } else {
        // Normal mode: table format
        printf("\n %12s %12s %5s %-10s %s\n", "Compressed", "Uncompressed", "Ratio", "Checksum",
               "Filename");
        printf(" %12s %12s %5.2f %-10s %s\n", comp_str, uncomp_str, ratio, checksum_method, path);
    }
    return 0;
}

/* Runs one compress / decompress / integrity-test operation on a single
 * input. Handles stdin/stdout plumbing, output-name derivation ("file" ->
 * "file.xc" and back), overwrite protection, progress-bar setup, large stdio
 * buffers, and deletion of the input on success unless -k was given.
 * Returns 0 on success, 1 on any failure. */
static int process_single_file(const char* in_path, const char* out_path_override, zxc_mode_t mode,
                               int num_threads, int keep_input, int force, int to_stdout,
                               int checksum_enabled, int level, size_t block_size,
                               int json_output) {
    FILE* f_in = stdin;
    FILE* f_out = stdout;
    char resolved_in_path[4096] = {0};
    char out_path[1024] = {0};
    char resolved_out_path[4096] = {0};
    int use_stdin = 1, use_stdout = 0;
    int created_out_file = 0;
    int overall_ret = 0;
    if (in_path && strcmp(in_path, "-") != 0) {
        if (zxc_validate_input_path(in_path, resolved_in_path, sizeof(resolved_in_path)) != 0) {
            zxc_log("Error: Invalid input file '%s': %s\n", in_path, strerror(errno));
            return 1;
        }
        f_in = fopen(resolved_in_path, "rb");
        if (!f_in) {
            zxc_log("Error open input %s: %s\n", resolved_in_path, strerror(errno));
            return 1;
        }
        use_stdin = 0;
    } else {
        use_stdin = 1;
        use_stdout = 1;  // Default to stdout if reading from stdin
        in_path = NULL;
    }
    if (mode == MODE_INTEGRITY) {
        /* Integrity test decompresses but discards the output. */
        use_stdout = 0;
        f_out = NULL;
    } else if (to_stdout) {
        use_stdout = 1;
    } else if (!use_stdin) {
        // Auto-generate output filename if input is a file and no output specified
        if (out_path_override) {
            snprintf(out_path, sizeof(out_path), "%s", out_path_override);
        } else if (mode == MODE_COMPRESS) {
            snprintf(out_path, sizeof(out_path), "%s.xc", in_path);
        } else {
            /* Decompression: strip a trailing ".xc" if present. */
            const size_t len = strlen(in_path);
            if (len > 3 && !strcmp(in_path + len - 3, ".xc")) {
                const size_t base_len = len - 3;
                if (base_len >= sizeof(out_path)) {
                    zxc_log("Error: Output path too long\n");
                    if (f_in) fclose(f_in);
                    return 1;
                }
                memcpy(out_path, in_path, base_len);
                out_path[base_len] = '\0';
            } else {
                snprintf(out_path, sizeof(out_path), "%s", in_path);
            }
        }
        use_stdout = 0;
    }
    // Safety check: prevent overwriting input file
    if (mode != MODE_INTEGRITY && !use_stdin && !use_stdout &&
        strcmp(in_path ? in_path : "", out_path) == 0) {
        zxc_log("Error: Input and output filenames are identical for '%s'.\n", in_path);
        if (f_in) fclose(f_in);
        return 1;
    }
    // Open output file if not writing to stdout
    if (!use_stdout && mode != MODE_INTEGRITY) {
        if (zxc_validate_output_path(out_path, resolved_out_path, sizeof(resolved_out_path)) != 0) {
            zxc_log("Error: Invalid output path '%s': %s\n", out_path, strerror(errno));
            if (f_in) fclose(f_in);
            return 1;
        }
        if (!force && access(resolved_out_path, F_OK) == 0) {
            zxc_log("Output exists. Use -f to overwrite '%s'.\n", resolved_out_path);
            fclose(f_in);
            return 1;
        }
#ifdef _WIN32
        f_out = fopen(resolved_out_path, "wb");
#else
        // Restrict permissions to 0644
        const int fd = open(resolved_out_path, O_CREAT | O_WRONLY | O_TRUNC,
                            S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
        if (fd == -1) {
            zxc_log("Error creating output %s: %s\n", resolved_out_path, strerror(errno));
            fclose(f_in);
            return 1;
        }
        f_out = fdopen(fd, "wb");
#endif
        if (!f_out) {
            zxc_log("Error open output %s: %s\n", resolved_out_path, strerror(errno));
            if (f_in) fclose(f_in);
#ifndef _WIN32
            if (fd != -1) close(fd);
#endif
            return 1;
        }
        created_out_file = 1;
    }
    // Prevent writing binary data to the terminal unless forced
    // NOTE(review): this path fcloses stdout when use_stdout is set; later
    // writes to stdout by the caller would then fail — confirm intended.
    if (use_stdout && isatty(fileno(stdout)) && mode == MODE_COMPRESS && !force) {
        zxc_log(
            "Refusing to write compressed data to terminal.\n"
            "For help, type: zxc -h\n");
        if (f_in) fclose(f_in);
        if (f_out) fclose(f_out);
        return 1;
    }
    // Set stdin/stdout to binary mode if using them
#ifdef _WIN32
    if (use_stdin) _setmode(_fileno(stdin), _O_BINARY);
    if (use_stdout) _setmode(_fileno(stdout), _O_BINARY);
#else
    // On POSIX systems, there's no text/binary distinction, but we ensure
    // no buffering issues occur by using freopen if needed
    // NOTE(review): freopen(NULL, mode, stream) support is
    // implementation-defined on some platforms — confirm on target OSes.
    if (use_stdin) {
        if (!freopen(NULL, "rb", stdin))
            zxc_log("Warning: Failed to reopen stdin in binary mode\n");
    }
    if (use_stdout) {
        if (!freopen(NULL, "wb", stdout))
            zxc_log("Warning: Failed to reopen stdout in binary mode\n");
    }
#endif
    // Determine if we should show progress bar and get file size
    // IMPORTANT: This must be done BEFORE setting large buffers with setvbuf
    // to avoid buffer inconsistency issues when reading the footer
    int show_progress = 0;
    uint64_t total_size = 0;
    if (!g_quiet && !use_stdout && !use_stdin && isatty(fileno(stderr))) {
        // Get file size based on mode
        if (mode == MODE_COMPRESS) {
            // Compression: get input file size
            const long long saved_pos = ftello(f_in);
            if (saved_pos >= 0) {
                if (fseeko(f_in, 0, SEEK_END) == 0) {
                    const long long size = ftello(f_in);
                    if (size > 0) total_size = (uint64_t)size;
                    fseeko(f_in, saved_pos, SEEK_SET);
                }
            }
        } else {
            // Decompression: get decompressed size from footer (BEFORE starting decompression)
            const int64_t decomp_size = zxc_stream_get_decompressed_size(f_in);
            if (decomp_size > 0) total_size = (uint64_t)decomp_size;
        }
        // Only show progress for files > 1MB
        if (total_size > 1024 * 1024) show_progress = 1;
    }
    // Set large buffers for I/O performance (AFTER file size detection)
    char *b1 = malloc(1024 * 1024), *b2 = malloc(1024 * 1024);
    if (b1) setvbuf(f_in, b1, _IOFBF, 1024 * 1024);
    if (f_out && b2) setvbuf(f_out, b2, _IOFBF, 1024 * 1024);
    if (in_path && !g_quiet) {
        zxc_log_v("Processing %s... (Compression Level %d)\n", in_path, level);
    }
    if (g_verbose) zxc_log("Checksum: %s\n", checksum_enabled ? "enabled" : "disabled");
    // Prepare progress context
    progress_ctx_t pctx = {.start_time = zxc_now(),
                           .operation = (mode == MODE_COMPRESS) ? "Compressing" : "Decompressing",
                           .total_size = total_size};
    const double t0 = zxc_now();
    int64_t bytes;
    if (mode == MODE_COMPRESS) {
        zxc_compress_opts_t copts = {
            .n_threads = num_threads,
            .level = level,
            .block_size = block_size,
            .checksum_enabled = checksum_enabled,
            .progress_cb = show_progress ? cli_progress_callback : NULL,
            .user_data = &pctx,
        };
        bytes = zxc_stream_compress(f_in, f_out, &copts);
    } else {
        /* MODE_DECOMPRESS and MODE_INTEGRITY both decompress; f_out is NULL
         * for integrity tests. */
        zxc_decompress_opts_t dopts = {
            .n_threads = num_threads,
            .checksum_enabled = checksum_enabled,
            .progress_cb = show_progress ? cli_progress_callback : NULL,
            .user_data = &pctx,
        };
        bytes = zxc_stream_decompress(f_in, f_out, &dopts);
    }
    const double dt = zxc_now() - t0;
    // Clear progress line on completion
    if (show_progress) {
        fprintf(stderr, "\r\033[K");
        fflush(stderr);
    }
    /* Detach the malloc'd setvbuf buffers before freeing them. */
    if (!use_stdin)
        fclose(f_in);
    else
        setvbuf(stdin, NULL, _IONBF, 0);
    if (f_out && f_out != stdout)
        fclose(f_out);
    else if (use_stdout) {
        fflush(stdout);
        setvbuf(stdout, NULL, _IONBF, 0);
    }
    free(b1);
    free(b2);
    if (bytes >= 0) {
        if (mode == MODE_INTEGRITY) {
            // Test mode: show result
            if (json_output) {
                printf(
                    "{\n"
                    " \"filename\": \"%s\",\n"
                    " \"status\": \"ok\",\n"
                    " \"checksum_verified\": %s,\n"
                    " \"time_seconds\": %.6f\n"
                    "}\n",
                    in_path ? in_path : "", checksum_enabled ? "true" : "false", dt);
            } else if (g_verbose) {
                printf(
                    "%s: OK\n"
                    " Checksum: %s\n"
                    " Time: %.3fs\n",
                    in_path ? in_path : "",
                    checksum_enabled ? "verified (RapidHash)" : "not verified", dt);
            } else {
                printf("%s: OK\n", in_path ? in_path : "");
            }
        } else {
            zxc_log_v("Processed %lld bytes in %.3fs\n", (long long)bytes, dt);
        }
        /* gzip-style behaviour: remove the input after a successful
         * file-to-file operation unless -k was given. */
        if (!use_stdin && !use_stdout && !keep_input && mode != MODE_INTEGRITY)
            unlink(resolved_in_path);
    } else {
        if (mode == MODE_INTEGRITY) {
            if (json_output) {
                printf(
                    "{\n"
                    " \"filename\": \"%s\",\n"
                    " \"status\": \"failed\",\n"
                    " \"error\": \"Integrity check failed (corrupted data or invalid checksum)\"\n"
                    "}\n",
                    in_path ? in_path : "");
            } else {
                fprintf(stderr, "%s: FAILED\n", in_path ? in_path : "");
                if (g_verbose)
                    fprintf(
                        stderr,
                        " Reason: Integrity check failed (corrupted data or invalid checksum)\n");
            }
        } else {
            zxc_log("Operation failed on %s.\n", in_path ? in_path : "");
            /* Don't leave a partial/corrupt output file behind. */
            if (created_out_file) unlink(resolved_out_path);
        }
        overall_ret = 1;
    }
    return overall_ret;
}

/**
 * @brief Main entry point.
 * Parses arguments and dispatches execution to Benchmark, Compress, or
 * Decompress modes.
 */
int main(int argc, char** argv) {
    zxc_mode_t mode = MODE_COMPRESS;
    int num_threads = 0;   // 0 = auto
    int keep_input = 0;
    int force = 0;
    int to_stdout = 0;
    int bench_seconds = 5;
    int checksum = -1;     // -1 = not set on the command line; resolved after parsing
    int level = 3;
    int json_output = 0;
    size_t block_size = 0; // 0 = library default
    static const struct option long_options[] = {{"compress", no_argument, 0, 'z'},
                                                 {"decompress", no_argument, 0, 'd'},
                                                 {"list", no_argument, 0, 'l'},
                                                 {"test", no_argument, 0, 't'},
                                                 {"bench", optional_argument, 0, 'b'},
                                                 {"threads", required_argument, 0, 'T'},
                                                 {"keep", no_argument, 0, 'k'},
                                                 {"force", no_argument, 0, 'f'},
                                                 {"stdout", no_argument, 0, 'c'},
                                                 {"verbose", no_argument, 0, 'v'},
                                                 {"quiet", no_argument, 0, 'q'},
                                                 {"checksum", no_argument, 0, 'C'},
                                                 {"no-checksum", no_argument, 0, 'N'},
                                                 {"json", no_argument, 0, 'j'},
                                                 {"version", no_argument, 0, 'V'},
                                                 {"help", no_argument, 0, 'h'},
                                                 {"multiple", no_argument, 0, 'm'},
                                                 {"recursive", no_argument, 0, 'r'},
                                                 {"block-size", required_argument, 0, 'B'},
                                                 {0, 0, 0, 0}};
    int opt;
    int multiple_mode = 0;
    int recursive_mode = 0;
    while ((opt = getopt_long(argc, argv, "12345b::B:cCdfhjklmrNqT:tvVz", long_options, NULL)) !=
           -1) {
        switch (opt) {
            case 'z': mode = MODE_COMPRESS; break;
            case 'd': mode = MODE_DECOMPRESS; break;
            case 'l': mode = MODE_LIST; break;
            case 't': mode = MODE_INTEGRITY; break;
            case 'b':
                mode = MODE_BENCHMARK;
                /* NOTE(review): a declaration directly after a case label is
                 * only valid from C23 (GCC/Clang accept it as an extension);
                 * wrap in braces for strict older compilers. */
                const char* bench_arg = optarg;
                /* "b::" makes the argument optional; also accept a detached
                 * numeric argument ("-b 10") by peeking at the next argv. */
                if (!bench_arg && optind < argc && argv[optind][0] >= '1' &&
                    argv[optind][0] <= '9')
                    bench_arg = argv[optind++];
                if (bench_arg) {
                    bench_seconds = atoi(bench_arg);
                    if (bench_seconds < 1 || bench_seconds > 3600) {
                        fprintf(stderr, "Error: duration must be between 1 and 3600 seconds\n");
                        return 1;
                    }
                }
                break;
            case '1': level = 1; break;
            case '2': level = 2; break;
            case '3': level = 3; break;
            case '4': level = 4; break;
            case '5': level = 5; break;
            case 'T':
                num_threads = atoi(optarg);
                if (num_threads < 0 || num_threads > ZXC_MAX_THREADS) {
                    fprintf(stderr, "Error: num_threads must be between 0 and %d\n",
                            ZXC_MAX_THREADS);
                    return 1;
                }
                break;
            case 'k': keep_input = 1; break;
            case 'f': force = 1; break;
            case 'c': to_stdout = 1; break;
            case 'v': g_verbose = 1; break;
            case 'q': g_quiet = 1; break;
            case 'C': checksum = 1; break;
            case 'N': checksum = 0; break;
            case 'j': json_output = 1; break;
            case 'm': multiple_mode = 1; break;
            case 'r':
                recursive_mode = 1;
                multiple_mode = 1;  // Recursive implies multiple mode for files processing
                break;
            case 'B': {
                /* Parse "<number>[K|M][B]" into a byte count. */
                char* end = NULL;
                const long long bs_val = strtoll(optarg, &end, 10);
                if (bs_val <= 0 || end == optarg) {
                    fprintf(stderr,
                            "Error: block-size must be a power of 2 between 4K and 2M\n"
                            " Examples: -B 4K, -B 128K, -B 1M, -B 2M\n");
                    return 1;
                }
                long long multiplier = 1;
                if (end && (*end == 'k' || *end == 'K')) {
                    multiplier = 1024;
                    end++;
                    if (*end == 'b' || *end == 'B') end++;  // optional "B" in "KB"
                } else if (end && (*end == 'm' || *end == 'M')) {
                    multiplier = 1024 * 1024;
                    end++;
                    if (*end == 'b' || *end == 'B') end++;  // optional "B" in "MB"
                }
                const long long bs_bytes = bs_val * multiplier;
                if (!zxc_validate_block_size((size_t)bs_bytes)) {
                    fprintf(stderr,
                            "Error: block-size must be a power of 2 between 4K and 2M\n"
                            " Examples: -B 4K, -B 128K, -B 1M, -B 2M\n");
                    return 1;
                }
                block_size = (size_t)bs_bytes;
                break;
            }
            /* NOTE(review): '?' (unrecognized option) falls through to the
             * version handler and exits 0 with the version banner; printing
             * usage and returning nonzero looks like the intended behaviour
             * — confirm before release. */
            case '?':
            case 'V': print_version(); return 0;
            case 'h': print_help(argv[0]); return 0;
            default: return 1;
        }
    }
    // Handle positional arguments for mode selection (e.g., "zxc z file")
    if (optind < argc && mode != MODE_BENCHMARK) {
        if (strcmp(argv[optind], "z") == 0) {
            mode = MODE_COMPRESS;
            optind++;
        } else if (strcmp(argv[optind], "d") == 0) {
            mode = MODE_DECOMPRESS;
            optind++;
        } else if (strcmp(argv[optind], "l") == 0 || strcmp(argv[optind], "list") == 0) {
            mode = MODE_LIST;
            optind++;
        } else if (strcmp(argv[optind], "t") == 0 || strcmp(argv[optind], "test") == 0) {
            mode = MODE_INTEGRITY;
            optind++;
        } else if (strcmp(argv[optind], "b") == 0) {
            mode = MODE_BENCHMARK;
            optind++;
        }
    }
    /* Checksum default: on for integrity tests, off otherwise, unless the
     * user passed -C / -N explicitly. */
    if (checksum == -1) {
        checksum = (mode == MODE_INTEGRITY) ? 1 : 0;
    }
    /*
     * Benchmark Mode
     * Loads the entire input file into RAM to measure raw algorithm throughput
     * without disk I/O bottlenecks.
     */
    if (mode == MODE_BENCHMARK) {
        if (optind >= argc) {
            zxc_log("Benchmark requires input file.\n");
            return 1;
        }
        const char* in_path = argv[optind];
        int ret = 1;
        uint8_t* ram = NULL;
        uint8_t* c_dat = NULL;
        FILE* fm = NULL;
        char resolved_path[4096];
        if (zxc_validate_input_path(in_path, resolved_path, sizeof(resolved_path)) != 0) {
            zxc_log("Error: Invalid input file '%s': %s\n", in_path, strerror(errno));
            return 1;
        }
        if (num_threads < 0 || num_threads > ZXC_MAX_THREADS) {
            zxc_log("Error: num_threads must be between 0 and %d\n", ZXC_MAX_THREADS);
            return 1;
        }
        FILE* f_in = fopen(resolved_path, "rb");
        if (!f_in) goto bench_cleanup;
        if (fseeko(f_in, 0, SEEK_END) != 0) goto bench_cleanup;
        const long long fsize = ftello(f_in);
        if (fsize <= 0) goto bench_cleanup;
        const size_t in_size = (size_t)fsize;
        if (fseeko(f_in, 0, SEEK_SET) != 0) goto bench_cleanup;
        ram = malloc(in_size);
        if (!ram) goto bench_cleanup;
        if (fread(ram, 1, in_size, f_in) != in_size) goto bench_cleanup;
        fclose(f_in);
        f_in = NULL;
        if (!json_output)
            printf(
                "Input: %s (%zu bytes)\n"
                "Running for %d seconds (threads: %d)...\n",
                in_path, in_size, bench_seconds, num_threads);
#ifdef _WIN32
        /* Windows has no fmemopen(); emulate with a temp file. */
        if (!json_output) printf("Note: Using tmpfile on Windows (slower than fmemopen).\n");
        fm = tmpfile();
        if (fm) {
            fwrite(ram, 1, in_size, fm);
            rewind(fm);
        }
#else
        fm = fmemopen(ram, in_size, "rb");
#endif
        if (!fm) goto bench_cleanup;
        /* Compression throughput loop: repeat until the deadline, keep the
         * best (minimum) single-run time. Output sink is NULL (discard). */
        double best_compress = 1e30;
        int compress_iters = 0;
        const double compress_deadline = zxc_now() + (double)bench_seconds;
        const double compress_start = zxc_now();
        while (zxc_now() < compress_deadline) {
            rewind(fm);
            const double t0 = zxc_now();
            zxc_compress_opts_t bench_copts = {.n_threads = num_threads,
                                               .level = level,
                                               .block_size = block_size,
                                               .checksum_enabled = checksum};
            zxc_stream_compress(fm, NULL, &bench_copts);
            const double dt = zxc_now() - t0;
            if (dt < best_compress) best_compress = dt;
            compress_iters++;
            if (!json_output && !g_quiet)
                fprintf(stderr, "\rCompressing... %d iters (%.1fs)", compress_iters,
                        zxc_now() - compress_start);
        }
        if (!json_output && !g_quiet) fprintf(stderr, "\r\033[K");
        fclose(fm);
        fm = NULL;
        /* One real compression pass to obtain compressed data for the
         * decompression benchmark. */
        const uint64_t max_c = zxc_compress_bound(in_size);
        c_dat = malloc(max_c);
        if (!c_dat) goto bench_cleanup;
#ifdef _WIN32
        FILE* fm_in = tmpfile();
        FILE* fm_out = tmpfile();
        if (!fm_in || !fm_out) {
            if (fm_in) fclose(fm_in);
            if (fm_out) fclose(fm_out);
            goto bench_cleanup;
        }
        fwrite(ram, 1, in_size, fm_in);
        rewind(fm_in);
#else
        FILE* fm_in = fmemopen(ram, in_size, "rb");
        FILE* fm_out = fmemopen(c_dat, max_c, "wb");
        if (!fm_in || !fm_out) {
            if (fm_in) fclose(fm_in);
            if (fm_out) fclose(fm_out);
            goto bench_cleanup;
        }
#endif
        zxc_compress_opts_t bench_copts2 = {.n_threads = num_threads,
                                            .level = level,
                                            .block_size = block_size,
                                            .checksum_enabled = checksum};
        const int64_t c_sz = zxc_stream_compress(fm_in, fm_out, &bench_copts2);
        if (c_sz < 0) {
            fclose(fm_in);
            fclose(fm_out);
            fm_in = NULL;
            fm_out = NULL;
            goto bench_cleanup;
        }
#ifdef _WIN32
        /* Copy the compressed bytes out of the temp file.
         * NOTE(review): this fread's return value is unchecked. */
        rewind(fm_out);
        fread(c_dat, 1, (size_t)c_sz, fm_out);
        fclose(fm_in);
        fclose(fm_out);
#else
        fclose(fm_in);
        fclose(fm_out);
#endif
#ifdef _WIN32
        FILE* fc = tmpfile();
        if (!fc) goto bench_cleanup;
        fwrite(c_dat, 1, (size_t)c_sz, fc);
        rewind(fc);
#else
        FILE* fc = fmemopen(c_dat, (size_t)c_sz, "rb");
        if (!fc) goto bench_cleanup;
#endif
        /* Decompression throughput loop, mirroring the compression loop. */
        double best_decompress = 1e30;
        int decompress_iters = 0;
        const double decompress_deadline = zxc_now() + (double)bench_seconds;
        const double decompress_start = zxc_now();
        while (zxc_now() < decompress_deadline) {
            rewind(fc);
            const double t0 = zxc_now();
            zxc_decompress_opts_t bench_dopts = {.n_threads = num_threads,
                                                 .checksum_enabled = checksum};
            zxc_stream_decompress(fc, NULL, &bench_dopts);
            const double dt = zxc_now() - t0;
            if (dt < best_decompress) best_decompress = dt;
            decompress_iters++;
            if (!json_output && !g_quiet)
                fprintf(stderr, "\rDecompressing... %d iters (%.1fs)", decompress_iters,
                        zxc_now() - decompress_start);
        }
        if (!json_output && !g_quiet) fprintf(stderr, "\r\033[K");
        fclose(fc);
        /* Speeds are decimal MB/s, based on the best single-iteration time. */
        const double compress_speed_mbps = (double)in_size / (1000.0 * 1000.0) / best_compress;
        const double decompress_speed_mbps = (double)in_size / (1000.0 * 1000.0) / best_decompress;
        const double ratio = (double)in_size / c_sz;
        if (json_output)
            printf(
                "{\n"
                " \"input_file\": \"%s\",\n"
                " \"input_size_bytes\": %zu,\n"
                " \"compressed_size_bytes\": %lld,\n"
                " \"compression_ratio\": %.3f,\n"
                " \"duration_seconds\": %d,\n"
                " \"compress_iterations\": %d,\n"
                " \"decompress_iterations\": %d,\n"
                " \"threads\": %d,\n"
                " \"level\": %d,\n"
                " \"checksum_enabled\": %s,\n"
                " \"compress_speed_mbps\": %.3f,\n"
                " \"decompress_speed_mbps\": %.3f,\n"
                " \"compress_time_seconds\": %.6f,\n"
                " \"decompress_time_seconds\": %.6f\n"
                "}\n",
                in_path, in_size, (long long)c_sz, ratio, bench_seconds, compress_iters,
                decompress_iters, num_threads, level, checksum ? "true" : "false",
                compress_speed_mbps, decompress_speed_mbps, best_compress, best_decompress);
        else
            printf(
                "Compressed: %lld bytes (ratio %.3f)\n"
                "Compress : %.3f MB/s (%d iters)\n"
                "Decompress: %.3f MB/s (%d iters)\n",
                (long long)c_sz, ratio, compress_speed_mbps, compress_iters,
                decompress_speed_mbps, decompress_iters);
        ret = 0;
    bench_cleanup:
        if (fm) fclose(fm);
        if (f_in) fclose(f_in);
        free(ram);
        free(c_dat);
        return ret;
    }
    /*
     * List Mode
     * Displays archive information (compressed size, uncompressed size, ratio).
     */
    if (mode == MODE_LIST) {
        if (optind >= argc) {
            zxc_log("List mode requires input file.\n");
            return 1;
        }
        int ret = 0;
        const int num_files = argc - optind;
        /* Multiple files in JSON mode are wrapped in a JSON array. */
        if (json_output && num_files > 1) printf("[\n");
        for (int i = optind; i < argc; i++) {
            ret |= zxc_list_archive(argv[i], json_output);
            if (json_output && num_files > 1 && i < argc - 1) {
                printf(",\n");
            }
        }
        if (json_output && num_files > 1) {
            printf("]\n");
        }
        return ret;
    }
    if (multiple_mode && to_stdout) {
        zxc_log("Error: cannot write to stdout when using multiple files mode (-m).\n");
        return 1;
    }
    /*
     * File Processing Mode
     * Loops over files and determines input/output paths.
     */
    int overall_ret = 0;
    const int start_optind = optind;
    // If no files passed but we aren't using stdin, or mode expects files:
    if (optind >= argc && mode == MODE_INTEGRITY) {
        zxc_log("Test mode requires at least one input file.\n");
        return 1;
    }
    if (multiple_mode && optind >= argc) {
        zxc_log("Multiple files mode requires at least one input file.\n");
        return 1;
    }
    // Default to processing at least once (for stdin) if no files are passed and not in a mode that
    // strictly needs files
    const int num_files_to_process = (optind < argc) ? (argc - optind) : 1;
    for (int file_idx = 0; file_idx < num_files_to_process; file_idx++) {
        const char* current_arg = (optind < argc) ? argv[start_optind + file_idx] : NULL;
        if (recursive_mode && current_arg && strcmp(current_arg, "-") != 0 &&
            zxc_is_directory(current_arg)) {
            overall_ret |= process_directory(current_arg, mode, num_threads, keep_input, force,
                                             to_stdout, checksum, level, block_size, json_output);
        } else {
            /* In single-file mode a second positional argument is the
             * explicit output path ("zxc in out"). */
            const char* explicit_out_path =
                (!multiple_mode && optind + 1 < argc && current_arg &&
                 strcmp(current_arg, "-") != 0 && !to_stdout)
                    ? argv[start_optind + 1]
                    : NULL;
            overall_ret |= process_single_file(current_arg, explicit_out_path, mode, num_threads,
                                               keep_input, force, to_stdout, checksum, level,
                                               block_size, json_output);
        }
        if (!multiple_mode) {
            break;  // Standard mode only does the first argument as input
        }
    }
    return overall_ret;
}
zxc-0.9.1/src/lib/000077500000000000000000000000001515574030100136635ustar00rootroot00000000000000zxc-0.9.1/src/lib/vendors/000077500000000000000000000000001515574030100153435ustar00rootroot00000000000000zxc-0.9.1/src/lib/vendors/rapidhash.h000066400000000000000000000514151515574030100174650ustar00rootroot00000000000000/*
 * rapidhash V3 - Very fast, high quality, platform-independent hashing algorithm.
 *
 * Based on 'wyhash', by Wang Yi
 *
 * Copyright (C) 2025 Nicolas De Carli
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * You can contact the author at:
 * - rapidhash source repository: https://github.com/Nicoshev/rapidhash
 */

#pragma once

/*
 * Includes.
*/ #include #include #if defined(_MSC_VER) # include # if defined(_M_X64) && !defined(_M_ARM64EC) # pragma intrinsic(_umul128) # endif #endif /* * C/C++ macros. */ #ifdef _MSC_VER # define RAPIDHASH_ALWAYS_INLINE __forceinline #elif defined(__GNUC__) # define RAPIDHASH_ALWAYS_INLINE inline __attribute__((__always_inline__)) #else # define RAPIDHASH_ALWAYS_INLINE inline #endif #ifdef __cplusplus # define RAPIDHASH_NOEXCEPT noexcept # define RAPIDHASH_CONSTEXPR constexpr # ifndef RAPIDHASH_INLINE # define RAPIDHASH_INLINE RAPIDHASH_ALWAYS_INLINE # endif # if __cplusplus >= 201402L && !defined(_MSC_VER) # define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_ALWAYS_INLINE constexpr # else # define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_ALWAYS_INLINE # endif #else # define RAPIDHASH_NOEXCEPT # define RAPIDHASH_CONSTEXPR static const # ifndef RAPIDHASH_INLINE # define RAPIDHASH_INLINE static RAPIDHASH_ALWAYS_INLINE # endif # define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_INLINE #endif /* * Unrolled macro. * Improves large input speed, but increases code size and worsens small input speed. * * RAPIDHASH_COMPACT: Normal behavior. * RAPIDHASH_UNROLLED: * */ #ifndef RAPIDHASH_UNROLLED # define RAPIDHASH_COMPACT #elif defined(RAPIDHASH_COMPACT) # error "cannot define RAPIDHASH_COMPACT and RAPIDHASH_UNROLLED simultaneously." #endif /* * Protection macro, alters behaviour of rapid_mum multiplication function. * * RAPIDHASH_FAST: Normal behavior, max speed. * RAPIDHASH_PROTECTED: Extra protection against entropy loss. */ #ifndef RAPIDHASH_PROTECTED # define RAPIDHASH_FAST #elif defined(RAPIDHASH_FAST) # error "cannot define RAPIDHASH_PROTECTED and RAPIDHASH_FAST simultaneously." #endif /* * Likely and unlikely macros. */ #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) # define _likely_(x) __builtin_expect(x,1) # define _unlikely_(x) __builtin_expect(x,0) #else # define _likely_(x) (x) # define _unlikely_(x) (x) #endif /* * Endianness macros. 
*/ #ifndef RAPIDHASH_LITTLE_ENDIAN # if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) # define RAPIDHASH_LITTLE_ENDIAN # elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) # define RAPIDHASH_BIG_ENDIAN # else # warning "could not determine endianness! Falling back to little endian." # define RAPIDHASH_LITTLE_ENDIAN # endif #endif /* * Default secret parameters. */ RAPIDHASH_CONSTEXPR uint64_t rapid_secret[8] = { 0x2d358dccaa6c78a5ull, 0x8bb84b93962eacc9ull, 0x4b33a62ed433d4a3ull, 0x4d5a2da51de1aa47ull, 0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x90ed1765281c388cull, 0xaaaaaaaaaaaaaaaaull}; /* * 64*64 -> 128bit multiply function. * * @param A Address of 64-bit number. * @param B Address of 64-bit number. * * Calculates 128-bit C = *A * *B. * * When RAPIDHASH_FAST is defined: * Overwrites A contents with C's low 64 bits. * Overwrites B contents with C's high 64 bits. * * When RAPIDHASH_PROTECTED is defined: * Xors and overwrites A contents with C's low 64 bits. * Xors and overwrites B contents with C's high 64 bits. 
*/ RAPIDHASH_INLINE_CONSTEXPR void rapid_mum(uint64_t *A, uint64_t *B) RAPIDHASH_NOEXCEPT { #if defined(__SIZEOF_INT128__) __uint128_t r=*A; r*=*B; #ifdef RAPIDHASH_PROTECTED *A^=(uint64_t)r; *B^=(uint64_t)(r>>64); #else *A=(uint64_t)r; *B=(uint64_t)(r>>64); #endif #elif defined(_MSC_VER) && (defined(_WIN64) || defined(_M_HYBRID_CHPE_ARM64)) #if defined(_M_X64) #ifdef RAPIDHASH_PROTECTED uint64_t a, b; a=_umul128(*A,*B,&b); *A^=a; *B^=b; #else *A=_umul128(*A,*B,B); #endif #else #ifdef RAPIDHASH_PROTECTED uint64_t a, b; b = __umulh(*A, *B); a = *A * *B; *A^=a; *B^=b; #else uint64_t c = __umulh(*A, *B); *A = *A * *B; *B = c; #endif #endif #else uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B; uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t>32)+(rm1>>32)+c; #ifdef RAPIDHASH_PROTECTED *A^=lo; *B^=hi; #else *A=lo; *B=hi; #endif #endif } /* * Multiply and xor mix function. * * @param A 64-bit number. * @param B 64-bit number. * * Calculates 128-bit C = A * B. * Returns 64-bit xor between high and low 64 bits of C. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapid_mix(uint64_t A, uint64_t B) RAPIDHASH_NOEXCEPT { rapid_mum(&A,&B); return A^B; } /* * Read functions. 
*/ #ifdef RAPIDHASH_LITTLE_ENDIAN RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return v;} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return v;} #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return __builtin_bswap64(v);} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return __builtin_bswap32(v);} #elif defined(_MSC_VER) RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return _byteswap_uint64(v);} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return _byteswap_ulong(v);} #else RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, 8); return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000)); } RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, 4); return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000)); } #endif /* * rapidhash main function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * @param secret Triplet of 64-bit secrets used to alter hash result predictably. * * Returns a 64-bit hash. 
*/ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash_internal(const void *key, size_t len, uint64_t seed, const uint64_t* secret) RAPIDHASH_NOEXCEPT { const uint8_t *p=(const uint8_t *)key; seed ^= rapid_mix(seed ^ secret[2], secret[1]); uint64_t a=0, b=0; size_t i = len; if (_likely_(len <= 16)) { if (len >= 4) { seed ^= len; if (len >= 8) { const uint8_t* plast = p + len - 8; a = rapid_read64(p); b = rapid_read64(plast); } else { const uint8_t* plast = p + len - 4; a = rapid_read32(p); b = rapid_read32(plast); } } else if (len > 0) { a = (((uint64_t)p[0])<<45)|p[len-1]; b = p[len>>1]; } else a = b = 0; } else { if (len > 112) { uint64_t see1 = seed, see2 = seed; uint64_t see3 = seed, see4 = seed; uint64_t see5 = seed, see6 = seed; #ifdef RAPIDHASH_COMPACT do { seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed); see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1); see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2); see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3); see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4); see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5); see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6); p += 112; i -= 112; } while(i > 112); #else while (i > 224) { seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed); see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1); see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2); see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3); see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4); see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5); see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6); seed = rapid_mix(rapid_read64(p + 112) ^ 
secret[0], rapid_read64(p + 120) ^ seed); see1 = rapid_mix(rapid_read64(p + 128) ^ secret[1], rapid_read64(p + 136) ^ see1); see2 = rapid_mix(rapid_read64(p + 144) ^ secret[2], rapid_read64(p + 152) ^ see2); see3 = rapid_mix(rapid_read64(p + 160) ^ secret[3], rapid_read64(p + 168) ^ see3); see4 = rapid_mix(rapid_read64(p + 176) ^ secret[4], rapid_read64(p + 184) ^ see4); see5 = rapid_mix(rapid_read64(p + 192) ^ secret[5], rapid_read64(p + 200) ^ see5); see6 = rapid_mix(rapid_read64(p + 208) ^ secret[6], rapid_read64(p + 216) ^ see6); p += 224; i -= 224; } if (i > 112) { seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed); see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1); see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2); see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3); see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4); see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5); see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6); p += 112; i -= 112; } #endif seed ^= see1; see2 ^= see3; see4 ^= see5; seed ^= see6; see2 ^= see4; seed ^= see2; } if (i > 16) { seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed); if (i > 32) { seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed); if (i > 48) { seed = rapid_mix(rapid_read64(p + 32) ^ secret[1], rapid_read64(p + 40) ^ seed); if (i > 64) { seed = rapid_mix(rapid_read64(p + 48) ^ secret[1], rapid_read64(p + 56) ^ seed); if (i > 80) { seed = rapid_mix(rapid_read64(p + 64) ^ secret[2], rapid_read64(p + 72) ^ seed); if (i > 96) { seed = rapid_mix(rapid_read64(p + 80) ^ secret[1], rapid_read64(p + 88) ^ seed); } } } } } } a=rapid_read64(p+i-16) ^ i; b=rapid_read64(p+i-8); } a ^= secret[1]; b ^= seed; rapid_mum(&a, &b); return rapid_mix(a ^ secret[7], b ^ secret[1] ^ i); } /* * 
rapidhashMicro main function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * @param secret Triplet of 64-bit secrets used to alter hash result predictably. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro_internal(const void *key, size_t len, uint64_t seed, const uint64_t* secret) RAPIDHASH_NOEXCEPT { const uint8_t *p=(const uint8_t *)key; seed ^= rapid_mix(seed ^ secret[2], secret[1]); uint64_t a=0, b=0; size_t i = len; if (_likely_(len <= 16)) { if (len >= 4) { seed ^= len; if (len >= 8) { const uint8_t* plast = p + len - 8; a = rapid_read64(p); b = rapid_read64(plast); } else { const uint8_t* plast = p + len - 4; a = rapid_read32(p); b = rapid_read32(plast); } } else if (len > 0) { a = (((uint64_t)p[0])<<45)|p[len-1]; b = p[len>>1]; } else a = b = 0; } else { if (i > 80) { uint64_t see1 = seed, see2 = seed; uint64_t see3 = seed, see4 = seed; do { seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed); see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1); see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2); see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3); see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4); p += 80; i -= 80; } while(i > 80); seed ^= see1; see2 ^= see3; seed ^= see4; seed ^= see2; } if (i > 16) { seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed); if (i > 32) { seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed); if (i > 48) { seed = rapid_mix(rapid_read64(p + 32) ^ secret[1], rapid_read64(p + 40) ^ seed); if (i > 64) { seed = rapid_mix(rapid_read64(p + 48) ^ secret[1], rapid_read64(p + 56) ^ seed); } } } } a=rapid_read64(p+i-16) ^ i; b=rapid_read64(p+i-8); } a ^= secret[1]; b ^= seed; rapid_mum(&a, &b); return rapid_mix(a ^ secret[7], b ^ 
secret[1] ^ i); } /* * rapidhashNano main function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * @param secret Triplet of 64-bit secrets used to alter hash result predictably. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano_internal(const void *key, size_t len, uint64_t seed, const uint64_t* secret) RAPIDHASH_NOEXCEPT { const uint8_t *p=(const uint8_t *)key; seed ^= rapid_mix(seed ^ secret[2], secret[1]); uint64_t a=0, b=0; size_t i = len; if (_likely_(len <= 16)) { if (len >= 4) { seed ^= len; if (len >= 8) { const uint8_t* plast = p + len - 8; a = rapid_read64(p); b = rapid_read64(plast); } else { const uint8_t* plast = p + len - 4; a = rapid_read32(p); b = rapid_read32(plast); } } else if (len > 0) { a = (((uint64_t)p[0])<<45)|p[len-1]; b = p[len>>1]; } else a = b = 0; } else { if (i > 48) { uint64_t see1 = seed, see2 = seed; do { seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed); see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1); see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2); p += 48; i -= 48; } while(i > 48); seed ^= see1; seed ^= see2; } if (i > 16) { seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed); if (i > 32) { seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed); } } a=rapid_read64(p+i-16) ^ i; b=rapid_read64(p+i-8); } a ^= secret[1]; b ^= seed; rapid_mum(&a, &b); return rapid_mix(a ^ secret[7], b ^ secret[1] ^ i); } /* * rapidhash seeded hash function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * * Calls rapidhash_internal using provided parameters and default secrets. * * Returns a 64-bit hash. 
*/ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT { return rapidhash_internal(key, len, seed, rapid_secret); } /* * rapidhash general purpose hash function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * * Calls rapidhash_withSeed using provided parameters and the default seed. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash(const void *key, size_t len) RAPIDHASH_NOEXCEPT { return rapidhash_withSeed(key, len, 0); } /* * rapidhashMicro seeded hash function. * * Designed for HPC and server applications, where cache misses make a noticeable performance detriment. * Clang-18+ compiles it to ~140 instructions without stack usage, both on x86-64 and aarch64. * Faster for sizes up to 512 bytes, just 15%-20% slower for inputs above 1kb. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * * Calls rapidhash_internal using provided parameters and default secrets. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT { return rapidhashMicro_internal(key, len, seed, rapid_secret); } /* * rapidhashMicro hash function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * * Calls rapidhash_withSeed using provided parameters and the default seed. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro(const void *key, size_t len) RAPIDHASH_NOEXCEPT { return rapidhashMicro_withSeed(key, len, 0); } /* * rapidhashNano seeded hash function. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * @param seed 64-bit seed used to alter the hash result predictably. * * Calls rapidhash_internal using provided parameters and default secrets. * * Returns a 64-bit hash. 
*/ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT { return rapidhashNano_internal(key, len, seed, rapid_secret); } /* * rapidhashNano hash function. * * Designed for Mobile and embedded applications, where keeping a small code size is a top priority. * Clang-18+ compiles it to less than 100 instructions without stack usage, both on x86-64 and aarch64. * The fastest for sizes up to 48 bytes, but may be considerably slower for larger inputs. * * @param key Buffer to be hashed. * @param len @key length, in bytes. * * Calls rapidhash_withSeed using provided parameters and the default seed. * * Returns a 64-bit hash. */ RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano(const void *key, size_t len) RAPIDHASH_NOEXCEPT { return rapidhashNano_withSeed(key, len, 0); } zxc-0.9.1/src/lib/zxc_common.c000066400000000000000000000567631515574030100162240ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /* * @file zxc_common.c * @brief Shared library utilities: context management, header I/O, bitpacking, * compress-bound calculation, and error-code name lookup. * * This translation unit contains the functions shared by both the buffer and * streaming APIs. It is linked into every build of libzxc. */ #include "../../include/zxc_buffer.h" #include "../../include/zxc_error.h" #include "../../include/zxc_sans_io.h" #include "zxc_internal.h" /* * ============================================================================ * CONTEXT MANAGEMENT * ============================================================================ */ /* * @brief Allocates memory aligned to the specified boundary. * * Uses `_aligned_malloc` on Windows and `posix_memalign` elsewhere. * * @param[in] size Number of bytes to allocate. * @param[in] alignment Required alignment (must be a power of two). 
* @return Pointer to the allocated block, or @c NULL on failure. */ void* zxc_aligned_malloc(const size_t size, const size_t alignment) { #if defined(_WIN32) return _aligned_malloc(size, alignment); #else void* ptr = NULL; if (posix_memalign(&ptr, alignment, size) != 0) return NULL; return ptr; #endif } /* * @brief Frees memory previously allocated by zxc_aligned_malloc(). * * @param[in] ptr Pointer returned by zxc_aligned_malloc() (may be @c NULL). */ void zxc_aligned_free(void* ptr) { #if defined(_WIN32) _aligned_free(ptr); #else free(ptr); #endif } /* * @brief Initialises a compression context, allocating all internal buffers. * * A single cache-line-aligned allocation is carved into hash table, chain * table, sequence buffers, token buffers, offset buffers, extra-length * buffers, and a literal buffer. * * @param[out] ctx Context to initialise (zeroed on entry). * @param[in] chunk_size Maximum uncompressed chunk size (bytes). * @param[in] mode 0 = decompression (skip buffer alloc), 1 = compression. * @param[in] level Compression level (stored in ctx). * @param[in] checksum_enabled Non-zero to enable checksum generation/verification. * @return @ref ZXC_OK on success, or @ref ZXC_ERROR_MEMORY on allocation failure. */ int zxc_cctx_init(zxc_cctx_t* RESTRICT ctx, const size_t chunk_size, const int mode, const int level, const int checksum_enabled) { ZXC_MEMSET(ctx, 0, sizeof(zxc_cctx_t)); ctx->checksum_enabled = checksum_enabled; /* Compute block-size derived parameters. 
*/ ctx->chunk_size = chunk_size; const uint32_t offset_bits = zxc_log2_u32((uint32_t)chunk_size); ctx->offset_bits = offset_bits; ctx->offset_mask = (1U << offset_bits) - 1; ctx->max_epoch = 1U << (32 - offset_bits); if (mode == 0) return ZXC_OK; const size_t max_seq = chunk_size / sizeof(uint32_t) + 256; const size_t sz_hash = 2 * ZXC_LZ_HASH_SIZE * sizeof(uint32_t); const size_t sz_chain = chunk_size * sizeof(uint16_t); const size_t sz_sequences = max_seq * sizeof(uint32_t); const size_t sz_tokens = max_seq * sizeof(uint8_t); const size_t sz_offsets = max_seq * sizeof(uint16_t); const size_t sz_extras = max_seq * 2 * ZXC_VBYTE_ALLOC_LEN; // Max 3 bytes per LL/ML VByte (sufficient for 256KB block) const size_t sz_lit = chunk_size + ZXC_PAD_SIZE; // Calculate sizes with alignment padding (64 bytes for cache line alignment) size_t total_size = 0; const size_t off_hash = total_size; total_size += (sz_hash + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_chain = total_size; total_size += (sz_chain + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_sequences = total_size; total_size += (sz_sequences + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_tokens = total_size; total_size += (sz_tokens + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_offsets = total_size; total_size += (sz_offsets + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_extras = total_size; total_size += (sz_extras + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t off_lit = total_size; total_size += (sz_lit + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; uint8_t* const mem = (uint8_t*)zxc_aligned_malloc(total_size, ZXC_CACHE_LINE_SIZE); if (UNLIKELY(!mem)) return ZXC_ERROR_MEMORY; ctx->memory_block = mem; ctx->hash_table = (uint32_t*)(mem + off_hash); ctx->chain_table = (uint16_t*)(mem + off_chain); ctx->buf_sequences = (uint32_t*)(mem + off_sequences); ctx->buf_tokens = (uint8_t*)(mem + off_tokens); ctx->buf_offsets = (uint16_t*)(mem + 
off_offsets); ctx->buf_extras = (uint8_t*)(mem + off_extras); ctx->literals = (uint8_t*)(mem + off_lit); ctx->compression_level = level; ctx->epoch = 1; ZXC_MEMSET(ctx->hash_table, 0, sz_hash); return ZXC_OK; } /* * @brief Releases all resources owned by a compression context. * * After this call every pointer inside @p ctx is @c NULL and the context * may be safely re-initialised with zxc_cctx_init(). * * @param[in,out] ctx Context to tear down. */ void zxc_cctx_free(zxc_cctx_t* ctx) { if (ctx->memory_block) { zxc_aligned_free(ctx->memory_block); ctx->memory_block = NULL; } if (ctx->lit_buffer) { free(ctx->lit_buffer); ctx->lit_buffer = NULL; } if (ctx->work_buf) { free(ctx->work_buf); ctx->work_buf = NULL; } ctx->hash_table = NULL; ctx->chain_table = NULL; ctx->buf_sequences = NULL; ctx->buf_tokens = NULL; ctx->buf_offsets = NULL; ctx->buf_extras = NULL; ctx->literals = NULL; ctx->epoch = 0; ctx->lit_buffer_cap = 0; ctx->work_buf_cap = 0; } /* * ============================================================================ * HEADER I/O * ============================================================================ */ /* * @brief Serialises a ZXC file header into @p dst. * * Layout (16 bytes): Magic (4) | Version (1) | Chunk (1) | Flags (1) | * Reserved (7) | CRC-16 (2). * * @param[out] dst Destination buffer (>= @ref ZXC_FILE_HEADER_SIZE bytes). * @param[in] dst_capacity Capacity of @p dst. * @param[in] has_checksum Non-zero to set the checksum flag. * @return Number of bytes written (@ref ZXC_FILE_HEADER_SIZE) on success, * or a negative @ref zxc_error_t code. */ int zxc_write_file_header(uint8_t* RESTRICT dst, const size_t dst_capacity, const size_t chunk_size, const int has_checksum) { if (UNLIKELY(dst_capacity < ZXC_FILE_HEADER_SIZE)) return ZXC_ERROR_DST_TOO_SMALL; zxc_store_le32(dst, ZXC_MAGIC_WORD); dst[4] = ZXC_FILE_FORMAT_VERSION; // Block size stored as log2 exponent (e.g. 
18 = 256 KB) dst[5] = (uint8_t)zxc_log2_u32((uint32_t)chunk_size); // Flags are at offset 6 dst[6] = has_checksum ? (ZXC_FILE_FLAG_HAS_CHECKSUM | ZXC_CHECKSUM_RAPIDHASH) : 0; // Bytes 7-13: Reserved (must be 0, 7 bytes) ZXC_MEMSET(dst + 7, 0, 7); // Bytes 14-15: CRC (16-bit) zxc_store_le16(dst + 14, 0); // Zero out before hashing const uint16_t crc = zxc_hash16(dst); zxc_store_le16(dst + 14, crc); return ZXC_FILE_HEADER_SIZE; } /* * @brief Parses and validates a ZXC file header from @p src. * * Checks the magic word, format version, and CRC-16. * * @param[in] src Source buffer (>= @ref ZXC_FILE_HEADER_SIZE bytes). * @param[in] src_size Size of @p src. * @param[out] out_block_size Receives the decoded block size (may be @c NULL). * @param[out] out_has_checksum Receives 1 if checksums are present, 0 otherwise * (may be @c NULL). * @return @ref ZXC_OK on success, or a negative @ref zxc_error_t code. */ int zxc_read_file_header(const uint8_t* RESTRICT src, const size_t src_size, size_t* RESTRICT out_block_size, int* RESTRICT out_has_checksum) { if (UNLIKELY(src_size < ZXC_FILE_HEADER_SIZE)) return ZXC_ERROR_SRC_TOO_SMALL; if (UNLIKELY(zxc_le32(src) != ZXC_MAGIC_WORD)) return ZXC_ERROR_BAD_MAGIC; if (UNLIKELY(src[4] != ZXC_FILE_FORMAT_VERSION)) return ZXC_ERROR_BAD_VERSION; uint8_t temp[ZXC_FILE_HEADER_SIZE]; ZXC_MEMCPY(temp, src, ZXC_FILE_HEADER_SIZE); // Zero out CRC bytes (14-15) before hash check temp[14] = 0; temp[15] = 0; if (UNLIKELY(zxc_le16(src + 14) != zxc_hash16(temp))) return ZXC_ERROR_BAD_HEADER; if (out_block_size) { const uint8_t code = src[5]; size_t block_size; if (LIKELY(code >= ZXC_BLOCK_SIZE_MIN_LOG2 && code <= ZXC_BLOCK_SIZE_MAX_LOG2)) { // Exponent encoding: block_size = 2^code (4 KB - 2 MB) block_size = (size_t)1U << code; } else if (code == 64) { // Legacy: hardcoded 256 KB default block_size = ZXC_BLOCK_SIZE_DEFAULT; } else { return ZXC_ERROR_BAD_BLOCK_SIZE; } *out_block_size = block_size; } // Flags are at offset 6 if (out_has_checksum) 
*out_has_checksum = (src[6] & ZXC_FILE_FLAG_HAS_CHECKSUM) ? 1 : 0; return ZXC_OK; } /* * @brief Serialises a block header (8 bytes) into @p dst. * * @param[out] dst Destination buffer (>= @ref ZXC_BLOCK_HEADER_SIZE bytes). * @param[in] dst_capacity Capacity of @p dst. * @param[in] bh Populated block header descriptor. * @return Number of bytes written (@ref ZXC_BLOCK_HEADER_SIZE) on success, * or a negative @ref zxc_error_t code. */ int zxc_write_block_header(uint8_t* RESTRICT dst, const size_t dst_capacity, const zxc_block_header_t* RESTRICT bh) { if (UNLIKELY(dst_capacity < ZXC_BLOCK_HEADER_SIZE)) return ZXC_ERROR_DST_TOO_SMALL; dst[0] = bh->block_type; dst[1] = 0; // Flags not used currently dst[2] = 0; // Reserved zxc_store_le32(dst + 3, bh->comp_size); dst[7] = 0; // Zero before hashing dst[7] = zxc_hash8(dst); // Checksum at the end return ZXC_BLOCK_HEADER_SIZE; } /* * @brief Parses and validates a block header from @p src. * * Validates the 8-bit CRC embedded in the header. * * @param[in] src Source buffer (>= @ref ZXC_BLOCK_HEADER_SIZE bytes). * @param[in] src_size Size of @p src. * @param[out] bh Receives the decoded block header fields. * @return @ref ZXC_OK on success, or a negative @ref zxc_error_t code. */ int zxc_read_block_header(const uint8_t* RESTRICT src, const size_t src_size, zxc_block_header_t* RESTRICT bh) { if (UNLIKELY(src_size < ZXC_BLOCK_HEADER_SIZE)) return ZXC_ERROR_SRC_TOO_SMALL; uint8_t temp[ZXC_BLOCK_HEADER_SIZE]; ZXC_MEMCPY(temp, src, ZXC_BLOCK_HEADER_SIZE); temp[7] = 0; // Zero out checksum byte before hashing if (UNLIKELY(src[7] != zxc_hash8(temp))) return ZXC_ERROR_BAD_HEADER; bh->block_type = src[0]; bh->block_flags = 0; // Flags not used currently bh->reserved = src[2]; bh->comp_size = zxc_le32(src + 3); bh->header_crc = src[7]; return ZXC_OK; } /* * @brief Writes the 12-byte file footer (source size + global checksum). * * @param[out] dst Destination buffer (>= @ref ZXC_FILE_FOOTER_SIZE bytes). 
* @param[in] dst_capacity Capacity of @p dst. * @param[in] src_size Original uncompressed size in bytes. * @param[in] global_hash Accumulated global checksum value. * @param[in] checksum_enabled Non-zero to write the checksum; zero to zero-fill. * @return Number of bytes written (@ref ZXC_FILE_FOOTER_SIZE) on success, * or a negative @ref zxc_error_t code. */ int zxc_write_file_footer(uint8_t* RESTRICT dst, const size_t dst_capacity, const uint64_t src_size, const uint32_t global_hash, const int checksum_enabled) { if (UNLIKELY(dst_capacity < ZXC_FILE_FOOTER_SIZE)) return ZXC_ERROR_DST_TOO_SMALL; zxc_store_le64(dst, src_size); if (checksum_enabled) { zxc_store_le32(dst + sizeof(uint64_t), global_hash); } else { ZXC_MEMSET(dst + sizeof(uint64_t), 0, sizeof(uint32_t)); } return ZXC_FILE_FOOTER_SIZE; } /* * @brief Serialises a NUM block header (16 bytes). * * @param[out] dst Destination buffer (>= @ref ZXC_NUM_HEADER_BINARY_SIZE bytes). * @param[in] rem Remaining capacity of @p dst. * @param[in] nh Populated NUM header descriptor. * @return Number of bytes written on success, or a negative @ref zxc_error_t code. */ int zxc_write_num_header(uint8_t* RESTRICT dst, const size_t rem, const zxc_num_header_t* RESTRICT nh) { if (UNLIKELY(rem < ZXC_NUM_HEADER_BINARY_SIZE)) return ZXC_ERROR_DST_TOO_SMALL; zxc_store_le64(dst, nh->n_values); zxc_store_le16(dst + 8, nh->frame_size); zxc_store_le16(dst + 10, 0); zxc_store_le32(dst + 12, 0); return ZXC_NUM_HEADER_BINARY_SIZE; } /* * @brief Parses a NUM block header from @p src. * * @param[in] src Source buffer (>= @ref ZXC_NUM_HEADER_BINARY_SIZE bytes). * @param[in] src_size Size of @p src. * @param[out] nh Receives the decoded NUM header fields. * @return @ref ZXC_OK on success, or a negative @ref zxc_error_t code. 
*/ int zxc_read_num_header(const uint8_t* RESTRICT src, const size_t src_size, zxc_num_header_t* RESTRICT nh) { if (UNLIKELY(src_size < ZXC_NUM_HEADER_BINARY_SIZE)) return ZXC_ERROR_SRC_TOO_SMALL; nh->n_values = zxc_le64(src); nh->frame_size = zxc_le16(src + 8); return ZXC_OK; } /* * @brief Serialises a GLO block header followed by its section descriptors. * * @param[out] dst Destination buffer. * @param[in] rem Remaining capacity of @p dst. * @param[in] gh Populated GLO header descriptor. * @param[in] desc Array of @ref ZXC_GLO_SECTIONS section descriptors. * @return Total bytes written on success, or a negative @ref zxc_error_t code. */ int zxc_write_glo_header_and_desc(uint8_t* RESTRICT dst, const size_t rem, const zxc_gnr_header_t* RESTRICT gh, const zxc_section_desc_t desc[ZXC_GLO_SECTIONS]) { const size_t needed = ZXC_GLO_HEADER_BINARY_SIZE + ZXC_GLO_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE; if (UNLIKELY(rem < needed)) return ZXC_ERROR_DST_TOO_SMALL; zxc_store_le32(dst, gh->n_sequences); zxc_store_le32(dst + 4, gh->n_literals); dst[8] = gh->enc_lit; dst[9] = gh->enc_litlen; dst[10] = gh->enc_mlen; dst[11] = gh->enc_off; zxc_store_le32(dst + 12, 0); uint8_t* p = dst + ZXC_GLO_HEADER_BINARY_SIZE; for (int i = 0; i < ZXC_GLO_SECTIONS; i++) { zxc_store_le64(p, desc[i].sizes); p += ZXC_SECTION_DESC_BINARY_SIZE; } return (int)needed; } /* * @brief Parses a GLO block header and its section descriptors from @p src. * * @param[in] src Source buffer. * @param[in] len Size of @p src. * @param[out] gh Receives the decoded GLO header. * @param[out] desc Receives @ref ZXC_GLO_SECTIONS decoded section descriptors. * @return @ref ZXC_OK on success, or a negative @ref zxc_error_t code. 
*/
int zxc_read_glo_header_and_desc(const uint8_t* RESTRICT src, const size_t len,
                                 zxc_gnr_header_t* RESTRICT gh,
                                 zxc_section_desc_t desc[ZXC_GLO_SECTIONS]) {
    // Fixed-size header followed by one binary descriptor per GLO section.
    const size_t needed =
        ZXC_GLO_HEADER_BINARY_SIZE + ZXC_GLO_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE;
    if (UNLIKELY(len < needed)) return ZXC_ERROR_SRC_TOO_SMALL;
    // Header layout: [0..3] n_sequences (LE32), [4..7] n_literals (LE32),
    // [8..11] one encoding-id byte each for literals, lit-lengths, match-lengths, offsets.
    gh->n_sequences = zxc_le32(src);
    gh->n_literals = zxc_le32(src + 4);
    gh->enc_lit = src[8];
    gh->enc_litlen = src[9];
    gh->enc_mlen = src[10];
    gh->enc_off = src[11];
    const uint8_t* p = src + ZXC_GLO_HEADER_BINARY_SIZE;
    for (int i = 0; i < ZXC_GLO_SECTIONS; i++) {
        desc[i].sizes = zxc_le64(p);  // Each descriptor is a packed LE64 sizes field.
        p += ZXC_SECTION_DESC_BINARY_SIZE;
    }
    return ZXC_OK;
}

/*
 * @brief Serialises a GHI block header followed by its section descriptors.
 *
 * @param[out] dst Destination buffer.
 * @param[in] rem Remaining capacity of @p dst.
 * @param[in] gh Populated GHI header descriptor.
 * @param[in] desc Array of @ref ZXC_GHI_SECTIONS section descriptors.
 * @return Total bytes written on success, or a negative @ref zxc_error_t code.
 */
int zxc_write_ghi_header_and_desc(uint8_t* RESTRICT dst, const size_t rem,
                                  const zxc_gnr_header_t* RESTRICT gh,
                                  const zxc_section_desc_t desc[ZXC_GHI_SECTIONS]) {
    const size_t needed =
        ZXC_GHI_HEADER_BINARY_SIZE + ZXC_GHI_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE;
    if (UNLIKELY(rem < needed)) return ZXC_ERROR_DST_TOO_SMALL;
    // Same 12-byte prefix layout as the GLO header (counts + four encoding bytes).
    zxc_store_le32(dst, gh->n_sequences);
    zxc_store_le32(dst + 4, gh->n_literals);
    dst[8] = gh->enc_lit;
    dst[9] = gh->enc_litlen;
    dst[10] = gh->enc_mlen;
    dst[11] = gh->enc_off;
    // Bytes 12..15 are written as zero; the matching GHI reader below does not
    // consume them (presumably a reserved/padding field — TODO confirm).
    zxc_store_le32(dst + 12, 0);
    uint8_t* p = dst + ZXC_GHI_HEADER_BINARY_SIZE;
    for (int i = 0; i < ZXC_GHI_SECTIONS; i++) {
        zxc_store_le64(p, desc[i].sizes);
        p += ZXC_SECTION_DESC_BINARY_SIZE;
    }
    return (int)needed;
}

/*
 * @brief Parses a GHI block header and its section descriptors from @p src.
 *
 * @param[in] src Source buffer.
 * @param[in] len Size of @p src.
 * @param[out] gh Receives the decoded GHI header.
 * @param[out] desc Receives @ref ZXC_GHI_SECTIONS decoded section descriptors.
 * @return @ref ZXC_OK on success, or a negative @ref zxc_error_t code.
 */
int zxc_read_ghi_header_and_desc(const uint8_t* RESTRICT src, const size_t len,
                                 zxc_gnr_header_t* RESTRICT gh,
                                 zxc_section_desc_t desc[ZXC_GHI_SECTIONS]) {
    const size_t needed =
        ZXC_GHI_HEADER_BINARY_SIZE + ZXC_GHI_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE;
    if (UNLIKELY(len < needed)) return ZXC_ERROR_SRC_TOO_SMALL;
    gh->n_sequences = zxc_le32(src);
    gh->n_literals = zxc_le32(src + 4);
    gh->enc_lit = src[8];
    gh->enc_litlen = src[9];
    gh->enc_mlen = src[10];
    gh->enc_off = src[11];
    // Note: bytes 12..15 (zeroed by the writer) are intentionally skipped here.
    const uint8_t* p = src + ZXC_GHI_HEADER_BINARY_SIZE;
    for (int i = 0; i < ZXC_GHI_SECTIONS; i++) {
        desc[i].sizes = zxc_le64(p);
        p += ZXC_SECTION_DESC_BINARY_SIZE;
    }
    return ZXC_OK;
}

/*
 * ============================================================================
 * BITPACKING UTILITIES
 * ============================================================================
 */

/*
 * @brief Bit-packs an array of 32-bit values into a compact byte stream.
 *
 * Each value is masked to @p bits width and packed contiguously, LSB-first.
 *
 * @param[in] src Source array of 32-bit integers.
 * @param[in] count Number of values to pack.
 * @param[out] dst Destination byte buffer.
 * @param[in] dst_cap Capacity of @p dst (must allow 4 bytes of slack past the
 * packed size — see safe_bytes below).
 * @param[in] bits Number of bits per value (0-32).
 * @return Number of bytes written on success, or a negative @ref zxc_error_t code.
 */
int zxc_bitpack_stream_32(const uint32_t* RESTRICT src, const size_t count, uint8_t* RESTRICT dst,
                          const size_t dst_cap, const uint8_t bits) {
    const size_t out_bytes = ((count * bits) + ZXC_BITS_PER_BYTE - 1) / ZXC_BITS_PER_BYTE;
    // +4 bytes: packing may write past out_bytes when the last value straddles a byte boundary.
    const size_t safe_bytes = out_bytes + sizeof(uint32_t);
    if (UNLIKELY(dst_cap < safe_bytes)) return ZXC_ERROR_DST_TOO_SMALL;
    size_t bit_pos = 0;
    // Zero the whole writable window first so the |= merges below start clean.
    ZXC_MEMSET(dst, 0, safe_bytes);
    // Create a mask for the input bits to prevent overflow.
    // If bits is 32, the shift (1ULL << 32) is undefined behavior on 32-bit types,
    // but here we use uint64_t. (1ULL << 32) is fine on 64-bit.
    // However, if bits=64 (unlikely for a 32-bit packer), it would be an issue.
    // For 0 < bits <= 32:
    const uint64_t val_mask =
        (bits == sizeof(uint32_t) * ZXC_BITS_PER_BYTE) ? UINT32_MAX : ((1ULL << bits) - 1);
    for (size_t i = 0; i < count; i++) {
        // Mask the input value to ensure we don't write garbage, then pre-shift
        // it to its bit offset within the current byte.
        const uint64_t v = ((uint64_t)src[i] & val_mask) << (bit_pos % ZXC_BITS_PER_BYTE);
        const size_t byte_idx = bit_pos / ZXC_BITS_PER_BYTE;
        // A shifted value spans at most 5 bytes (32 bits + up to 7 bits of offset);
        // merge each byte only if the value actually reaches it.
        dst[byte_idx] |= (uint8_t)v;
        if (bits + (bit_pos % ZXC_BITS_PER_BYTE) > 1 * ZXC_BITS_PER_BYTE)
            dst[byte_idx + 1] |= (uint8_t)(v >> (1 * ZXC_BITS_PER_BYTE));
        if (bits + (bit_pos % ZXC_BITS_PER_BYTE) > 2 * ZXC_BITS_PER_BYTE)
            dst[byte_idx + 2] |= (uint8_t)(v >> (2 * ZXC_BITS_PER_BYTE));
        if (bits + (bit_pos % ZXC_BITS_PER_BYTE) > 3 * ZXC_BITS_PER_BYTE)
            dst[byte_idx + 3] |= (uint8_t)(v >> (3 * ZXC_BITS_PER_BYTE));
        if (bits + (bit_pos % ZXC_BITS_PER_BYTE) > 4 * ZXC_BITS_PER_BYTE)
            dst[byte_idx + 4] |= (uint8_t)(v >> (4 * ZXC_BITS_PER_BYTE));
        bit_pos += bits;
    }
    return (int)out_bytes;
}

/*
 * ============================================================================
 * COMPRESS BOUND CALCULATION
 * ============================================================================
 */

/*
 * @brief Returns the maximum compressed size for a given input size.
 *
 * The result accounts for the file header, per-block headers, block
 * checksums, worst-case expansion, EOF block, and the file footer.
 *
 * @param[in] input_size Uncompressed input size in bytes.
 * @return Upper bound on compressed size, or 0 if @p input_size would overflow.
*/ uint64_t zxc_compress_bound(const size_t input_size) { // Guard UINT64_MAX / SIZE_MAX would overflow. if (UNLIKELY(input_size > (SIZE_MAX - (SIZE_MAX >> 8)))) return 0; uint64_t n = ((uint64_t)input_size + ZXC_BLOCK_SIZE_DEFAULT - 1) / ZXC_BLOCK_SIZE_DEFAULT; if (n == 0) n = 1; return ZXC_FILE_HEADER_SIZE + (n * (ZXC_BLOCK_HEADER_SIZE + ZXC_BLOCK_CHECKSUM_SIZE + 64)) + (uint64_t)input_size + ZXC_BLOCK_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE; } /* * ============================================================================ * ERROR CODE UTILITIES * ============================================================================ */ /* * @brief Returns a human-readable string for the given error code. * * @param[in] code An error code from @ref zxc_error_t (or @ref ZXC_OK). * @return A static string such as @c "ZXC_OK" or @c "ZXC_ERROR_MEMORY". * Returns @c "ZXC_UNKNOWN_ERROR" for unrecognised codes. */ const char* zxc_error_name(const int code) { switch ((zxc_error_t)code) { case ZXC_OK: return "ZXC_OK"; case ZXC_ERROR_MEMORY: return "ZXC_ERROR_MEMORY"; case ZXC_ERROR_DST_TOO_SMALL: return "ZXC_ERROR_DST_TOO_SMALL"; case ZXC_ERROR_SRC_TOO_SMALL: return "ZXC_ERROR_SRC_TOO_SMALL"; case ZXC_ERROR_BAD_MAGIC: return "ZXC_ERROR_BAD_MAGIC"; case ZXC_ERROR_BAD_VERSION: return "ZXC_ERROR_BAD_VERSION"; case ZXC_ERROR_BAD_HEADER: return "ZXC_ERROR_BAD_HEADER"; case ZXC_ERROR_BAD_CHECKSUM: return "ZXC_ERROR_BAD_CHECKSUM"; case ZXC_ERROR_CORRUPT_DATA: return "ZXC_ERROR_CORRUPT_DATA"; case ZXC_ERROR_BAD_OFFSET: return "ZXC_ERROR_BAD_OFFSET"; case ZXC_ERROR_OVERFLOW: return "ZXC_ERROR_OVERFLOW"; case ZXC_ERROR_IO: return "ZXC_ERROR_IO"; case ZXC_ERROR_NULL_INPUT: return "ZXC_ERROR_NULL_INPUT"; case ZXC_ERROR_BAD_BLOCK_TYPE: return "ZXC_ERROR_BAD_BLOCK_TYPE"; case ZXC_ERROR_BAD_BLOCK_SIZE: return "ZXC_ERROR_BAD_BLOCK_SIZE"; default: return "ZXC_UNKNOWN_ERROR"; } } zxc-0.9.1/src/lib/zxc_compress.c000066400000000000000000001715361515574030100165630ustar00rootroot00000000000000/* * ZXC - 
High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_compress.c * @brief Block-level compression: LZ77 parsing, NUM / GLO / GHI / RAW encoding, * and the chunk-wrapper entry point. * * Compiled multiple times with different @c ZXC_FUNCTION_SUFFIX values to * produce AVX2, AVX-512, NEON, and scalar variants dispatched at runtime * by @ref zxc_dispatch.c. */ #include "../../include/zxc_error.h" #include "../../include/zxc_sans_io.h" #include "zxc_internal.h" /* * Function Multi-Versioning Support * If ZXC_FUNCTION_SUFFIX is defined (e.g. _avx2), rename the public entry point. */ #ifdef ZXC_FUNCTION_SUFFIX #define ZXC_CAT_IMPL(x, y) x##y #define ZXC_CAT(x, y) ZXC_CAT_IMPL(x, y) #define zxc_compress_chunk_wrapper ZXC_CAT(zxc_compress_chunk_wrapper, ZXC_FUNCTION_SUFFIX) #endif /** * @brief Computes a hash value for either a 4-byte or 5-byte sequence. * * @param[in] val The 64-bit integer sequence (e.g., 8 bytes read from input stream). * @param[in] use_hash5 Non-zero to use the 5-byte xorshift64* hash (Marsaglia/Vigna), zero for * 4-byte Marsaglia hash. * @return uint32_t A hash value suitable for indexing the match table. */ static ZXC_ALWAYS_INLINE uint32_t zxc_hash_func(uint64_t val, const int use_hash5) { if (use_hash5) { const uint64_t v5 = val & 0xFFFFFFFFFFULL; return (uint32_t)((v5 * ZXC_LZ_HASH_PRIME2) >> (64 - ZXC_LZ_HASH_BITS)); } else { val ^= val >> 15; return ((uint32_t)val * ZXC_LZ_HASH_PRIME1) >> (32 - ZXC_LZ_HASH_BITS); } } #if defined(ZXC_USE_AVX2) /** * @brief Reduces a 256-bit integer vector to a single scalar by finding the maximum unsigned 32-bit * integer element. * * This function performs a horizontal reduction across the 8 packed 32-bit unsigned integers * in the source vector to determine the maximum value. * * @param[in] v The 256-bit vector containing 8 unsigned 32-bit integers. 
* @return The maximum unsigned 32-bit integer found in the vector. */ // codeql[cpp/unused-static-function] : Used conditionally when ZXC_USE_AVX2 is defined static ZXC_ALWAYS_INLINE uint32_t zxc_mm256_reduce_max_epu32(__m256i v) { __m128i vlow = _mm256_castsi256_si128(v); // Extract the lower 128 bits __m128i vhigh = _mm256_extracti128_si256(v, 1); // Extract the upper 128 bits vlow = _mm_max_epu32(vlow, vhigh); // Element-wise max of lower and upper halves __m128i vshuf = _mm_shuffle_epi32(vlow, _MM_SHUFFLE(1, 0, 3, 2)); // Shuffle to swap pairs vlow = _mm_max_epu32(vlow, vshuf); // Max of original and swapped vshuf = _mm_shuffle_epi32(vlow, _MM_SHUFFLE(2, 3, 0, 1)); // Shuffle to bring remaining candidates vlow = _mm_max_epu32(vlow, vshuf); // Final max comparison return (uint32_t)_mm_cvtsi128_si32(vlow); // Extract the scalar result } #endif /** * @brief Writes a Prefix Varint encoded value to a buffer. * * This function encodes a 32-bit unsigned integer using Prefix Varint encoding * and writes it to the destination buffer. Unary prefix bits in the first * byte determine the total length (1-5 bytes), allowing for branchless or * predictable decoding. * * Format: * - 0xxxxxxx (1 byte) * - 10xxxxxx ... (2 bytes) * - 110xxxxx ... (3 bytes) * ... * * @param[out] dst Pointer to the destination buffer where the encoded value will be written. * @param[in] val The 32-bit unsigned integer value to encode. * @return The number of bytes written to the destination buffer. 
*/ static ZXC_ALWAYS_INLINE size_t zxc_write_varint(uint8_t* RESTRICT dst, uint32_t val) { // Prefix Varint Encoding // 1 byte: 0xxxxxxx (7 bits) -> val < 128 if (LIKELY(val < 128)) { dst[0] = (uint8_t)val; return 1; } // 2 bytes: 10xxxxxx xxxxxxxx (14 bits) -> val < 16384 (2^14) if (LIKELY(val < 16384)) { dst[0] = (uint8_t)(0x80 | (val & 0x3F)); dst[1] = (uint8_t)(val >> 6); return 2; } // 3 bytes: 110xxxxx xxxxxxxx xxxxxxxx (21 bits) -> val < 2097152 (2^21) if (LIKELY(val < 2097152)) { dst[0] = (uint8_t)(0xC0 | (val & 0x1F)); dst[1] = (uint8_t)(val >> 5); dst[2] = (uint8_t)(val >> 13); return 3; } // 4 bytes: 1110xxxx xxxxxxxx xxxxxxxx xxxxxxxx (28 bits) -> val < 268435456 (2^28) if (LIKELY(val < 268435456)) { dst[0] = (uint8_t)(0xE0 | (val & 0x0F)); dst[1] = (uint8_t)(val >> 4); dst[2] = (uint8_t)(val >> 12); dst[3] = (uint8_t)(val >> 20); return 4; } // 5 bytes: 11110xxx ... (35 bits) -> Full 32-bit range dst[0] = (uint8_t)(0xF0 | (val & 0x07)); dst[1] = (uint8_t)(val >> 3); dst[2] = (uint8_t)(val >> 11); dst[3] = (uint8_t)(val >> 19); dst[4] = (uint8_t)(val >> 27); return 5; } /** * @brief Structure representing a match found during compression. * * This structure holds information about a matching sequence found * in the input data during the compression process. * * @param ref Pointer to the reference data where the match was found. * @param len Length of the matching sequence in bytes. * @param backtrack Distance to backtrack from the current position to find the match. */ typedef struct { const uint8_t* ref; uint32_t len; uint32_t backtrack; } zxc_match_t; /** * @brief Finds the best matching sequence for LZ77 compression * * This function searches for the longest matching sequence in the * sliding window dictionary for LZ77 compression algorithm. * It is marked as always inline for performance optimization. * * @param[in] src Pointer to the start of the source buffer. * @param[in] ip Current input position pointer. 
* @param[in] iend Pointer to the end of the input buffer.
 * @param[in] mflimit Pointer to the match finding limit.
 * @param[in] anchor Pointer to the current anchor position.
 * @param[in,out] hash_table Pointer to the hash table for match finding.
 * @param[in,out] chain_table Pointer to the chain table for collision handling.
 * @param[in] epoch_mark Current epoch marker for hash table invalidation.
 * @param[in] offset_mask Mask extracting the position bits of a hash-table
 * entry; the remaining high bits hold the epoch.
 * @param[in] level Compression level; selects hash width and lazy heuristics.
 * @param[in] p LZ77 parameters controlling search depth, lazy matching, and stepping.
 * @return zxc_match_t Structure containing the best match information
 * (reference pointer, length of the match, and backtrack distance).
 */
static ZXC_ALWAYS_INLINE zxc_match_t zxc_lz77_find_best_match(
    const uint8_t* src, const uint8_t* ip, const uint8_t* iend, const uint8_t* mflimit,
    const uint8_t* anchor, uint32_t* RESTRICT hash_table, uint16_t* RESTRICT chain_table,
    uint32_t epoch_mark, uint32_t offset_mask, const int level, const zxc_lz77_params_t p) {
    const int use_hash5 = (level >= 3);
    // Track the best match found so far.
    // ref is the pointer to the start of the match in the history buffer,
    // len is the match length, and backtrack is the distance from ip to ref.
    // Start with a sentinel length just below the minimum so any valid match will replace it.
    zxc_match_t best = (zxc_match_t){NULL, ZXC_LZ_MIN_MATCH_LEN - 1, 0};
    // Load the 8-byte sequence at the current position.
    uint64_t cur_val8 = zxc_le64(ip);
    // First 4 bytes for tag and old lookups
    uint32_t cur_val = (uint32_t)cur_val8;
    uint32_t h = zxc_hash_func(cur_val8, use_hash5);
    // For levels 1-2, enhance tag with byte5 info via XOR (preserves byte4 info)
    // High byte becomes (byte4 ^ byte5), keeping discrimination from both bytes
    uint32_t cur_tag = (level <= 2) ? (cur_val ^ ((uint32_t)(cur_val8 >> 32) << 24)) : cur_val;
    // Current position in the input buffer expressed as a 32-bit index.
    // This index is what we store in / retrieve from the hash/chain tables.
    const uint32_t cur_pos = (uint32_t)(ip - src);
    // Each hash bucket stores:
    // - raw_head: compressed pointer (epoch in high bits, position in low bits)
    // - stored_tag: 4-byte tag (or XOR-enhanced for levels 1-2) to quickly reject mismatches.
    // Epoch bits allow the tables to be lazily invalidated without clearing all entries.
    const uint32_t raw_head = hash_table[2 * h];
    const uint32_t stored_tag = hash_table[2 * h + 1];
    // If the epoch in raw_head matches the current epoch_mark, extract the
    // stored position; otherwise treat this bucket as empty (index 0).
    // Branchless optimization:
    // Create a mask that is 0xFFFFFFFF if epochs match, 0 otherwise.
    const uint32_t epoch_mask = -((int32_t)((raw_head & ~offset_mask) == epoch_mark));
    uint32_t match_idx = (raw_head & offset_mask) & epoch_mask;
    // Decide whether to skip the head entry of the hash chain.
    const int skip_head = (match_idx != 0) & (stored_tag != cur_tag);
    // If we should skip the head and level is low (<= 2), we drop the match entirely (match_idx =
    // 0). drop_mask is 0 if we drop (skip_head && level <= 2 is true becomes 1, 1-1=0), -1
    // otherwise.
    const uint32_t drop_mask = (uint32_t)((skip_head & (level <= 2)) - 1);
    match_idx &= drop_mask;
    // Insert the current position into the bucket before searching.
    hash_table[2 * h] = epoch_mark | cur_pos;
    hash_table[2 * h + 1] = cur_tag;
    // Branchless chain table update
    const uint32_t dist = cur_pos - match_idx;
    const uint32_t valid_mask = -((int32_t)((match_idx != 0) & (dist < ZXC_LZ_WINDOW_SIZE)));
    chain_table[cur_pos] = (uint16_t)(dist & valid_mask);
    if (match_idx == 0) return best;
    int attempts = p.search_depth;
    // Optimization: If head tag doesn't match, advance immediately without loading the first
    // mismatch.
    if (skip_head) {
        const uint16_t delta = chain_table[match_idx];
        const uint32_t next_idx = match_idx - delta;
        match_idx = (delta != 0) ? next_idx : 0;
        attempts--;
    }
    // Walk the hash chain, newest to oldest, for up to search_depth candidates.
    while (match_idx > 0 && attempts-- >= 0) {
        if (UNLIKELY(cur_pos - match_idx > ZXC_LZ_MAX_DIST)) break;
        const uint8_t* ref = src + match_idx;
        const uint32_t ref_val = zxc_le32(ref);
        const int tag_match = (ref_val == cur_val);
        // Simplified check: only tag match and next-byte match required
        // (ref[best.len] == ip[best.len] guarantees any match here beats best.len).
        const int should_compare = tag_match && (ref[best.len] == ip[best.len]);
        if (should_compare) {
            uint32_t mlen = sizeof(uint32_t);  // We already know the first 4 bytes match
            // Fast path: Scalar 64-bit comparison for short matches (=< 64 bytes)
            // Most matches are short, so this avoids SIMD overhead for common cases
            const uint8_t* limit_8 = iend - sizeof(uint64_t);
            const uint8_t* scalar_limit = ip + mlen + 64;
            if (scalar_limit > limit_8) scalar_limit = limit_8;
            while (ip + mlen < scalar_limit) {
                uint64_t diff = zxc_le64(ip + mlen) ^ zxc_le64(ref + mlen);
                if (diff == 0)
                    mlen += sizeof(uint64_t);
                else {
                    // First differing byte index = trailing zero bits / 8.
                    mlen += (zxc_ctz64(diff) >> 3);
                    goto _match_len_done;
                }
            }
            // Long match path: Use SIMD for matches exceeding 64 bytes
#if defined(ZXC_USE_AVX512)
            const uint8_t* limit_64 = iend - 64;
            while (ip + mlen < limit_64) {
                const __m512i v_src = _mm512_loadu_si512((const void*)(ip + mlen));
                const __m512i v_ref = _mm512_loadu_si512((const void*)(ref + mlen));
                const __mmask64 mask = _mm512_cmpeq_epi8_mask(v_src, v_ref);
                if (mask == 0xFFFFFFFFFFFFFFFF)
                    mlen += 64;
                else {
                    mlen += (uint32_t)zxc_ctz64(~mask);
                    goto _match_len_done;
                }
            }
#elif defined(ZXC_USE_AVX2)
            const uint8_t* limit_32 = iend - 32;
            while (ip + mlen < limit_32) {
                const __m256i v_src = _mm256_loadu_si256((const __m256i*)(ip + mlen));
                const __m256i v_ref = _mm256_loadu_si256((const __m256i*)(ref + mlen));
                const __m256i v_cmp = _mm256_cmpeq_epi8(v_src, v_ref);
                const uint32_t mask = (uint32_t)_mm256_movemask_epi8(v_cmp);
                if (mask == 0xFFFFFFFF)
                    mlen += 32;
                else {
                    mlen += zxc_ctz32(~mask);
                    goto _match_len_done;
                }
            }
#elif defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32)
            const uint8_t* limit_16 = iend - 16;
            while (ip + mlen < limit_16) {
                const uint8x16_t v_src = vld1q_u8(ip + mlen);
                const uint8x16_t v_ref = vld1q_u8(ref + mlen);
                const uint8x16_t v_cmp = vceqq_u8(v_src, v_ref);
#if defined(ZXC_USE_NEON64)
                if (vminvq_u8(v_cmp) == 0xFF)
                    mlen += 16;
                else {
                    uint8x16_t v_diff = vmvnq_u8(v_cmp);
                    uint64_t lo = vgetq_lane_u64(vreinterpretq_u64_u8(v_diff), 0);
                    if (lo != 0)
                        mlen += (zxc_ctz64(lo) >> 3);
                    else
                        mlen += 8 + (zxc_ctz64(vgetq_lane_u64(vreinterpretq_u64_u8(v_diff), 1)) >> 3);
                    goto _match_len_done;
                }
#else
                // ARMv7 NEON: pairwise-min folds the vector to test "all bytes equal".
                uint8x8_t p1 = vpmin_u8(vget_low_u8(v_cmp), vget_high_u8(v_cmp));
                uint8x8_t p2 = vpmin_u8(p1, p1);
                uint8x8_t p3 = vpmin_u8(p2, p2);
                uint8_t min_val = vget_lane_u8(p3, 0);
                if (min_val == 0xFF)
                    mlen += 16;
                else {
                    uint8x16_t v_diff = vmvnq_u8(v_cmp);
                    uint64_t lo = (uint64_t)vgetq_lane_u32(vreinterpretq_u32_u8(v_diff), 0) |
                                  ((uint64_t)vgetq_lane_u32(vreinterpretq_u32_u8(v_diff), 1) << 32);
                    if (lo != 0)
                        mlen += (zxc_ctz64(lo) >> 3);
                    else
                        mlen += 8 + (zxc_ctz64((uint64_t)vgetq_lane_u32(vreinterpretq_u32_u8(v_diff), 2) |
                                               ((uint64_t)vgetq_lane_u32(vreinterpretq_u32_u8(v_diff), 3) << 32)) >> 3);
                    goto _match_len_done;
                }
#endif
            }
#endif
            // Scalar tail: 8 bytes at a time, then byte-by-byte up to iend.
            while (ip + mlen < limit_8) {
                const uint64_t diff = zxc_le64(ip + mlen) ^ zxc_le64(ref + mlen);
                if (diff == 0)
                    mlen += sizeof(uint64_t);
                else {
                    mlen += (zxc_ctz64(diff) >> 3);
                    goto _match_len_done;
                }
            }
            while (ip + mlen < iend && ref[mlen] == ip[mlen]) mlen++;
        _match_len_done:;
            // Branchless best-match update.
            const int better = (mlen > best.len);
            best.len = better ? mlen : best.len;
            best.ref = better ? ref : best.ref;
            // Stop early once the match is "good enough" or reaches the input end.
            if (UNLIKELY(best.len >= (uint32_t)p.sufficient_len || ip + best.len >= iend)) break;
        }
        // Advance along the chain; delta == 0 terminates it.
        const uint16_t delta = chain_table[match_idx];
        const uint32_t next_idx = match_idx - delta;
        ZXC_PREFETCH_READ(src + next_idx);
        match_idx = (delta != 0) ? next_idx : 0;
    }
    if (best.ref) {
        // Backtrack to extend match backwards
        const uint8_t* b_ip = ip;
        const uint8_t* b_ref = best.ref;
        while (b_ip > anchor && b_ref > src && b_ip[-1] == b_ref[-1]) {
            b_ip--;
            b_ref--;
            best.len++;
            best.backtrack++;
        }
        best.ref = b_ref;
    }
    // Lazy matching: probe ip+1 — if a clearly longer match starts one byte
    // later, drop the current match (caller emits a literal and retries).
    if (p.use_lazy && best.ref && best.len < 128 && ip + 1 < mflimit) {
        const uint64_t next_val8 = zxc_le64(ip + 1);
        const uint32_t next_val = (uint32_t)next_val8;
        const uint32_t h2 = zxc_hash_func(next_val8, use_hash5);
        const uint32_t next_head = hash_table[2 * h2];
        const uint32_t next_stored_tag = hash_table[2 * h2 + 1];
        uint32_t next_idx = (next_head & ~offset_mask) == epoch_mark ? (next_head & offset_mask) : 0;
        const int skip_lazy_head = (next_idx > 0 && next_stored_tag != next_val);
        uint32_t max_lazy = 0;
        int lazy_att = p.lazy_attempts;
        int is_lazy_first = 1;
        while (next_idx > 0 && lazy_att-- > 0) {
            if (UNLIKELY((uint32_t)(ip + 1 - src) - next_idx > ZXC_LZ_MAX_DIST)) break;
            const uint8_t* ref2 = src + next_idx;
            if ((!is_lazy_first || !skip_lazy_head) && zxc_le32(ref2) == next_val) {
                uint32_t l2 = sizeof(uint32_t);
                const uint8_t* limit8 = iend - sizeof(uint64_t);
                while (ip + 1 + l2 < limit8) {
                    const uint64_t v1 = zxc_le64(ip + 1 + l2);
                    const uint64_t v2 = zxc_le64(ref2 + l2);
                    if (v1 != v2) {
                        l2 += (uint32_t)(zxc_ctz64(v1 ^ v2) >> 3);
                        goto lazy1_done;
                    }
                    l2 += sizeof(uint64_t);
                }
                while (ip + 1 + l2 < iend && ref2[l2] == ip[1 + l2]) l2++;
            lazy1_done:
                if (l2 > max_lazy) max_lazy = l2;
            }
            const uint16_t delta = chain_table[next_idx];
            if (UNLIKELY(delta == 0)) break;
            next_idx -= delta;
            is_lazy_first = 0;
        }
        // +1 threshold: the deferred match must beat the current one by more
        // than the literal byte it costs.
        if (max_lazy > best.len + 1) {
            best.ref = NULL;
        } else if (level >= 4 && ip + 2 < mflimit) {
            // Second-level lazy probe at ip+2 for high compression levels.
            const uint64_t val3_8 = zxc_le64(ip + 2);
            const uint32_t val3 = (uint32_t)val3_8;
            const uint32_t h3 = zxc_hash_func(val3_8, use_hash5);
            const uint32_t head3 = hash_table[2 * h3];
            const uint32_t tag3 = hash_table[2 * h3 + 1];
            uint32_t idx3 = (head3 & ~offset_mask) == epoch_mark ? (head3 & offset_mask) : 0;
            const int skip_head3 = (idx3 > 0 && tag3 != val3);
            int is_first3 = 1;
            uint32_t max_lazy3 = 0;
            lazy_att = p.lazy_attempts;
            while (idx3 > 0 && lazy_att-- > 0) {
                if (UNLIKELY((uint32_t)(ip + 2 - src) - idx3 > ZXC_LZ_MAX_DIST)) break;
                const uint8_t* ref3 = src + idx3;
                if ((!is_first3 || !skip_head3) && zxc_le32(ref3) == val3) {
                    uint32_t l3 = sizeof(uint32_t);
                    const uint8_t* limit8_3 = iend - sizeof(uint64_t);
                    while (ip + 2 + l3 < limit8_3) {
                        const uint64_t v1 = zxc_le64(ip + 2 + l3);
                        const uint64_t v2 = zxc_le64(ref3 + l3);
                        if (v1 != v2) {
                            l3 += (uint32_t)(zxc_ctz64(v1 ^ v2) >> 3);
                            goto lazy2_done;
                        }
                        l3 += sizeof(uint64_t);
                    }
                    while (ip + 2 + l3 < iend && ref3[l3] == ip[2 + l3]) l3++;
                lazy2_done:
                    if (l3 > max_lazy3) max_lazy3 = l3;
                }
                const uint16_t delta = chain_table[idx3];
                if (UNLIKELY(delta == 0)) break;
                idx3 -= delta;
                is_first3 = 0;
            }
            // +2 threshold: two literal bytes would be spent deferring twice.
            if (max_lazy3 > best.len + 2) best.ref = NULL;
        }
    }
    return best;
}

/**
 * @brief Encodes a block of numerical data using delta encoding and
 * bit-packing.
 *
 * This function compresses a source buffer of 32-bit integers. It processes the
 * data in frames defined by `ZXC_NUM_FRAME_SIZE`.
 *
 * **Algorithm Steps:**
 * 1. **Delta Encoding:** Calculates `delta = value[i] - value[i-1]`. This
 * reduces the magnitude of numbers if the data is sequential or correlated.
 * - **SIMD Optimization:** Uses AVX2 (`_mm256_sub_epi32`) to compute deltas
 * for 8 integers at once.
 * 2. **ZigZag Encoding:** Maps signed deltas to unsigned integers (`(n << 1) ^
 * (n >> 31)`). This ensures small negative numbers become small positive
 * numbers (e.g., -1 -> 1, 1 -> 2).
 * 3. **Bit Width Calculation:** Finds the maximum value in the frame to
 * determine the minimum number of bits (`b`) needed to represent all deltas.
 * 4. **Bit Packing:** Packs the ZigZag-encoded deltas into a compact bitstream
 * using `b` bits per value.
 *
 * @param[in] src Pointer to the source buffer containing raw 32-bit integer data.
* @param[in] src_sz Size of the source buffer in bytes. Must be a multiple of 4
 * and non-zero.
 * @param[out] dst Pointer to the destination buffer where compressed data will be
 * written.
 * @param[in] dst_cap Capacity of the destination buffer in bytes.
 * @param[out] out_sz Pointer to a variable where the total size of the compressed
 * output will be stored.
 *
 * @return ZXC_OK on success, or a negative zxc_error_t code (e.g., ZXC_ERROR_DST_TOO_SMALL) if an
 * error occurs (e.g., invalid input size, destination buffer too small).
 */
static int zxc_encode_block_num(const zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src,
                                const size_t src_sz, uint8_t* RESTRICT dst, size_t dst_cap,
                                size_t* RESTRICT out_sz) {
    // NOTE(review): ctx is never referenced in this body — presumably kept for
    // signature parity with the other block encoders; confirm before removing.
    if (UNLIKELY(src_sz % sizeof(uint32_t) != 0 || src_sz == 0 ||
                 dst_cap < ZXC_BLOCK_HEADER_SIZE + ZXC_NUM_HEADER_BINARY_SIZE))
        return ZXC_ERROR_DST_TOO_SMALL;
    const size_t count = src_sz / sizeof(uint32_t);
    zxc_block_header_t bh = {.block_type = ZXC_BLOCK_NUM};
    // Reserve space for the block header up front; it is written last, once
    // comp_size is known.
    uint8_t* p_curr = dst + ZXC_BLOCK_HEADER_SIZE;
    size_t rem = dst_cap - ZXC_BLOCK_HEADER_SIZE;
    const zxc_num_header_t nh = {.n_values = count, .frame_size = ZXC_NUM_FRAME_SIZE};
    const int hs = zxc_write_num_header(p_curr, rem, &nh);
    if (UNLIKELY(hs < 0)) return hs;
    p_curr += hs;
    rem -= hs;
    uint32_t deltas[ZXC_NUM_FRAME_SIZE];
    const uint8_t* in_ptr = src;
    uint32_t prev = 0;  // Carries the last value of the previous frame as the next base.
    for (size_t i = 0; i < count; i += ZXC_NUM_FRAME_SIZE) {
        const size_t frames = (count - i < ZXC_NUM_FRAME_SIZE) ? (count - i) : ZXC_NUM_FRAME_SIZE;
        uint32_t max_d = 0;
        const uint32_t base = prev;
        size_t j = 0;
#if defined(ZXC_USE_AVX512)
        if (frames >= 16) {
            __m512i v_max_accum = _mm512_setzero_si512();  // Initialize max accumulator to 0
            for (; j < (frames & ~15); j += 16) {
                // The very first value has no predecessor at in_ptr - 4; let the
                // scalar loop handle the whole first frame start.
                if (UNLIKELY(i == 0 && j == 0)) goto _scalar;
                // Load 16 consecutive integers
                const __m512i vc = _mm512_loadu_si512((const void*)(in_ptr + j * 4));
                // Load 16 integers offset by -1 to get previous values
                const __m512i vp = _mm512_loadu_si512((const void*)(in_ptr + j * 4 - 4));
                const __m512i diff = _mm512_sub_epi32(vc, vp);  // Compute deltas: curr - prev
                // ZigZag encode: (diff << 1) ^ (diff >> 31)
                const __m512i zigzag =
                    _mm512_xor_si512(_mm512_slli_epi32(diff, 1), _mm512_srai_epi32(diff, 31));
                _mm512_storeu_si512((void*)&deltas[j], zigzag);       // Store results
                v_max_accum = _mm512_max_epu32(v_max_accum, zigzag);  // Update max value seen so far
            }
            max_d = _mm512_reduce_max_epu32(v_max_accum);  // Horizontal max reduction
            if (j > 0) prev = zxc_le32(in_ptr + (j - 1) * 4);
        }
#elif defined(ZXC_USE_AVX2)
        if (frames >= 8) {
            __m256i v_max_accum = _mm256_setzero_si256();  // Initialize max accumulator to 0
            for (; j < (frames & ~7); j += 8) {
                if (UNLIKELY(i == 0 && j == 0)) goto _scalar;
                // Load 8 consecutive integers
                const __m256i vc = _mm256_loadu_si256((const __m256i*)(in_ptr + j * 4));
                // Load 8 integers offset by -1
                const __m256i vp = _mm256_loadu_si256((const __m256i*)(in_ptr + j * 4 - 4));
                const __m256i diff = _mm256_sub_epi32(vc, vp);  // Compute deltas
                // ZigZag encode: (diff << 1) ^ (diff >> 31)
                const __m256i zigzag =
                    _mm256_xor_si256(_mm256_slli_epi32(diff, 1), _mm256_srai_epi32(diff, 31));
                _mm256_storeu_si256((__m256i*)&deltas[j], zigzag);    // Store results
                v_max_accum = _mm256_max_epu32(v_max_accum, zigzag);  // Update max accumulator
            }
            max_d = zxc_mm256_reduce_max_epu32(v_max_accum);  // Horizontal max reduction
            if (j > 0) {
                prev = zxc_le32(in_ptr + (j - 1) * 4);
            }
        }
#elif defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32)
        // NEON processes 128-bit vectors (4 uint32 integers)
        if (frames >= 4) {
            uint32x4_t v_max_accum = vdupq_n_u32(0);  // Initialize vector with zeros
            for (; j < (frames & ~3); j += 4) {
                if (UNLIKELY(i == 0 && j == 0)) goto _scalar;
                // Load 4 32-bit integers
                const uint32x4_t vc = vld1q_u32((const uint32_t*)(in_ptr + j * 4));
                const uint32x4_t vp = vld1q_u32((const uint32_t*)(in_ptr + j * 4 - 4));
                const uint32x4_t diff = vsubq_u32(vc, vp);  // Calc deltas
                // ZigZag encode: (diff << 1) ^ (diff >> 31)
                const uint32x4_t z1 = vshlq_n_u32(diff, 1);
                // Arithmetic shift right to duplicate sign bit
                const uint32x4_t z2 =
                    vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_u32(diff), 31));
                const uint32x4_t zigzag = veorq_u32(z1, z2);
                vst1q_u32(&deltas[j], zigzag);                 // Store results
                v_max_accum = vmaxq_u32(v_max_accum, zigzag);  // Update max accumulator
            }
#if defined(ZXC_USE_NEON64)
            max_d = vmaxvq_u32(v_max_accum);  // Reduce vector to single max value (AArch64)
#else
            // NEON 32-bit (ARMv7) fallback for horizontal max using standard shifts
            // Reduce 4 elements -> 2
            uint32x4_t v_swap = vextq_u32(v_max_accum, v_max_accum, 2);  // Swap low/high 64-bit halves
            uint32x4_t v_max2 = vmaxq_u32(v_max_accum, v_swap);
            // Reduce 2 -> 1
            v_swap = vextq_u32(v_max2, v_max2, 1);  // Shift by 32 bits
            uint32x4_t v_max1 = vmaxq_u32(v_max2, v_swap);
            max_d = vgetq_lane_u32(v_max1, 0);
#endif
            if (j > 0) prev = zxc_le32(in_ptr + (j - 1) * sizeof(uint32_t));
        }
#endif
#if defined(ZXC_USE_AVX2) || defined(ZXC_USE_AVX512) || defined(ZXC_USE_NEON64) || \
    defined(ZXC_USE_NEON32)
    _scalar:
#endif
        // Scalar tail (and the frame head when SIMD bailed out above).
        for (; j < frames; j++) {
            const uint32_t v = zxc_le32(in_ptr + j * sizeof(uint32_t));
            const uint32_t diff = zxc_zigzag_encode((int32_t)(v - prev));
            deltas[j] = diff;
            if (diff > max_d) max_d = diff;
            prev = v;
        }
        in_ptr += frames * sizeof(uint32_t);
        const uint8_t bits = zxc_highbit32(max_d);
        const size_t packed = ((frames * bits) + ZXC_BITS_PER_BYTE - 1) / ZXC_BITS_PER_BYTE;
        // + sizeof(uint32_t): the bit packer needs 4 bytes of write slack.
        if (UNLIKELY(rem < ZXC_NUM_CHUNK_HEADER_SIZE + packed + sizeof(uint32_t)))
            return ZXC_ERROR_DST_TOO_SMALL;
        // Chunk header layout: [0..1] frame count (LE16), [2..3] bit width (LE16),
        // [4..11] base value (LE64), [12..15] packed byte count (LE32).
        zxc_store_le16(p_curr, (uint16_t)frames);
        zxc_store_le16(p_curr + 2, bits);
        zxc_store_le64(p_curr + 4, (uint64_t)base);
        zxc_store_le32(p_curr + 12, (uint32_t)packed);
        p_curr += ZXC_NUM_CHUNK_HEADER_SIZE;
        rem -= ZXC_NUM_CHUNK_HEADER_SIZE;
        const int pb = zxc_bitpack_stream_32(deltas, frames, p_curr, rem, bits);
        if (UNLIKELY(pb < 0)) return pb;
        p_curr += pb;
        rem -= pb;
    }
    bh.comp_size = (uint32_t)(p_curr - (dst + ZXC_BLOCK_HEADER_SIZE));
    const int hw = zxc_write_block_header(dst, dst_cap, &bh);
    if (UNLIKELY(hw < 0)) return hw;
    // Checksum will be appended by the wrapper
    *out_sz = ZXC_BLOCK_HEADER_SIZE + bh.comp_size;
    return ZXC_OK;
}

/**
 * @brief Encodes a data block using the General (GLO) compression format.
 *
 * This function implements the core LZ77 compression logic. It dynamically
 * adjusts compression parameters (search depth, lazy matching strategy, and
 * step skipping) based on the compression level configured in the context.
 *
 * **LZ77 Implementation Details:**
 * 1. **Hash Chain:** Uses a hash table (`ctx->hash_table`) to find potential
 * match positions. Collisions are handled via a `chain_table`, allowing us to
 * search deeper into the history for a better match.
 * 2. **Lazy Matching:** If a match is found, we check the *next* byte to see if
 * it produces a longer match. If so, we output a literal and take the better
 * match. This is enabled for levels >= 3.
 * 3. **Step Skipping:** For lower levels (1-3), we skip bytes when updating the
 * hash table to increase speed (`step > 1`). For levels 4+, we process every
 * byte to maximize compression ratio.
 * 4. **SIMD Match Finding:** Uses AVX2/AVX512/NEON to compare 32/64 bytes at a
 * time during match length calculation, significantly speeding up long match
 * verification.
 * 5. **RLE Detection:** Analyzes literals to see if Run-Length Encoding would
 * be beneficial (saving > 10% space).
* * The encoding process consists of: * 1. **LZ77 Parsing**: The function iterates through the source data, * maintaining a hash chain to find repeated patterns (matches). It supports * "Lazy Matching" for higher compression levels to optimize match selection. * 2. **Sequence Storage**: Matches are converted into sequences consisting of * literal lengths, match lengths, and offsets. * 3. **Bitpacking & Serialization**: The sequences are analyzed to determine * optimal bit-widths. The function then writes the block header, encodes * literals (using Raw or RLE encoding), and bit-packs the sequence streams into * the destination buffer. * * @param[in,out] ctx Pointer to the compression context containing hash tables * and configuration. * @param[in] src Pointer to the input source data. * @param[in] src_sz Size of the input data in bytes. * @param[out] dst Pointer to the destination buffer where compressed data will * be written. * @param[in] dst_cap Maximum capacity of the destination buffer. * @param[out] out_sz [Out] Pointer to a variable that will receive the total size * of the compressed output. * * @return ZXC_OK on success, or a negative zxc_error_t code (e.g., ZXC_ERROR_DST_TOO_SMALL) if an * error occurs (e.g., buffer overflow). 
 */
static int zxc_encode_block_glo(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src,
                                const size_t src_sz, uint8_t* RESTRICT dst, size_t dst_cap,
                                size_t* RESTRICT out_sz) {
    const int level = ctx->compression_level;
    const zxc_lz77_params_t lzp = zxc_get_lz77_params(level);
    // Epoch-based lazy invalidation: bumping the epoch makes every stale hash
    // entry invalid without touching memory; only wipe when the counter wraps.
    ctx->epoch++;
    if (UNLIKELY(ctx->epoch >= ctx->max_epoch)) {
        ZXC_MEMSET(ctx->hash_table, 0, 2 * ZXC_LZ_HASH_SIZE * sizeof(uint32_t));
        ctx->epoch = 1;
    }
    const uint32_t epoch_mark = ctx->epoch << ctx->offset_bits;
    // NOTE(review): mflimit = iend - 12 leaves tail slack for the matcher;
    // presumably callers guarantee src_sz > 12 -- confirm upstream.
    const uint8_t *ip = src, *iend = src + src_sz, *anchor = ip, *mflimit = iend - 12;
    uint32_t* const hash_table = ctx->hash_table;
    uint16_t* const chain_table = ctx->chain_table;
    uint8_t* const literals = ctx->literals;
    uint8_t* const buf_tokens = ctx->buf_tokens;
    uint16_t* const buf_offsets = ctx->buf_offsets;
    uint8_t* const buf_extras = ctx->buf_extras;
    uint32_t seq_c = 0;
    size_t lit_c = 0;
    size_t extras_sz = 0;
    uint16_t max_offset = 0;  // Track max offset for 1-byte/2-byte mode decision

    // --- LZ77 PARSE: split input into (literals, token, offset, extras) streams ---
    while (LIKELY(ip < mflimit)) {
        // Adaptive skip: step grows with the distance since the last match so
        // incompressible regions are traversed quickly.
        const size_t dist = (size_t)(ip - anchor);
        size_t step = lzp.step_base + (dist >> lzp.step_shift);
        if (UNLIKELY(ip + step >= mflimit)) step = 1;
        ZXC_PREFETCH_READ(ip + step * 4 + ZXC_CACHE_LINE_SIZE);
        const zxc_match_t m =
            zxc_lz77_find_best_match(src, ip, iend, mflimit, anchor, hash_table, chain_table,
                                     epoch_mark, ctx->offset_mask, level, lzp);
        if (m.ref) {
            // The matcher may extend the match backwards into pending literals.
            ip -= m.backtrack;
            const uint32_t ll = (uint32_t)(ip - anchor);
            const uint32_t ml = (uint32_t)(m.len - ZXC_LZ_MIN_MATCH_LEN);
            const uint32_t off = (uint32_t)(ip - m.ref);
            if (ll > 0) {
                // Wide copies only when there is provable slack before iend.
                if (ll <= 16 && anchor + 16 <= iend)
                    zxc_copy16(literals + lit_c, anchor);
                else if (ll <= 32 && anchor + 32 <= iend)
                    zxc_copy32(literals + lit_c, anchor);
                else
                    ZXC_MEMCPY(literals + lit_c, anchor, ll);
                lit_c += ll;
            }
            // 4-bit/4-bit token; saturated fields overflow into the varint extras stream.
            const uint8_t ll_code = (ll >= ZXC_TOKEN_LL_MASK) ? ZXC_TOKEN_LL_MASK : (uint8_t)ll;
            const uint8_t ml_code = (ml >= ZXC_TOKEN_ML_MASK) ? ZXC_TOKEN_ML_MASK : (uint8_t)ml;
            buf_tokens[seq_c] = (ll_code << ZXC_TOKEN_LIT_BITS) | ml_code;
            buf_offsets[seq_c] = (uint16_t)(off - ZXC_LZ_OFFSET_BIAS);
            if ((off - ZXC_LZ_OFFSET_BIAS) > max_offset)
                max_offset = (uint16_t)(off - ZXC_LZ_OFFSET_BIAS);
            if (ll >= ZXC_TOKEN_LL_MASK) {
                extras_sz += zxc_write_varint(buf_extras + extras_sz, ll - ZXC_TOKEN_LL_MASK);
            }
            if (ml >= ZXC_TOKEN_ML_MASK) {
                extras_sz += zxc_write_varint(buf_extras + extras_sz, ml - ZXC_TOKEN_ML_MASK);
            }
            seq_c++;
            // Higher levels also index a position near the match tail so the next
            // search can find overlapping candidates.
            if (m.len > 2 && level > 4) {
                const uint8_t* match_end = ip + m.len;
                if (match_end < iend - 7) {
                    const uint32_t pos_u = (uint32_t)((match_end - 2) - src);
                    const uint64_t val_u8 = zxc_le64(match_end - 2);
                    const uint32_t val_u = (uint32_t)val_u8;
                    const uint32_t h_u = zxc_hash_func(val_u8, 1);  // Only for level > 4, uses hash5
                    const uint32_t prev_head = hash_table[2 * h_u];
                    // Head is only valid if tagged with the current epoch.
                    const uint32_t prev_idx = (prev_head & ~ctx->offset_mask) == epoch_mark
                                                  ? (prev_head & ctx->offset_mask)
                                                  : 0;
                    hash_table[2 * h_u] = epoch_mark | pos_u;
                    hash_table[2 * h_u + 1] = val_u;
                    chain_table[pos_u] = (prev_idx > 0 && (pos_u - prev_idx) < ZXC_LZ_WINDOW_SIZE)
                                             ? (uint16_t)(pos_u - prev_idx)
                                             : 0;
                }
            }
            ip += m.len;
            anchor = ip;
        } else {
            ip += step;
        }
    }
    // Flush the trailing literals after the last match.
    const size_t last_lits = iend - anchor;
    if (last_lits > 0) {
        ZXC_MEMCPY(literals + lit_c, anchor, last_lits);
        lit_c += last_lits;
    }
    // --- RLE ANALYSIS ---
    // Dry-run the RLE encoder over the literal stream to estimate its encoded
    // size; RLE is only committed below if it beats raw literals by ~3%.
    size_t rle_size = 0;
    int use_rle = 0;
    if (lit_c > 0) {
        const uint8_t* p = literals;
        const uint8_t* const p_end = literals + lit_c;
        const uint8_t* const p_end_4 = p_end - 3;  // Safe limit for 4-byte lookahead
        while (LIKELY(p < p_end)) {
            const uint8_t b = *p;
            const uint8_t* run_start = p++;
            // Fast run counting with early SIMD exit
#if defined(ZXC_USE_AVX512)
            const __m512i vb = _mm512_set1_epi8((char)b);
            while (p <= p_end - 64) {
                const __m512i v = _mm512_loadu_si512((const void*)p);
                const __mmask64 mask = _mm512_cmpeq_epi8_mask(v, vb);
                if (mask != 0xFFFFFFFFFFFFFFFFULL) {
                    p += (size_t)zxc_ctz64(~mask);
                    goto _run_done;
                }
                p += 64;
            }
#elif defined(ZXC_USE_AVX2)
            const __m256i vb = _mm256_set1_epi8((char)b);
            while (p <= p_end - 32) {
                const __m256i v = _mm256_loadu_si256((const __m256i*)p);
                const uint32_t mask = (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi8(v, vb));
                if (mask != 0xFFFFFFFF) {
                    p += zxc_ctz32(~mask);
                    goto _run_done;
                }
                p += 32;
            }
#elif defined(ZXC_USE_NEON64)
            const uint8x16_t vb = vdupq_n_u8(b);
            while (p <= p_end - 16) {
                const uint8x16_t v = vld1q_u8(p);
                const uint8x16_t eq = vceqq_u8(v, vb);
                const uint8x16_t not_eq = vmvnq_u8(eq);
                const uint64_t lo = vgetq_lane_u64(vreinterpretq_u64_u8(not_eq), 0);
                if (lo != 0) {
                    p += (zxc_ctz64(lo) >> 3);
                    goto _run_done;
                }
                uint64_t hi = vgetq_lane_u64(vreinterpretq_u64_u8(not_eq), 1);
                if (hi != 0) {
                    p += 8 + (zxc_ctz64(hi) >> 3);
                    goto _run_done;
                }
                p += 16;
            }
#elif defined(ZXC_USE_NEON32)
            uint8x16_t vb = vdupq_n_u8(b);
            while (p <= p_end - 16) {
                uint8x16_t v = vld1q_u8(p);
                uint8x16_t eq = vceqq_u8(v, vb);
                uint8x16_t not_eq = vmvnq_u8(eq);
                // 32-bit ARM NEON doesn't always support vgetq_lane_u64 / vreinterpretq_u64_u8 so
                // we treat the 128-bit vector as 4 x 32-bit lanes
                const uint32x4_t neq32 = vreinterpretq_u32_u8(not_eq);
                const uint32_t l0 = vgetq_lane_u32(neq32, 0);
                const uint32_t l1 = vgetq_lane_u32(neq32, 1);
                const uint64_t lo = ((uint64_t)l1 << 32) | l0;
                if (lo != 0) {
                    p += (size_t)(zxc_ctz64(lo) >> 3);
                    goto _run_done;
                }
                const uint32_t h0 = vgetq_lane_u32(neq32, 2);
                const uint32_t h1 = vgetq_lane_u32(neq32, 3);
                const uint64_t hi = ((uint64_t)h1 << 32) | h0;
                if (hi != 0) {
                    p += 8 + (zxc_ctz64(hi) >> 3);
                    goto _run_done;
                }
                p += 16;
            }
#endif
            // Scalar tail (and the whole loop on platforms without SIMD).
            while (p < p_end && *p == b) p++;
#if defined(ZXC_USE_AVX512) || defined(ZXC_USE_AVX2) || defined(ZXC_USE_NEON64) || \
    defined(ZXC_USE_NEON32)
        _run_done:;
#endif
            const size_t run = (size_t)(p - run_start);
            if (run >= 4) {
                // RLE run: 2 bytes per 131 values, then remainder
                // Branchless: full_chunks * 2 + remainder handling
                const size_t full_chunks = run / 131;
                const size_t rem = run - full_chunks * 131;  // Avoid modulo
                rle_size += full_chunks * 2;
                // Remainder: if >= 4 -> 2 bytes (RLE), else 1 + rem (literal)
                if (rem >= 4)
                    rle_size += 2;
                else if (rem > 0)
                    rle_size += 1 + rem;
            } else {
                // Literal run: scan ahead with fast SIMD lookahead
                const uint8_t* lit_start = run_start;
#if defined(ZXC_USE_AVX512)
                while (p <= p_end_4 - 64) {
                    const __m512i v0 = _mm512_loadu_si512((const void*)p);
                    const __m512i v1 = _mm512_loadu_si512((const void*)(p + 1));
                    const __m512i v2 = _mm512_loadu_si512((const void*)(p + 2));
                    const __m512i v3 = _mm512_loadu_si512((const void*)(p + 3));
                    const __mmask64 mask = _mm512_cmpeq_epi8_mask(v0, v1) &
                                           _mm512_cmpeq_epi8_mask(v1, v2) &
                                           _mm512_cmpeq_epi8_mask(v2, v3);
                    if (mask != 0) {
                        p += (size_t)zxc_ctz64(mask);
                        goto _lit_done;
                    }
                    p += 64;
                }
#elif defined(ZXC_USE_AVX2)
                while (p <= p_end_4 - 32) {
                    __m256i v0 = _mm256_loadu_si256((const __m256i*)p);
                    __m256i v1 = _mm256_loadu_si256((const __m256i*)(p + 1));
                    __m256i v2 = _mm256_loadu_si256((const __m256i*)(p + 2));
                    __m256i v3 = _mm256_loadu_si256((const __m256i*)(p + 3));
                    __m256i vend = _mm256_and_si256(
                        _mm256_cmpeq_epi8(v0, v1),
                        _mm256_and_si256(_mm256_cmpeq_epi8(v1, v2), _mm256_cmpeq_epi8(v2, v3)));
                    uint32_t mask = (uint32_t)_mm256_movemask_epi8(vend);
                    if (mask != 0) {
                        p += zxc_ctz32(mask);
                        goto _lit_done;
                    }
                    p += 32;
                }
#elif defined(ZXC_USE_NEON64)
                while (p <= p_end_4 - 16) {
                    uint8x16_t v0 = vld1q_u8(p);
                    uint8x16_t v1 = vld1q_u8(p + 1);
                    uint8x16_t v2 = vld1q_u8(p + 2);
                    uint8x16_t v3 = vld1q_u8(p + 3);
                    uint8x16_t eq =
                        vandq_u8(vceqq_u8(v0, v1), vandq_u8(vceqq_u8(v1, v2), vceqq_u8(v2, v3)));
                    uint64_t lo = vgetq_lane_u64(vreinterpretq_u64_u8(eq), 0);
                    if (lo != 0) {
                        p += (zxc_ctz64(lo) >> 3);
                        goto _lit_done;
                    }
                    uint64_t hi = vgetq_lane_u64(vreinterpretq_u64_u8(eq), 1);
                    if (hi != 0) {
                        p += 8 + (zxc_ctz64(hi) >> 3);
                        goto _lit_done;
                    }
                    p += 16;
                }
#elif defined(ZXC_USE_NEON32)
                while (p <= p_end_4 - 16) {
                    uint8x16_t v0 = vld1q_u8(p);
                    uint8x16_t v1 = vld1q_u8(p + 1);
                    uint8x16_t v2 = vld1q_u8(p + 2);
                    uint8x16_t v3 = vld1q_u8(p + 3);
                    uint8x16_t eq =
                        vandq_u8(vceqq_u8(v0, v1), vandq_u8(vceqq_u8(v1, v2), vceqq_u8(v2, v3)));
                    uint32x4_t eq32 = vreinterpretq_u32_u8(eq);
                    uint32_t l0 = vgetq_lane_u32(eq32, 0);
                    uint32_t l1 = vgetq_lane_u32(eq32, 1);
                    uint64_t lo = ((uint64_t)l1 << 32) | l0;
                    if (lo != 0) {
                        p += (zxc_ctz64(lo) >> 3);
                        goto _lit_done;
                    }
                    uint32_t h0 = vgetq_lane_u32(eq32, 2);
                    uint32_t h1 = vgetq_lane_u32(eq32, 3);
                    uint64_t hi = ((uint64_t)h1 << 32) | h0;
                    if (hi != 0) {
                        p += 8 + (zxc_ctz64(hi) >> 3);
                        goto _lit_done;
                    }
                    p += 16;
                }
#endif
                while (p < p_end_4) {
                    // Check for RLE opportunity (4 identical bytes)
                    if (UNLIKELY(p[0] == p[1] && p[1] == p[2] && p[2] == p[3])) break;
                    p++;
                }
                // Handle remaining bytes near end
                while (p < p_end) {
                    if (UNLIKELY(p + 3 < p_end && p[0] == p[1] && p[1] == p[2] && p[2] == p[3]))
                        break;
                    p++;
                }
#if defined(ZXC_USE_AVX512) || defined(ZXC_USE_AVX2) || defined(ZXC_USE_NEON64) || \
    defined(ZXC_USE_NEON32)
            _lit_done:;
#endif
                const size_t lit_run = (size_t)(p - lit_start);
                // 1 header per 128 bytes + all data bytes
                // lit_run + ceil(lit_run / 128)
                rle_size += lit_run + ((lit_run + 127) >> 7);
            }
        }
        // Threshold: ~3% savings using integer math (97% ~= 1 - 1/32)
        if (rle_size < lit_c - (lit_c >> 5)) use_rle = 1;
    }
    // --- SERIALIZATION: block header, section descriptors, then the 4 streams ---
    // NOTE(review): rem underflows if dst_cap < ZXC_BLOCK_HEADER_SIZE; callers
    // presumably size dst via a bound function -- confirm.
    zxc_block_header_t bh = {.block_type = ZXC_BLOCK_GLO};
    uint8_t* const p = dst + ZXC_BLOCK_HEADER_SIZE;
    size_t rem = dst_cap - ZXC_BLOCK_HEADER_SIZE;
    // Decide offset encoding mode: 1-byte if all offsets <= 255
    const int use_8bit_off = (max_offset <= 255) ? 1 : 0;
    const size_t off_stream_size = use_8bit_off ? seq_c : (seq_c * 2);
    const zxc_gnr_header_t gh = {.n_sequences = seq_c,
                                 .n_literals = (uint32_t)lit_c,
                                 .enc_lit = (uint8_t)use_rle,
                                 .enc_litlen = 0,
                                 .enc_mlen = 0,
                                 .enc_off = (uint8_t)use_8bit_off};
    zxc_section_desc_t desc[ZXC_GLO_SECTIONS] = {0};
    // Each descriptor packs (encoded size | raw size << 32).
    desc[0].sizes = (uint64_t)(use_rle ? rle_size : lit_c) | ((uint64_t)lit_c << 32);
    desc[1].sizes = (uint64_t)seq_c | ((uint64_t)seq_c << 32);
    desc[2].sizes = (uint64_t)off_stream_size | ((uint64_t)off_stream_size << 32);
    desc[3].sizes = (uint64_t)extras_sz | ((uint64_t)extras_sz << 32);
    const int ghs = zxc_write_glo_header_and_desc(p, rem, &gh, desc);
    if (UNLIKELY(ghs < 0)) return ghs;
    uint8_t* p_curr = p + ghs;
    rem -= ghs;
    // Extract stream sizes once
    const size_t sz_lit = (size_t)(desc[0].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_tok = (size_t)(desc[1].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_off = (size_t)(desc[2].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_ext = (size_t)(desc[3].sizes & ZXC_SECTION_SIZE_MASK);
    if (UNLIKELY(rem < sz_lit)) return ZXC_ERROR_DST_TOO_SMALL;
    if (use_rle) {
        // Write RLE - optimized single-pass encoding
        const uint8_t* lit_ptr = literals;
        const uint8_t* const lit_end = literals + lit_c;
        while (lit_ptr < lit_end) {
            uint8_t b = *lit_ptr;
            const uint8_t* run_start = lit_ptr++;
            // Count run length
            while (lit_ptr < lit_end && *lit_ptr == b) lit_ptr++;
            size_t run = (size_t)(lit_ptr - run_start);
            if (run >= 4) {
                // RLE runs: emit 2-byte tokens (header + value)
                while (run >= 4) {
                    size_t chunk = (run > 131) ? 131 : run;
                    *p_curr++ = (uint8_t)(ZXC_LIT_RLE_FLAG | (chunk - 4));
                    *p_curr++ = b;
                    run -= chunk;
                }
                // Leftover < 4 bytes: emit as literal
                if (run > 0) {
                    *p_curr++ = (uint8_t)(run - 1);
                    ZXC_MEMCPY(p_curr, lit_ptr - run, run);
                    p_curr += run;
                }
            } else {
                // Literal run: scan ahead to find next RLE opportunity
                const uint8_t* lit_run_start = run_start;
                while (lit_ptr < lit_end) {
                    // Quick check: need 4 identical bytes to break
                    if (UNLIKELY(lit_ptr + 3 < lit_end && lit_ptr[0] == lit_ptr[1] &&
                                 lit_ptr[1] == lit_ptr[2] && lit_ptr[2] == lit_ptr[3])) {
                        break;
                    }
                    lit_ptr++;
                }
                size_t lit_run = (size_t)(lit_ptr - lit_run_start);
                const uint8_t* src_ptr = lit_run_start;
                // Emit literal chunks (max 128 bytes each)
                while (lit_run > 0) {
                    size_t chunk = (lit_run > 128) ? 128 : lit_run;
                    *p_curr++ = (uint8_t)(chunk - 1);
                    ZXC_MEMCPY(p_curr, src_ptr, chunk);
                    p_curr += chunk;
                    src_ptr += chunk;
                    lit_run -= chunk;
                }
            }
        }
    } else {
        ZXC_MEMCPY(p_curr, literals, lit_c);
        p_curr += lit_c;
    }
    rem -= sz_lit;
    if (UNLIKELY(rem < sz_tok)) return ZXC_ERROR_DST_TOO_SMALL;
    ZXC_MEMCPY(p_curr, buf_tokens, seq_c);
    p_curr += seq_c;
    rem -= sz_tok;
    if (UNLIKELY(rem < sz_off)) return ZXC_ERROR_DST_TOO_SMALL;
    if (use_8bit_off) {
        // Write 1-byte offsets - unroll for better throughput
        uint32_t i = 0;
        for (; i + 8 <= seq_c; i += 8) {
            p_curr[0] = (uint8_t)buf_offsets[i + 0];
            p_curr[1] = (uint8_t)buf_offsets[i + 1];
            p_curr[2] = (uint8_t)buf_offsets[i + 2];
            p_curr[3] = (uint8_t)buf_offsets[i + 3];
            p_curr[4] = (uint8_t)buf_offsets[i + 4];
            p_curr[5] = (uint8_t)buf_offsets[i + 5];
            p_curr[6] = (uint8_t)buf_offsets[i + 6];
            p_curr[7] = (uint8_t)buf_offsets[i + 7];
            p_curr += 8;
        }
        for (; i < seq_c; i++) {
            *p_curr++ = (uint8_t)buf_offsets[i];
        }
    } else {
        // Write 2-byte offsets in little-endian order
#ifdef ZXC_BIG_ENDIAN
        for (uint32_t i = 0; i < seq_c; i++) {
            zxc_store_le16(p_curr, buf_offsets[i]);
            p_curr += sizeof(uint16_t);
        }
#else
        ZXC_MEMCPY(p_curr, buf_offsets, seq_c * sizeof(uint16_t));
        p_curr += seq_c * sizeof(uint16_t);
#endif
    }
    rem -= sz_off;
    if (UNLIKELY(rem < sz_ext)) return ZXC_ERROR_DST_TOO_SMALL;
    ZXC_MEMCPY(p_curr, buf_extras, extras_sz);
    p_curr += extras_sz;
    bh.comp_size = (uint32_t)(p_curr - (dst + ZXC_BLOCK_HEADER_SIZE));
    const int hw = zxc_write_block_header(dst, dst_cap, &bh);
    if (UNLIKELY(hw < 0)) return hw;
    // Checksum will be appended by the wrapper
    *out_sz = ZXC_BLOCK_HEADER_SIZE + bh.comp_size;
    return ZXC_OK;
}

/**
 * @brief Encodes a data block using the General High Velocity (GHI) compression format.
 *
 * 1. Compression Strategy
 * It uses an LZ77-based algorithm with a sliding window (64KB) and a hash table/chain table
 * mechanism.
 *
 * 2. Token Format (Fixed-Width)
 * Unlike the standard GLO block which uses 1-byte tokens (4-bit literal length / 4-bit match
 * length), GHI uses 4-byte (32-bit) sequence records for better performance on long runs:
 * Literal Length (LL): 8 bits (stores 0-254; 255 indicates overflow).
 * Match Length (ML): 8 bits (stores 0-254; 255 indicates overflow).
 * Offset: 16 bits (supports the full 64KB window).
 * This format minimizes the number of expensive VByte reads during decompression for common
 * sequences where lengths are between 16 and 255.
 *
 * @param[in,out] ctx Pointer to the compression context containing hash tables
 * and configuration.
 * @param[in] src Pointer to the input source data.
 * @param[in] src_sz Size of the input data in bytes.
 * @param[out] dst Pointer to the destination buffer where compressed data will
 * be written.
 * @param[in] dst_cap Maximum capacity of the destination buffer.
 * @param[out] out_sz Pointer to a variable that will receive the total size
 * of the compressed output.
 *
 * @return ZXC_OK on success, or a negative zxc_error_t code (e.g., ZXC_ERROR_DST_TOO_SMALL) if an
 * error occurs (e.g., buffer overflow).
 */
static int zxc_encode_block_ghi(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src,
                                const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap,
                                size_t* RESTRICT const out_sz) {
    const int level = ctx->compression_level;
    const zxc_lz77_params_t lzp = zxc_get_lz77_params(level);
    // Epoch-based lazy invalidation of the hash table (same scheme as GLO).
    ctx->epoch++;
    if (UNLIKELY(ctx->epoch >= ctx->max_epoch)) {
        ZXC_MEMSET(ctx->hash_table, 0, 2 * ZXC_LZ_HASH_SIZE * sizeof(uint32_t));
        ctx->epoch = 1;
    }
    const uint32_t epoch_mark = ctx->epoch << ctx->offset_bits;
    const uint8_t *ip = src, *iend = src + src_sz, *anchor = ip, *mflimit = iend - 12;
    uint32_t* const hash_table = ctx->hash_table;
    uint8_t* const buf_extras = ctx->buf_extras;
    uint16_t* const chain_table = ctx->chain_table;
    uint8_t* const literals = ctx->literals;
    uint32_t* const buf_sequences = ctx->buf_sequences;
    uint32_t seq_c = 0;
    size_t extras_c = 0;
    size_t lit_c = 0;
    uint16_t max_offset = 0;
    // --- LZ77 PARSE: emit fixed-width 32-bit sequence records ---
    while (LIKELY(ip < mflimit)) {
        // Adaptive skip through incompressible data.
        size_t dist = (size_t)(ip - anchor);
        size_t step = lzp.step_base + (dist >> lzp.step_shift);
        if (UNLIKELY(ip + step >= mflimit)) step = 1;
        ZXC_PREFETCH_READ(ip + step * 4 + 64);
        const zxc_match_t m =
            zxc_lz77_find_best_match(src, ip, iend, mflimit, anchor, hash_table, chain_table,
                                     epoch_mark, ctx->offset_mask, level, lzp);
        if (m.ref) {
            ip -= m.backtrack;
            const uint32_t ll = (uint32_t)(ip - anchor);
            const uint32_t ml = (uint32_t)(m.len - ZXC_LZ_MIN_MATCH_LEN);
            const uint32_t off = (uint32_t)(ip - m.ref);
            if (ll > 0) {
                // Wide copies only when there is provable slack before iend.
                if (ll <= 16 && anchor + 16 <= iend)
                    zxc_copy16(literals + lit_c, anchor);
                else if (ll <= 32 && anchor + 32 <= iend)
                    zxc_copy32(literals + lit_c, anchor);
                else
                    ZXC_MEMCPY(literals + lit_c, anchor, ll);
                lit_c += ll;
            }
            // 8-bit LL / 8-bit ML / 16-bit offset; 255 marks varint overflow in extras.
            const uint32_t ll_write = (ll >= ZXC_SEQ_LL_MASK) ? 255U : ll;
            const uint32_t ml_write = (ml >= ZXC_SEQ_ML_MASK) ? 255U : ml;
            const uint32_t seq_val = (ll_write << (ZXC_SEQ_ML_BITS + ZXC_SEQ_OFF_BITS)) |
                                     (ml_write << ZXC_SEQ_OFF_BITS) |
                                     ((off - ZXC_LZ_OFFSET_BIAS) & ZXC_SEQ_OFF_MASK);
            if ((off - ZXC_LZ_OFFSET_BIAS) > max_offset)
                max_offset = (uint16_t)(off - ZXC_LZ_OFFSET_BIAS);
            buf_sequences[seq_c] = seq_val;
            seq_c++;
            if (ll >= ZXC_SEQ_LL_MASK) {
                extras_c += zxc_write_varint(buf_extras + extras_c, ll - ZXC_SEQ_LL_MASK);
            }
            if (ml >= ZXC_SEQ_ML_MASK) {
                extras_c += zxc_write_varint(buf_extras + extras_c, ml - ZXC_SEQ_ML_MASK);
            }
            ip += m.len;
            anchor = ip;
        } else {
            ip += step;
        }
    }
    // Flush trailing literals after the last match.
    const size_t last_lits = iend - anchor;
    if (last_lits > 0) {
        ZXC_MEMCPY(literals + lit_c, anchor, last_lits);
        lit_c += last_lits;
    }
    // --- SERIALIZATION: header, then literal/sequence/extras sections ---
    zxc_block_header_t bh = {.block_type = ZXC_BLOCK_GHI};
    uint8_t* const p = dst + ZXC_BLOCK_HEADER_SIZE;
    size_t rem = dst_cap - ZXC_BLOCK_HEADER_SIZE;
    // Decide offset encoding mode
    // NOTE(review): the cast binds to (max_offset <= 255) before the ternary;
    // the resulting value is still 0/1, so behavior is correct if odd-looking.
    const zxc_gnr_header_t gh = {.n_sequences = seq_c,
                                 .n_literals = (uint32_t)lit_c,
                                 .enc_lit = 0,
                                 .enc_litlen = 0,
                                 .enc_mlen = 0,
                                 .enc_off = (uint8_t)(max_offset <= 255) ? 1 : 0};
    zxc_section_desc_t desc[ZXC_GHI_SECTIONS] = {0};
    // Each descriptor packs (encoded size | raw size << 32).
    desc[0].sizes = (uint64_t)lit_c | ((uint64_t)lit_c << 32);
    size_t sz_seqs = seq_c * sizeof(uint32_t);
    desc[1].sizes = (uint64_t)sz_seqs | ((uint64_t)sz_seqs << 32);
    desc[2].sizes = (uint64_t)extras_c | ((uint64_t)extras_c << 32);
    const int ghs = zxc_write_ghi_header_and_desc(p, rem, &gh, desc);
    if (UNLIKELY(ghs < 0)) return ghs;
    uint8_t* p_curr = p + ghs;
    rem -= ghs;
    // Extract stream sizes once
    const size_t sz_lit = (size_t)(desc[0].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_seq = (size_t)(desc[1].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_ext = (size_t)(desc[2].sizes & ZXC_SECTION_SIZE_MASK);
    if (UNLIKELY(rem < sz_lit + sz_seq + sz_ext)) return ZXC_ERROR_DST_TOO_SMALL;
    ZXC_MEMCPY(p_curr, literals, lit_c);
    p_curr += lit_c;
    rem -= sz_lit;
    if (UNLIKELY(rem < sz_seq)) return ZXC_ERROR_DST_TOO_SMALL;
    // Write sequences in little-endian order
#ifdef ZXC_BIG_ENDIAN
    for (uint32_t i = 0; i < seq_c; i++) {
        zxc_store_le32(p_curr, buf_sequences[i]);
        p_curr += sizeof(uint32_t);
    }
#else
    ZXC_MEMCPY(p_curr, buf_sequences, sz_seq);
    p_curr += sz_seq;
#endif
    // --- WRITE EXTRAS ---
    ZXC_MEMCPY(p_curr, buf_extras, sz_ext);
    p_curr += sz_ext;
    bh.comp_size = (uint32_t)(p_curr - (dst + ZXC_BLOCK_HEADER_SIZE));
    const int hw = zxc_write_block_header(dst, dst_cap, &bh);
    if (UNLIKELY(hw < 0)) return hw;
    // Checksum will be appended by the wrapper
    *out_sz = ZXC_BLOCK_HEADER_SIZE + bh.comp_size;
    return ZXC_OK;
}

/**
 * @brief Encodes a raw data block (uncompressed).
 *
 * This function prepares and writes a "RAW" type block into the destination
 * buffer. It handles the block header, copying of source data, and optionally
 * the calculation and storage of a checksum.
 *
 * @param[in] src Pointer to the source data to encode.
 * @param[in] src_sz Size of the source data in bytes.
 * @param[out] dst Pointer to the destination buffer.
 * @param[in] dst_cap Maximum capacity of the destination buffer.
 * @param[out] out_sz Pointer to a variable receiving the total written size
 * (header + data + checksum).
 *
 * @return ZXC_OK on success, or a negative zxc_error_t code (e.g., ZXC_ERROR_DST_TOO_SMALL) if the
 * destination buffer capacity is insufficient.
 */
static int zxc_encode_block_raw(const uint8_t* RESTRICT src, const size_t src_sz,
                                uint8_t* RESTRICT const dst, const size_t dst_cap,
                                size_t* RESTRICT const out_sz) {
    if (UNLIKELY(dst_cap < ZXC_BLOCK_HEADER_SIZE + src_sz)) return ZXC_ERROR_DST_TOO_SMALL;
    // Compute block RAW
    zxc_block_header_t bh;
    bh.block_type = ZXC_BLOCK_RAW;
    bh.block_flags = 0;  // Checksum flag moved to file header
    bh.reserved = 0;
    bh.comp_size = (uint32_t)src_sz;
    const int hw = zxc_write_block_header(dst, dst_cap, &bh);
    if (UNLIKELY(hw < 0)) return hw;
    ZXC_MEMCPY(dst + ZXC_BLOCK_HEADER_SIZE, src, src_sz);
    // Checksum will be appended by the wrapper
    *out_sz = ZXC_BLOCK_HEADER_SIZE + src_sz;
    return ZXC_OK;
}

/**
 * @brief Checks if the given byte array represents a numeric value.
 *
 * This function examines the provided buffer to determine if it contains
 * only numeric characters (e.g., ASCII digits '0'-'9').
 *
 * Improved heuristic:
 * 1. Must be aligned to 4 bytes.
 * 2. Samples the first 128 integers (more accurate).
 * 3. Calculates bit width of deltas (fewer bits = better for NUM).
 * 4. Estimates compression ratio: if NUM would save >20% vs raw, use it.
 *
 * @param[in] src Pointer to the input byte array to be checked.
 * @param[in] size The number of bytes in the input array.
 * @return int Returns 1 if the array is numeric, 0 otherwise.
 */
static int zxc_probe_is_numeric(const uint8_t* src, const size_t size) {
    // Only 4-byte-aligned payloads with at least four u32 samples qualify.
    if (UNLIKELY(size % sizeof(uint32_t) != 0 || size < (4 * sizeof(uint32_t)))) return 0;
    size_t count = size / sizeof(uint32_t);
    count = count < 128 ? count : 128;  // Sample more values for accuracy
    uint32_t prev = zxc_le32(src);
    const uint8_t* p = src + sizeof(uint32_t);
    uint32_t max_zigzag = 0;
    uint32_t small_count = 0;   // Deltas < 256 (8 bits)
    uint32_t medium_count = 0;  // Deltas < 65536 (16 bits)
    // Measure zigzag-encoded successive deltas over the sample window.
    for (size_t i = 1; i < count; i++) {
        const uint32_t curr = zxc_le32(p);
        const int32_t diff = (int32_t)(curr - prev);
        const uint32_t zigzag = zxc_zigzag_encode(diff);
        max_zigzag = zigzag > max_zigzag ? zigzag : max_zigzag;
        small_count += (uint32_t)(zigzag < 256);
        medium_count += (uint32_t)(zigzag >= 256) & (uint32_t)(zigzag < 65536);
        prev = curr;
        p += sizeof(uint32_t);
    }
    // Calculate bit width needed for max delta
    uint32_t bits_needed = 0;
    uint32_t tmp = max_zigzag;
    while (tmp > 0) {
        bits_needed++;
        tmp >>= 1;
    }
    // Estimate compression ratio:
    // NUM uses ~bits_needed per value, Raw uses 32 bits per value
    // Worth it if bits_needed <= 20 (saves >37.5%)
    if (bits_needed <= 16) return 1;
    if (bits_needed <= 20 && (small_count + medium_count) >= (count * 85) / 100) return 1;
    // Fallback: if 90% of deltas are small, still use NUM
    if ((small_count + medium_count) >= (count * 90) / 100) return 1;
    return 0;
}

/**
 * @brief Compresses one chunk, selecting NUM / GHI / GLO and falling back to
 * RAW when the encoded form would expand, then optionally appends a checksum.
 *
 * @param[in,out] ctx Compression context (level, buffers, checksum flag).
 * @param[in] chunk Input data for this chunk.
 * @param[in] src_sz Input size in bytes.
 * @param[out] dst Destination buffer.
 * @param[in] dst_cap Destination capacity in bytes.
 * @return Total bytes written (header + payload [+ checksum]) on success,
 * or a negative zxc_error_t code.
 */
// cppcheck-suppress unusedFunction
int zxc_compress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT chunk,
                               const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) {
    size_t w = 0;
    int res = ZXC_OK;
    int try_num = UNLIKELY(zxc_probe_is_numeric(chunk, src_sz));
    if (UNLIKELY(try_num)) {
        res = zxc_encode_block_num(ctx, chunk, src_sz, dst, dst_cap, &w);
        if (res != ZXC_OK || w > (src_sz - (src_sz >> 2)))  // w > 75% of src_sz
            try_num = 0;  // NUM didn't compress well, try GLO/GHI instead
    }
    if (LIKELY(!try_num)) {
        // Low levels favor the faster fixed-width GHI encoder.
        if (ctx->compression_level <= 2)
            res = zxc_encode_block_ghi(ctx, chunk, src_sz, dst, dst_cap, &w);
        else
            res = zxc_encode_block_glo(ctx, chunk, src_sz, dst, dst_cap, &w);
    }
    // Check expansion. W contains Header + Payload.
    if (UNLIKELY(res != ZXC_OK || w >= src_sz)) {
        res = zxc_encode_block_raw(chunk, src_sz, dst, dst_cap, &w);
        if (UNLIKELY(res != ZXC_OK)) return res;
    }
    if (ctx->checksum_enabled) {
        // Calculate checksum on the compressed payload (w currently excludes checksum)
        // Header is at dst, data starts at dst + ZXC_BLOCK_HEADER_SIZE
        if (UNLIKELY(w < ZXC_BLOCK_HEADER_SIZE || w + ZXC_BLOCK_CHECKSUM_SIZE > dst_cap))
            return ZXC_ERROR_OVERFLOW;
        uint32_t payload_sz = (uint32_t)(w - ZXC_BLOCK_HEADER_SIZE);
        uint32_t crc = zxc_checksum(dst + ZXC_BLOCK_HEADER_SIZE, payload_sz, ZXC_CHECKSUM_RAPIDHASH);
        zxc_store_le32(dst + w, crc);
        w += ZXC_BLOCK_CHECKSUM_SIZE;
    }
    return (int)w;
}
zxc-0.9.1/src/lib/zxc_decompress.c000066400000000000000000002251771515574030100170730ustar00rootroot00000000000000/*
 * ZXC - High-performance lossless compression
 *
 * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors.
 * SPDX-License-Identifier: BSD-3-Clause
 */

/**
 * @file zxc_decompress.c
 * @brief Block-level decompression: NUM / GLO / GHI / RAW decoding with
 * SIMD-accelerated prefix-sum, bit-unpacking, and overlapping copies.
 *
 * Like @ref zxc_compress.c, this file is compiled multiple times with
 * @c ZXC_FUNCTION_SUFFIX to produce per-ISA variants.
 */

#include "../../include/zxc_error.h"
#include "../../include/zxc_sans_io.h"
#include "zxc_internal.h"

/*
 * Function Multi-Versioning Support
 * If ZXC_FUNCTION_SUFFIX is defined (e.g. _avx2), rename the public entry point.
 */
#ifdef ZXC_FUNCTION_SUFFIX
#define ZXC_CAT_IMPL(x, y) x##y
#define ZXC_CAT(x, y) ZXC_CAT_IMPL(x, y)
#define zxc_decompress_chunk_wrapper ZXC_CAT(zxc_decompress_chunk_wrapper, ZXC_FUNCTION_SUFFIX)
#endif

/**
 * @brief Consumes a specified number of bits from the bit reader buffer without
 * performing safety checks.
 *
 * This function advances the bit reader's state by `n` bits. It is marked as
 * always inline for performance critical paths.
 *
 * @warning This is a "fast" variant, meaning it assumes the buffer has enough
 * bits available. The caller is responsible for ensuring that at least `n` bits
 * are present in the accumulator or buffer before calling this function to
 * avoid undefined behavior or reading past valid memory.
 *
 * @param[in,out] br Pointer to the bit reader instance.
 * @param[in] n The number of bits to consume (must be <= 32, typically <= 24
 * depending on implementation).
 * @return The value of the consumed bits as a 32-bit unsigned integer.
 */
static ZXC_ALWAYS_INLINE uint32_t zxc_br_consume_fast(zxc_bit_reader_t* br, uint8_t n) {
#if defined(__BMI2__) && (defined(__x86_64__) || defined(_M_X64))
    // BMI2 Optimization: _bzhi_u64(x, n) copies the lower n bits of x to dst and
    // clears the rest. It is equivalent to x & ((1ULL << n) - 1) but executes in
    // a single cycle without dependency chains.
    const uint32_t val = (uint32_t)_bzhi_u64(br->accum, n);
#else
    // NOTE(review): (1ULL << n) would be UB for n == 64; the contract above
    // keeps n <= 32, so the shift is always defined here.
    const uint32_t val = (uint32_t)(br->accum & ((1ULL << n) - 1));
#endif
    // Drop the consumed bits from the accumulator and bit count.
    br->accum >>= n;
    br->bits -= n;
    return val;
}

/**
 * @brief Reads a Prefix Varint encoded integer from a stream.
 *
 * This function decodes a 32-bit unsigned integer encoded in Prefix Varint format
 * from the provided byte stream. Unary prefix bits in the first byte determine
 * the total length (1-5 bytes).
 *
 * Format:
 * - 1 byte (0xxxxxxx): 7 bits (val < 128)
 * - 2 bytes (10xxxxxx ...): 14 bits (val < 16384)
 * - 3 bytes (110xxxxx ...): 21 bits (val < 2M)
 * - 4 bytes (1110xxxx ...): 28 bits (val < 256M)
 * - 5 bytes (11110xxx ...): 32 bits (Full Range)
 *
 * @param[in,out] ptr Pointer to a pointer to the current position in the stream.
 * @param[in] end Pointer to the end of the readable stream (for bounds checking).
 * @return The decoded 32-bit integer, or 0 if reading would overflow bounds (safe default).
*/ static ZXC_ALWAYS_INLINE uint32_t zxc_read_varint(const uint8_t** ptr, const uint8_t* end) { const uint8_t* p = *ptr; // Bounds check: need at least 1 byte if (UNLIKELY(p >= end)) return 0; const uint32_t b0 = p[0]; // 1 Byte: 0xxxxxxx (7 bits) -> val < 128 if (LIKELY(b0 < 128)) { *ptr = p + 1; return b0; } // 2 Bytes: 10xxxxxx xxxxxxxx (14 bits) if (LIKELY(b0 < 0xC0)) { if (UNLIKELY(p + 1 >= end)) { *ptr = end; return 0; } *ptr = p + 2; return (b0 & 0x3F) | ((uint32_t)p[1] << 6); } // 3 Bytes: 110xxxxx xxxxxxxx xxxxxxxx (21 bits) if (LIKELY(b0 < 0xE0)) { if (UNLIKELY(p + 2 >= end)) { *ptr = end; return 0; } *ptr = p + 3; return (b0 & 0x1F) | ((uint32_t)p[1] << 5) | ((uint32_t)p[2] << 13); } // 4 Bytes: 1110xxxx ... (28 bits) if (UNLIKELY(b0 < 0xF0)) { if (UNLIKELY(p + 3 >= end)) { *ptr = end; return 0; } *ptr = p + 4; return (b0 & 0x0F) | ((uint32_t)p[1] << 4) | ((uint32_t)p[2] << 12) | ((uint32_t)p[3] << 20); } // 5 Bytes: 11110xxx ... (32 bits) if (UNLIKELY(p + 4 >= end)) { *ptr = end; return 0; } *ptr = p + 5; return (b0 & 0x07) | ((uint32_t)p[1] << 3) | ((uint32_t)p[2] << 11) | ((uint32_t)p[3] << 19) | ((uint32_t)p[4] << 27); } /** * @brief Shuffle masks for overlapping copies with small offsets (0-15). * * Shared between ARM NEON and x86 SSSE3. Each row defines how to replicate * source bytes to fill 16 bytes when offset < 16. */ #if defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32) || defined(ZXC_USE_AVX2) || \ defined(ZXC_USE_AVX512) /** * @brief Precomputed masks for handling overlapping data during decompression. * * This 16x16 lookup table contains 128-bit aligned masks used to efficiently * mask off or combine bytes when processing overlapping copy operations or * boundary conditions in the ZXC decompression algorithm. * * The alignment to 16 bytes ensures compatibility with SIMD instructions * (like SSE/AVX) for optimized memory operations. 
 */
static const ZXC_ALIGN(16) uint8_t zxc_overlap_masks[16][16] = {
    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},      // off=0 (unused)
    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},      // off=1 (RLE handled separately)
    {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},      // off=2
    {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},      // off=3
    {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},      // off=4
    {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},      // off=5
    {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},      // off=6
    {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},      // off=7
    {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7},      // off=8
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6},      // off=9
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5},      // off=10
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4},     // off=11
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3},    // off=12
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2},   // off=13
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1},  // off=14
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}  // off=15
};
#endif

/**
 * @brief Copies 16 bytes from an overlapping source to the destination.
 *
 * This function is designed to handle memory copies where the source and
 * destination regions might overlap, specifically copying 16 bytes from
 * `dst - off` to `dst`. It is typically used in decompression routines
 * (like LZ77) where repeating a previous sequence is required.
 *
 * Handles NEON64, NEON32, SSSE3/AVX2 and generic scalar fallback.
 *
 * @param[out] dst Pointer to the destination buffer where bytes will be written.
 * @param[in] off The offset backwards from the destination pointer to read from.
 * (i.e., source address is `dst - off`).
 */
// codeql[cpp/unused-static-function] : False positive, used in DECODE_SEQ_SAFE/FAST macros
static ZXC_ALWAYS_INLINE void zxc_copy_overlap16(uint8_t* dst, uint32_t off) {
    // off is always >= ZXC_LZ_OFFSET_BIAS by design (offset bias encoding: stored +
    // ZXC_LZ_OFFSET_BIAS)
    // NOTE(review): the SIMD paths index zxc_overlap_masks[off], so callers must
    // also guarantee off < 16 here -- confirm at the call sites in the decode macros.
#if defined(ZXC_USE_NEON64)
    uint8x16_t mask = vld1q_u8(zxc_overlap_masks[off]);
    uint8x16_t src_data = vld1q_u8(dst - off);
    vst1q_u8(dst, vqtbl1q_u8(src_data, mask));
#elif defined(ZXC_USE_NEON32)
    // NEON32 lacks a single 16-byte table lookup; use two 8-byte vtbl2 lookups.
    uint8x8x2_t src_tbl;
    src_tbl.val[0] = vld1_u8(dst - off);
    src_tbl.val[1] = vld1_u8(dst - off + 8);
    uint8x8_t mask_lo = vld1_u8(zxc_overlap_masks[off]);
    uint8x8_t mask_hi = vld1_u8(zxc_overlap_masks[off] + 8);
    vst1_u8(dst, vtbl2_u8(src_tbl, mask_lo));
    vst1_u8(dst + 8, vtbl2_u8(src_tbl, mask_hi));
#elif defined(ZXC_USE_AVX2) || defined(ZXC_USE_AVX512)
    __m128i mask = _mm_load_si128((const __m128i*)zxc_overlap_masks[off]);
    __m128i src_data = _mm_loadu_si128((const __m128i*)(dst - off));
    _mm_storeu_si128((__m128i*)dst, _mm_shuffle_epi8(src_data, mask));
#else
    // Scalar fallback: byte-wise modular replication of the source pattern.
    const uint8_t* src = dst - off;
    for (size_t i = 0; i < 16; i++) {
        dst[i] = src[i % off];
    }
#endif
}

#if defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32)
/**
 * @brief Computes the prefix sum of a 128-bit vector of 32-bit unsigned
 * integers using NEON intrinsics.
 *
 * This function calculates the running total of the elements in the input
 * vector `v`. If the input vector is `[a, b, c, d]`, the result will be `[a,
 * a+b, a+b+c, a+b+c+d]`. This operation is typically used for calculating
 * cumulative distributions or offsets in parallel.
 *
 * @param[in] v The input vector containing four 32-bit unsigned integers.
 * @return A uint32x4_t vector containing the prefix sums.
 */
static ZXC_ALWAYS_INLINE uint32x4_t zxc_neon_prefix_sum_u32(uint32x4_t v) {
    const uint32x4_t zero = vdupq_n_u32(0);  // Create a vector of zeros
    // Rotate right by 1 element (shift 4 bytes)
    const uint32x4_t s1 =
        vreinterpretq_u32_u8(vextq_u8(vreinterpretq_u8_u32(zero), vreinterpretq_u8_u32(v), 12));
    v = vaddq_u32(v, s1);  // Add shifted version: [a, b, c, d] + [0, a, b, c] ->
                           // [a, a+b, b+c, c+d]
    // Rotate right by 2 elements (shift 8 bytes)
    const uint32x4_t s2 =
        vreinterpretq_u32_u8(vextq_u8(vreinterpretq_u8_u32(zero), vreinterpretq_u8_u32(v), 8));
    v = vaddq_u32(v, s2);  // Add shifted version to complete prefix sum
    return v;
}
#endif

#if defined(ZXC_USE_AVX2)
/**
 * @brief Computes the prefix sum of 32-bit integers within a 256-bit vector.
 *
 * This function calculates the cumulative sum of the eight 32-bit integers
 * contained in the input vector `v`.
 *
 * Operation logic (conceptually):
 * out[0] = v[0]
 * out[1] = v[0] + v[1]
 * ...
 * out[7] = v[0] + v[1] + ... + v[7]
 *
 * @param[in] v The input 256-bit vector containing eight 32-bit integers.
 * @return A 256-bit vector containing the prefix sums of the input elements.
 */
// codeql[cpp/unused-static-function] : Used conditionally when ZXC_USE_AVX2 is defined
static ZXC_ALWAYS_INLINE __m256i zxc_mm256_prefix_sum_epi32(__m256i v) {
    // In-lane log-step prefix sum (each _mm256_slli_si256 shifts within 128-bit lanes).
    v = _mm256_add_epi32(v, _mm256_slli_si256(v, 4));  // Add value shifted by 1 element
    v = _mm256_add_epi32(v, _mm256_slli_si256(v, 8));  // Add value shifted by 2 elements
    // Use permute/shuffle to bridge the 128-bit lane gap
    __m256i v_bridge = _mm256_permute2x128_si256(v, v, 0x00);  // Duplicate lower 128 to upper
    v_bridge = _mm256_shuffle_epi32(v_bridge, 0xFF);  // Broadcast last element of lower 128
    v_bridge =
        _mm256_blend_epi32(_mm256_setzero_si256(), v_bridge, 0xF0);  // Only apply to upper lane
    return _mm256_add_epi32(v, v_bridge);  // Add bridge value to upper lane
}
#endif

#if defined(ZXC_USE_AVX512)
/**
 * @brief Computes the prefix sum of 32-bit integers within a 512-bit vector.
 *
 * This function calculates the running sum of the 16 packed 32-bit integers
 * in the input vector `v`.
 *
 * For an input vector v = [x0, x1, x2, ... x15], the result will be:
 * [x0, x0+x1, x0+x1+x2, ... , sum(x0..x15)].
 *
 * @note This function is forced inline for performance reasons.
 *
 * @param[in] v The input 512-bit vector containing sixteen 32-bit integers.
 * @return A 512-bit vector containing the prefix sums of the input elements.
 */
static ZXC_ALWAYS_INLINE __m512i zxc_mm512_prefix_sum_epi32(__m512i v) {
    // In-lane log-step prefix sum, then serial lane-to-lane carry propagation.
    __m512i t = _mm512_bslli_epi128(v, 4);  // Shift left by 4 bytes (1 int)
    v = _mm512_add_epi32(v, t);             // Add shifted value
    t = _mm512_bslli_epi128(v, 8);          // Shift left by 8 bytes (2 ints)
    v = _mm512_add_epi32(v, t);             // Add shifted value
    // Propagate sums across 128-bit lanes (sequential dependency)
    __m512i v_l0 = _mm512_shuffle_i32x4(v, v, 0x00);  // Broadcast lane 0
    v_l0 = _mm512_shuffle_epi32(v_l0, 0xFF);          // Broadcast last element of lane 0
    v = _mm512_mask_add_epi32(v, 0x00F0, v, v_l0);    // Add to lane 1 only
    __m512i v_l1 = _mm512_shuffle_i32x4(v, v, 0x55);  // Broadcast lane 1
    v_l1 = _mm512_shuffle_epi32(v_l1, 0xFF);          // Broadcast last element of lane 1
    v = _mm512_mask_add_epi32(v, 0x0F00, v, v_l1);    // Add to lane 2 only
    __m512i v_l2 = _mm512_shuffle_i32x4(v, v, 0xAA);  // Broadcast lane 2
    v_l2 = _mm512_shuffle_epi32(v_l2, 0xFF);          // Broadcast last element of lane 2
    v = _mm512_mask_add_epi32(v, 0xF000, v, v_l2);    // Add to lane 3 only
    return v;
}
#endif

/**
 * @brief Decodes a block of numerical data compressed with the ZXC format.
 *
 * This function reads a compressed numerical block from the source buffer,
 * parses the header to determine the number of values and encoding parameters,
 * and then decompresses the data into the destination buffer.
 *
 * **Algorithm Details:**
 * 1. **Header Parsing:** Reads the `zxc_num_header_t` to get the count of
 * values.
 * 2. **Bit Unpacking:** For each chunk of values, it initializes a bit reader.
 * - **Unrolling:** The main loop is unrolled 4x to minimize branch overhead
 *   and maximize instruction throughput.
 * 3. **ZigZag Decoding:** Converts the unsigned unpacked value back to a signed
 *    delta using `(n >> 1) ^ -(n & 1)`.
 * 4. **Delta Reconstruction:** Adds the signed delta to a `running_val`
 *    accumulator to recover the original integer sequence.
 *
 * @param[in] src Pointer to the source buffer containing compressed data.
 * @param[in] src_size Size of the source buffer in bytes.
 * @param[out] dst Pointer to the destination buffer where decompressed data will be
 * written.
 * @param[in] dst_capacity Maximum capacity of the destination buffer in bytes.
 *
 * @return The number of bytes written to the destination buffer on success,
 * or a negative zxc_error_t code if an error occurs (e.g., buffer overflow, invalid header,
 * or malformed compressed stream).
 */
static int zxc_decode_block_num(const uint8_t* RESTRICT src, const size_t src_size,
                                uint8_t* RESTRICT dst, const size_t dst_capacity) {
    zxc_num_header_t nh;
    if (UNLIKELY(zxc_read_num_header(src, src_size, &nh) != ZXC_OK)) return ZXC_ERROR_BAD_HEADER;

    size_t offset = ZXC_NUM_HEADER_BINARY_SIZE;  // Cursor past the block header
    uint8_t* d_ptr = dst;
    const uint8_t* const d_end = dst + dst_capacity;
    uint64_t vals_remaining = nh.n_values;
    uint32_t running_val = 0;  // Delta-decoding accumulator, carried across chunks

    // Scratch for one batch of zigzag-decoded deltas.
    // Cache-line aligned so the SIMD paths below can use aligned loads.
    ZXC_ALIGN(ZXC_CACHE_LINE_SIZE) uint32_t deltas[ZXC_NUM_DEC_BATCH];

    while (vals_remaining > 0) {
        // Each chunk starts with a fixed-size header; reject truncated input.
        if (UNLIKELY(offset + ZXC_NUM_CHUNK_HEADER_SIZE > src_size)) return ZXC_ERROR_SRC_TOO_SMALL;
        // Chunk header layout as read here: value count (u16 at +0), bit width
        // (u16 at +2), payload size (u32 at +12). Bytes 4..11 are not read
        // here — presumably reserved/used by the encoder; TODO confirm against writer.
        const uint16_t nvals = zxc_le16(src + offset);
        const uint16_t bits = zxc_le16(src + offset + 2);
        const uint32_t psize = zxc_le32(src + offset + 12);  // padding + nvals + bits
        offset += ZXC_NUM_CHUNK_HEADER_SIZE;

        // Reject chunks that overflow the remaining value count, the source
        // payload, the destination capacity, or declare a width > 32 bits.
        if (UNLIKELY(nvals > vals_remaining || src_size < offset + psize ||
                     (size_t)(d_end - d_ptr) < (size_t)nvals * sizeof(uint32_t) ||
                     bits > (sizeof(uint32_t) * ZXC_BITS_PER_BYTE)))
            return ZXC_ERROR_CORRUPT_DATA;

        zxc_bit_reader_t br;
        zxc_br_init(&br, src + offset, psize);

        size_t i = 0;
        // Batched main loop: unpack + zigzag-decode ZXC_NUM_DEC_BATCH deltas,
        // then reconstruct the running values (SIMD prefix sum where available).
        for (; i + ZXC_NUM_DEC_BATCH <= nvals; i += ZXC_NUM_DEC_BATCH) {
            // 4x-unrolled bit unpacking to minimize branch overhead.
            for (int k = 0; k < ZXC_NUM_DEC_BATCH; k += 4) {
                zxc_br_ensure(&br, bits);
                deltas[k + 0] = zxc_zigzag_decode(zxc_br_consume_fast(&br, (uint8_t)bits));
                zxc_br_ensure(&br, bits);
                deltas[k + 1] = zxc_zigzag_decode(zxc_br_consume_fast(&br, (uint8_t)bits));
                zxc_br_ensure(&br, bits);
                deltas[k + 2] = zxc_zigzag_decode(zxc_br_consume_fast(&br, (uint8_t)bits));
                zxc_br_ensure(&br, bits);
                deltas[k + 3] = zxc_zigzag_decode(zxc_br_consume_fast(&br, (uint8_t)bits));
            }
            uint32_t* batch_dst = (uint32_t*)d_ptr;
#if defined(ZXC_USE_AVX512)
            for (int k = 0; k < ZXC_NUM_DEC_BATCH; k += 16) {
                __m512i v_deltas = _mm512_load_si512((void*)&deltas[k]);  // Load 16 deltas
                __m512i v_run = _mm512_set1_epi32(running_val);  // Broadcast current running total
                __m512i v_sum = zxc_mm512_prefix_sum_epi32(v_deltas);  // Compute local prefix sums
                v_sum = _mm512_add_epi32(v_sum, v_run);                // Add base running total
                _mm512_storeu_si512((void*)&batch_dst[k], v_sum);      // Store decoded values
                // Extract the last value (15th element) to update running_val for next
                // batch
                __m128i v_last128 = _mm512_extracti32x4_epi32(v_sum, 3);
                running_val = (uint32_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(v_last128, 0xFF));
            }
#elif defined(ZXC_USE_AVX2)
            for (int k = 0; k < ZXC_NUM_DEC_BATCH; k += 8) {
                __m256i v_deltas = _mm256_load_si256((const __m256i*)&deltas[k]);  // Load 8 deltas
                __m256i v_run = _mm256_set1_epi32(running_val);  // Broadcast running total
                __m256i v_sum = zxc_mm256_prefix_sum_epi32(v_deltas);  // Compute local prefix sums
                v_sum = _mm256_add_epi32(v_sum, v_run);                // Add base
                _mm256_storeu_si256((__m256i*)&batch_dst[k], v_sum);   // Store decoded values
                running_val = ((uint32_t*)&batch_dst[k])[7];           // Update running_val
            }
#elif defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32)
            uint32x4_t v_run = vdupq_n_u32(running_val);  // Broadcast running total
            for (int k = 0; k < ZXC_NUM_DEC_BATCH; k += 4) {
                uint32x4_t v_deltas = vld1q_u32(&deltas[k]);  // Load 4 deltas
                uint32x4_t v_sum = zxc_neon_prefix_sum_u32(v_deltas);  // Compute local prefix sums
                v_sum = vaddq_u32(v_sum, v_run);                       // Add base
                vst1q_u32(&batch_dst[k], v_sum);                       // Store decoded values
                running_val = vgetq_lane_u32(v_sum, 3);                // Extract last element
                v_run = vdupq_n_u32(running_val);                      // Update vector for next iter
            }
#else
            // Portable scalar fallback: sequential accumulation.
            for (int k = 0; k < ZXC_NUM_DEC_BATCH; k++) {
                running_val += deltas[k];
#ifdef ZXC_BIG_ENDIAN
                zxc_store_le32(&batch_dst[k], running_val);
#else
                batch_dst[k] = running_val;
#endif
            }
#endif
            d_ptr += ZXC_NUM_DEC_BATCH * sizeof(uint32_t);
        }
        // Scalar tail: fewer than ZXC_NUM_DEC_BATCH values remain in this chunk.
        for (; i < nvals; i++) {
            zxc_br_ensure(&br, bits);
            const uint32_t delta = zxc_zigzag_decode(zxc_br_consume_fast(&br, (uint8_t)bits));
            running_val += delta;
            zxc_store_le32(d_ptr, running_val);
            d_ptr += sizeof(uint32_t);
        }
        offset += psize;          // Advance past this chunk's payload
        vals_remaining -= nvals;  // nvals <= vals_remaining was validated above
    }
    return (int)(d_ptr - dst);
}

/**
 * @brief Decodes a General Low (GLO) format compressed block.
 *
 * This function handles the decoding of a compressed block formatted with the
 * internal GLO structure. The decompressed size is derived from Section
 * Descriptors within the compressed payload.
 *
 * @param[in,out] ctx Pointer to the compression context (`zxc_cctx_t`) containing
 *                reusable scratch buffers (e.g., the literal RLE buffer).
 * @param[in] src Pointer to the source buffer containing compressed data.
 * @param[in] src_size Size of the source buffer in bytes.
 * @param[out] dst Pointer to the destination buffer for decompressed data.
 * @param[in] dst_capacity Maximum capacity of the destination buffer.
 *
 * @return The number of bytes written to the destination buffer on success, or
 * a negative zxc_error_t code on failure (e.g., invalid header, buffer overflow, or corrupted
 * data).
 */
static int zxc_decode_block_glo(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src,
                                const size_t src_size, uint8_t* RESTRICT dst,
                                const size_t dst_capacity) {
    zxc_gnr_header_t gh;
    zxc_section_desc_t desc[ZXC_GLO_SECTIONS];
    if (UNLIKELY(zxc_read_glo_header_and_desc(src, src_size, &gh, desc) != ZXC_OK))
        return ZXC_ERROR_BAD_HEADER;

    // Payload starts right after the fixed header and the section descriptors.
    const uint8_t* p_data =
        src + ZXC_GLO_HEADER_BINARY_SIZE + ZXC_GLO_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE;
    const uint8_t* p_curr = p_data;

    // --- Literal Stream Setup ---
    // Section 0 holds the literals, either raw or RLE-compressed (gh.enc_lit == 1).
    const uint8_t* l_ptr;
    const uint8_t* l_end;
    uint8_t* rle_buf = NULL;
    size_t lit_stream_size = (size_t)(desc[0].sizes & ZXC_SECTION_SIZE_MASK);
    if (gh.enc_lit == 1) {
        // RLE-encoded literals: decompress them into the context scratch buffer
        // first. The upper 32 bits of desc[0].sizes carry the decoded size.
        const size_t required_size = (size_t)(desc[0].sizes >> 32);
        if (required_size > 0) {
            if (UNLIKELY(required_size > dst_capacity)) return ZXC_ERROR_DST_TOO_SMALL;
            // Grow the reusable literal buffer if needed (+ZXC_PAD_SIZE so wild
            // copies may overrun safely).
            if (ctx->lit_buffer_cap < required_size + ZXC_PAD_SIZE) {
                uint8_t* new_buf = (uint8_t*)realloc(ctx->lit_buffer, required_size + ZXC_PAD_SIZE);
                if (UNLIKELY(!new_buf)) {
                    free(ctx->lit_buffer);
                    ctx->lit_buffer = NULL;
                    ctx->lit_buffer_cap = 0;
                    return ZXC_ERROR_MEMORY;
                }
                ctx->lit_buffer = new_buf;
                ctx->lit_buffer_cap = required_size + ZXC_PAD_SIZE;
            }
            rle_buf = ctx->lit_buffer;
            if (UNLIKELY(!rle_buf || lit_stream_size > (size_t)(src + src_size - p_curr)))
                return ZXC_ERROR_CORRUPT_DATA;
            const uint8_t* r_ptr = p_curr;                         // RLE reader cursor
            const uint8_t* r_end = r_ptr + lit_stream_size;
            uint8_t* w_ptr = rle_buf;                              // decoded-literal writer
            const uint8_t* const w_end = rle_buf + required_size;
            while (r_ptr < r_end && w_ptr < w_end) {
                uint8_t token = *r_ptr++;
                if (LIKELY(!(token & ZXC_LIT_RLE_FLAG))) {
                    // Raw copy (most common path): use ZXC_PAD_SIZE-byte wild copies
                    // token is 7-bit (0-127), so len is 1-128 bytes
                    const uint32_t len = (uint32_t)token + 1;
                    if (UNLIKELY(w_ptr + len > w_end || r_ptr + len > r_end))
                        return ZXC_ERROR_CORRUPT_DATA;
                    // Destination has ZXC_PAD_SIZE bytes of safe overrun space.
                    // Source may not - check before wild copy.
                    // Fast path: source has ZXC_PAD_SIZE-byte read headroom (most common)
                    if (LIKELY(r_ptr + ZXC_PAD_SIZE <= r_end)) {
                        // Single 32-byte copy covers len <= ZXC_PAD_SIZE (most tokens)
                        zxc_copy32(w_ptr, r_ptr);
                        if (UNLIKELY(len > ZXC_PAD_SIZE)) {
                            // Unroll: max len=128, so max 4 copies total
                            // Use unconditional stores with overlap - faster than branches
                            if (len <= 2 * ZXC_PAD_SIZE) {
                                zxc_copy32(w_ptr + len - ZXC_PAD_SIZE, r_ptr + len - ZXC_PAD_SIZE);
                            } else if (len <= 3 * ZXC_PAD_SIZE) {
                                zxc_copy32(w_ptr + ZXC_PAD_SIZE, r_ptr + ZXC_PAD_SIZE);
                                zxc_copy32(w_ptr + len - ZXC_PAD_SIZE, r_ptr + len - ZXC_PAD_SIZE);
                            } else {
                                zxc_copy32(w_ptr + ZXC_PAD_SIZE, r_ptr + ZXC_PAD_SIZE);
                                zxc_copy32(w_ptr + 2 * ZXC_PAD_SIZE, r_ptr + 2 * ZXC_PAD_SIZE);
                                zxc_copy32(w_ptr + len - ZXC_PAD_SIZE, r_ptr + len - ZXC_PAD_SIZE);
                            }
                        }
                    } else {
                        // Near end of source: safe copy (rare cold path)
                        ZXC_MEMCPY(w_ptr, r_ptr, len);
                    }
                    w_ptr += len;
                    r_ptr += len;
                } else {
                    // RLE run: fill with single byte
                    const uint32_t len = (token & ZXC_LIT_LEN_MASK) + 4;
                    if (UNLIKELY(w_ptr + len > w_end || r_ptr >= r_end)) return ZXC_ERROR_CORRUPT_DATA;
                    ZXC_MEMSET(w_ptr, *r_ptr++, len);
                    w_ptr += len;
                }
            }
            // The RLE stream must decode to exactly the advertised size.
            if (UNLIKELY(w_ptr != w_end)) return ZXC_ERROR_CORRUPT_DATA;
            l_ptr = rle_buf;
            l_end = rle_buf + required_size;
        } else {
            // RLE-encoded but zero decoded bytes: empty literal stream.
            l_ptr = p_curr;
            l_end = p_curr;
        }
    } else {
        // Raw literals: read them directly from the source payload.
        l_ptr = p_curr;
        l_end = p_curr + lit_stream_size;
    }
    p_curr += lit_stream_size;

    // --- Stream Pointers & Validation ---
    const size_t sz_tokens = (size_t)(desc[1].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_offsets = (size_t)(desc[2].sizes & ZXC_SECTION_SIZE_MASK);
    const size_t sz_extras = (size_t)(desc[3].sizes & ZXC_SECTION_SIZE_MASK);
    // Validate stream sizes match sequence count (early rejection of malformed data)
    const size_t expected_off_size =
        (gh.enc_off == 1) ? (size_t)gh.n_sequences : (size_t)gh.n_sequences * 2;
    const uint8_t* t_ptr = p_curr;                    // token stream
    const uint8_t* o_ptr = t_ptr + sz_tokens;         // offset stream
    const uint8_t* e_ptr = o_ptr + sz_offsets;        // varint "extras" stream
    const uint8_t* const e_end = e_ptr + sz_extras;   // For vbyte overflow detection
    // Validate streams don't overflow source buffer
    // Validate stream sizes match sequence count (early rejection of malformed data)
    if (UNLIKELY((e_end != src + src_size) || sz_tokens < gh.n_sequences ||
                 sz_offsets < expected_off_size))
        return ZXC_ERROR_CORRUPT_DATA;

    uint8_t* d_ptr = dst;
    const uint8_t* const d_end = dst + dst_capacity;
    // Destination safe margin for 4x loop: max output without varint extension.
    // ll_max = 14, ml_max = 14 + 5 = 19, per-seq = 33, 4x = 132.
    // Add ZXC_PAD_SIZE (32) for the wild zxc_copy32 overshoot + 4 safety = 168.
    const uint8_t* const d_end_safe = d_end - (132 + ZXC_PAD_SIZE + 4);
    // Literal stream safe threshold for 4x-unrolled loops.
    // Without varint extension, max ll per sequence = ZXC_TOKEN_LL_MASK - 1 = 14.
    // For 4 sequences: 4 * 14 = 56. With this margin, l_ptr checks are only needed
    // on the cold varint path, keeping the hot path free of l_ptr overhead.
    const size_t glo_sz_lit = (size_t)(l_end - l_ptr);
    const size_t glo_margin_4x = 4 * (ZXC_TOKEN_LL_MASK - 1);  // 56
    const size_t glo_margin_1x = ZXC_TOKEN_LL_MASK - 1;        // 14
    const uint8_t* const l_end_safe_4x =
        (glo_sz_lit > glo_margin_4x) ? l_end - glo_margin_4x : l_ptr;
    const uint8_t* const l_end_safe_1x =
        (glo_sz_lit > glo_margin_1x) ? l_end - glo_margin_1x : l_ptr;

    uint32_t n_seq = gh.n_sequences;

    // Track bytes written for offset validation
    // For 1-byte offsets (enc_off==1): validate until 256 bytes written (max 8-bit offset)
    // For 2-byte offsets (enc_off==0): validate until 65536 bytes written (max 16-bit offset)
    // After threshold, all offsets are guaranteed valid (can't exceed written bytes)
    size_t written = 0;

// Macro for copy literal + match (uses 32-byte wild copies)
// SAFE version: validates offset against written bytes
#define DECODE_SEQ_SAFE(ll, ml, off)                                      \
    do {                                                                  \
        {                                                                 \
            const uint8_t* src_lit = l_ptr;                               \
            uint8_t* dst_lit = d_ptr;                                     \
            zxc_copy32(dst_lit, src_lit);                                 \
            if (UNLIKELY(ll > ZXC_PAD_SIZE)) {                            \
                dst_lit += ZXC_PAD_SIZE;                                  \
                src_lit += ZXC_PAD_SIZE;                                  \
                size_t rem = ll - ZXC_PAD_SIZE;                           \
                while (rem > ZXC_PAD_SIZE) {                              \
                    zxc_copy32(dst_lit, src_lit);                         \
                    dst_lit += ZXC_PAD_SIZE;                              \
                    src_lit += ZXC_PAD_SIZE;                              \
                    rem -= ZXC_PAD_SIZE;                                  \
                }                                                         \
                zxc_copy32(dst_lit, src_lit);                             \
            }                                                             \
            l_ptr += ll;                                                  \
            d_ptr += ll;                                                  \
            written += ll;                                                \
        }                                                                 \
        {                                                                 \
            if (UNLIKELY(off > written)) return ZXC_ERROR_BAD_OFFSET;     \
            const uint8_t* match_src = d_ptr - off;                       \
            if (LIKELY(off >= ZXC_PAD_SIZE)) {                            \
                zxc_copy32(d_ptr, match_src);                             \
                if (UNLIKELY(ml > ZXC_PAD_SIZE)) {                        \
                    uint8_t* out = d_ptr + ZXC_PAD_SIZE;                  \
                    const uint8_t* ref = match_src + ZXC_PAD_SIZE;        \
                    size_t rem = ml - ZXC_PAD_SIZE;                       \
                    while (rem > ZXC_PAD_SIZE) {                          \
                        zxc_copy32(out, ref);                             \
                        out += ZXC_PAD_SIZE;                              \
                        ref += ZXC_PAD_SIZE;                              \
                        rem -= ZXC_PAD_SIZE;                              \
                    }                                                     \
                    zxc_copy32(out, ref);                                 \
                }                                                         \
                d_ptr += ml;                                              \
                written += ml;                                            \
            } else if (off >= (ZXC_PAD_SIZE / 2)) {                       \
                zxc_copy16(d_ptr, match_src);                             \
                if (UNLIKELY(ml > (ZXC_PAD_SIZE / 2))) {                  \
                    uint8_t* out = d_ptr + (ZXC_PAD_SIZE / 2);            \
                    const uint8_t* ref = match_src + (ZXC_PAD_SIZE / 2);  \
                    size_t rem = ml - (ZXC_PAD_SIZE / 2);                 \
                    while (rem > (ZXC_PAD_SIZE / 2)) {                    \
                        zxc_copy16(out, ref);                             \
                        out += (ZXC_PAD_SIZE / 2);                        \
                        ref += (ZXC_PAD_SIZE / 2);                        \
                        rem -= (ZXC_PAD_SIZE / 2);                        \
                    }                                                     \
                    zxc_copy16(out, ref);                                 \
                }                                                         \
                d_ptr += ml;                                              \
                written += ml;                                            \
            } else if (off == 1) {                                        \
                ZXC_MEMSET(d_ptr, match_src[0], ml);                      \
                d_ptr += ml;                                              \
                written += ml;                                            \
            } else {                                                      \
                size_t copied = 0;                                        \
                while (copied < ml) {                                     \
                    zxc_copy_overlap16(d_ptr + copied, off);              \
                    copied += (ZXC_PAD_SIZE / 2);                         \
                }                                                         \
                d_ptr += ml;                                              \
                written += ml;                                            \
            }                                                             \
        }                                                                 \
    } while (0)

// FAST version: no offset validation (for use after written >= 256 or 65536)
#define DECODE_SEQ_FAST(ll, ml, off)                                      \
    do {                                                                  \
        {                                                                 \
            const uint8_t* src_lit = l_ptr;                               \
            uint8_t* dst_lit = d_ptr;                                     \
            zxc_copy32(dst_lit, src_lit);                                 \
            if (UNLIKELY(ll > ZXC_PAD_SIZE)) {                            \
                dst_lit += ZXC_PAD_SIZE;                                  \
                src_lit += ZXC_PAD_SIZE;                                  \
                size_t rem = ll - ZXC_PAD_SIZE;                           \
                while (rem > ZXC_PAD_SIZE) {                              \
                    zxc_copy32(dst_lit, src_lit);                         \
                    dst_lit += ZXC_PAD_SIZE;                              \
                    src_lit += ZXC_PAD_SIZE;                              \
                    rem -= ZXC_PAD_SIZE;                                  \
                }                                                         \
                zxc_copy32(dst_lit, src_lit);                             \
            }                                                             \
            l_ptr += ll;                                                  \
            d_ptr += ll;                                                  \
        }                                                                 \
        {                                                                 \
            const uint8_t* match_src = d_ptr - off;                       \
            if (LIKELY(off >= ZXC_PAD_SIZE)) {                            \
                zxc_copy32(d_ptr, match_src);                             \
                if (UNLIKELY(ml > ZXC_PAD_SIZE)) {                        \
                    uint8_t* out = d_ptr + ZXC_PAD_SIZE;                  \
                    const uint8_t* ref = match_src + ZXC_PAD_SIZE;        \
                    size_t rem = ml - ZXC_PAD_SIZE;                       \
                    while (rem > ZXC_PAD_SIZE) {                          \
                        zxc_copy32(out, ref);                             \
                        out += ZXC_PAD_SIZE;                              \
                        ref += ZXC_PAD_SIZE;                              \
                        rem -= ZXC_PAD_SIZE;                              \
                    }                                                     \
                    zxc_copy32(out, ref);                                 \
                }                                                         \
                d_ptr += ml;                                              \
            } else if (off >= (ZXC_PAD_SIZE / 2)) {                       \
                zxc_copy16(d_ptr, match_src);                             \
                if (UNLIKELY(ml > (ZXC_PAD_SIZE / 2))) {                  \
                    uint8_t* out = d_ptr + (ZXC_PAD_SIZE / 2);            \
                    const uint8_t* ref = match_src + (ZXC_PAD_SIZE / 2);  \
                    size_t rem = ml - (ZXC_PAD_SIZE / 2);                 \
                    while (rem > (ZXC_PAD_SIZE / 2)) {                    \
                        zxc_copy16(out, ref);                             \
                        out += (ZXC_PAD_SIZE / 2);                        \
                        ref += (ZXC_PAD_SIZE / 2);                        \
                        rem -= (ZXC_PAD_SIZE / 2);                        \
                    }                                                     \
                    zxc_copy16(out, ref);                                 \
                }                                                         \
                d_ptr += ml;                                              \
            } else if (off == 1) {                                        \
                ZXC_MEMSET(d_ptr, match_src[0], ml);                      \
                d_ptr += ml;                                              \
            } else {                                                      \
                size_t copied = 0;                                        \
                while (copied < ml) {                                     \
                    zxc_copy_overlap16(d_ptr + copied, off);              \
                    copied += (ZXC_PAD_SIZE / 2);                         \
                }                                                         \
                d_ptr += ml;                                              \
            }                                                             \
        }                                                                 \
    } while (0)

    // --- SAFE Loop: offset validation until threshold (4x unroll) ---
    // For 1-byte offsets: bounds check until 256 bytes written
    // For 2-byte offsets: bounds check until 65536 bytes written
    const size_t bounds_threshold = (gh.enc_off == 1) ? (1U << 8) : (1U << 16);
    while (n_seq >= 4 && d_ptr < d_end_safe && l_ptr < l_end_safe_4x &&
           written < bounds_threshold) {
        // Four 4-bit (ll, ml) token pairs packed into one 32-bit word.
        uint32_t tokens = zxc_le32(t_ptr);
        t_ptr += 4;
        uint32_t off1 = ZXC_LZ_OFFSET_BIAS, off2 = ZXC_LZ_OFFSET_BIAS, off3 = ZXC_LZ_OFFSET_BIAS,
                 off4 = ZXC_LZ_OFFSET_BIAS;
        if (gh.enc_off == 1) {
            // Read 4 x 1-byte offsets
            uint32_t offsets = zxc_le32(o_ptr);
            o_ptr += 4;
            off1 += offsets & 0xFF;
            off2 += (offsets >> 8) & 0xFF;
            off3 += (offsets >> 16) & 0xFF;
            off4 += (offsets >> 24) & 0xFF;
        } else {
            // Read 4 x 2-byte offsets
            uint64_t offsets = zxc_le64(o_ptr);
            o_ptr += 8;
            off1 += (uint32_t)(offsets & 0xFFFF);
            off2 += (uint32_t)((offsets >> 16) & 0xFFFF);
            off3 += (uint32_t)((offsets >> 32) & 0xFFFF);
            off4 += (uint32_t)((offsets >> 48) & 0xFFFF);
        }
        // Sequence 1: nibble pair at bits [7:4]/[3:0]; LL/ML saturate into varints.
        uint32_t ll1 = (tokens & 0x0F0) >> 4;
        uint32_t ml1 = (tokens & 0x00F);
        if (UNLIKELY(ll1 == ZXC_TOKEN_LL_MASK)) {
            ll1 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll1 > l_end || d_ptr + ll1 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml1 == ZXC_TOKEN_ML_MASK)) {
            ml1 += zxc_read_varint(&e_ptr, e_end);
            ml1 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll1 + ml1 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml1 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_SAFE(ll1, ml1, off1);
        // Sequence 2: nibble pair at bits [15:12]/[11:8].
        uint32_t ll2 = (tokens & 0x0F000) >> 12;
        uint32_t ml2 = (tokens & 0x00F00) >> 8;
        if (UNLIKELY(ll2 == ZXC_TOKEN_LL_MASK)) {
            ll2 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll2 > l_end || d_ptr + ll2 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml2 == ZXC_TOKEN_ML_MASK)) {
            ml2 += zxc_read_varint(&e_ptr, e_end);
            ml2 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll2 + ml2 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml2 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_SAFE(ll2, ml2, off2);
        // Sequence 3: nibble pair at bits [23:20]/[19:16].
        uint32_t ll3 = (tokens & 0x0F00000) >> 20;
        uint32_t ml3 = (tokens & 0x00F0000) >> 16;
        if (UNLIKELY(ll3 == ZXC_TOKEN_LL_MASK)) {
            ll3 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll3 > l_end || d_ptr + ll3 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml3 == ZXC_TOKEN_ML_MASK)) {
            ml3 += zxc_read_varint(&e_ptr, e_end);
            ml3 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll3 + ml3 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml3 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_SAFE(ll3, ml3, off3);
        // Sequence 4: nibble pair at bits [31:28]/[27:24].
        uint32_t ll4 = (tokens >> 28);
        uint32_t ml4 = (tokens >> 24) & 0x0F;
        if (UNLIKELY(ll4 == ZXC_TOKEN_LL_MASK)) {
            ll4 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll4 > l_end || d_ptr + ll4 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml4 == ZXC_TOKEN_ML_MASK)) {
            ml4 += zxc_read_varint(&e_ptr, e_end);
            ml4 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll4 + ml4 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml4 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_SAFE(ll4, ml4, off4);
        n_seq -= 4;
    }

    // --- FAST Loop: After threshold, no offset validation needed (4x unroll) ---
    while (n_seq >= 4 && d_ptr < d_end_safe && l_ptr < l_end_safe_4x) {
        uint32_t tokens = zxc_le32(t_ptr);
        t_ptr += 4;
        uint32_t off1 = ZXC_LZ_OFFSET_BIAS, off2 = ZXC_LZ_OFFSET_BIAS, off3 = ZXC_LZ_OFFSET_BIAS,
                 off4 = ZXC_LZ_OFFSET_BIAS;
        if (gh.enc_off == 1) {
            // Read 4 x 1-byte offsets
            uint32_t offsets = zxc_le32(o_ptr);
            o_ptr += 4;
            off1 += offsets & 0xFF;
            off2 += (offsets >> 8) & 0xFF;
            off3 += (offsets >> 16) & 0xFF;
            off4 += (offsets >> 24) & 0xFF;
        } else {
            // Read 4 x 2-byte offsets
            uint64_t offsets = zxc_le64(o_ptr);
            o_ptr += 8;
            off1 += (uint32_t)(offsets & 0xFFFF);
            off2 += (uint32_t)((offsets >> 16) & 0xFFFF);
            off3 += (uint32_t)((offsets >> 32) & 0xFFFF);
            off4 += (uint32_t)((offsets >> 48) & 0xFFFF);
        }
        uint32_t ll1 = (tokens & 0x0F0) >> 4;
        uint32_t ml1 = (tokens & 0x00F);
        if (UNLIKELY(ll1 == ZXC_TOKEN_LL_MASK)) {
            ll1 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll1 > l_end || d_ptr + ll1 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml1 == ZXC_TOKEN_ML_MASK)) {
            ml1 += zxc_read_varint(&e_ptr, e_end);
            ml1 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll1 + ml1 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml1 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_FAST(ll1, ml1, off1);
        uint32_t ll2 = (tokens & 0x0F000) >> 12;
        uint32_t ml2 = (tokens & 0x00F00) >> 8;
        if (UNLIKELY(ll2 == ZXC_TOKEN_LL_MASK)) {
            ll2 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll2 > l_end || d_ptr + ll2 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml2 == ZXC_TOKEN_ML_MASK)) {
            ml2 += zxc_read_varint(&e_ptr, e_end);
            ml2 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll2 + ml2 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml2 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_FAST(ll2, ml2, off2);
        uint32_t ll3 = (tokens & 0x0F00000) >> 20;
        uint32_t ml3 = (tokens & 0x00F0000) >> 16;
        if (UNLIKELY(ll3 == ZXC_TOKEN_LL_MASK)) {
            ll3 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll3 > l_end || d_ptr + ll3 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml3 == ZXC_TOKEN_ML_MASK)) {
            ml3 += zxc_read_varint(&e_ptr, e_end);
            ml3 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll3 + ml3 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml3 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_FAST(ll3, ml3, off3);
        uint32_t ll4 = (tokens >> 28);
        uint32_t ml4 = (tokens >> 24) & 0x0F;
        if (UNLIKELY(ll4 == ZXC_TOKEN_LL_MASK)) {
            ll4 += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll4 > l_end || d_ptr + ll4 > d_end)) return ZXC_ERROR_OVERFLOW;
        }
        if (UNLIKELY(ml4 == ZXC_TOKEN_ML_MASK)) {
            ml4 += zxc_read_varint(&e_ptr, e_end);
            ml4 += ZXC_LZ_MIN_MATCH_LEN;
            if (UNLIKELY(d_ptr + ll4 + ml4 > d_end)) return ZXC_ERROR_OVERFLOW;
        } else {
            ml4 += ZXC_LZ_MIN_MATCH_LEN;
        }
        DECODE_SEQ_FAST(ll4, ml4, off4);
        n_seq -= 4;
    }
#undef DECODE_SEQ_SAFE
#undef DECODE_SEQ_FAST

    // Validate vbyte reads didn't overflow
    if (UNLIKELY(e_ptr > e_end)) return ZXC_ERROR_CORRUPT_DATA;

    // --- Remaining 1 sequence (Fast Path) ---
    while (n_seq > 0 && d_ptr < d_end_safe && l_ptr < l_end_safe_1x) {
        // Save pointers before reading (in case we need to fall back to Safe Path)
        const uint8_t* t_save = t_ptr;
        const uint8_t* o_save = o_ptr;
        const uint8_t* e_save = e_ptr;
        uint8_t token = *t_ptr++;
        uint32_t ll = token >> ZXC_TOKEN_LIT_BITS;
        uint32_t ml = token & ZXC_TOKEN_ML_MASK;
        uint32_t offset = ZXC_LZ_OFFSET_BIAS;
        if (gh.enc_off == 1) {
            offset += *o_ptr++;  // 1-byte offset (biased)
        } else {
            offset += zxc_le16(o_ptr);  // 2-byte offset (biased)
            o_ptr += 2;
        }
        if (UNLIKELY(ll == ZXC_TOKEN_LL_MASK)) {
            ll += zxc_read_varint(&e_ptr, e_end);
            if (UNLIKELY(l_ptr + ll > l_end)) {
                t_ptr = t_save;
                o_ptr = o_save;
                e_ptr = e_save;
                break;
            }
        }
        if (UNLIKELY(ml == ZXC_TOKEN_ML_MASK)) ml += zxc_read_varint(&e_ptr, e_end);
        ml += ZXC_LZ_MIN_MATCH_LEN;
        // Check bounds before wild copies - if too close to end, fall back to Safe Path
        if (UNLIKELY(d_ptr + ll + ml + ZXC_PAD_SIZE > d_end)) {
            // Restore pointers and let Safe Path handle this sequence
            t_ptr = t_save;
            o_ptr = o_save;
            e_ptr = e_save;
            break;
        }
        {
            // Literal copy using wild 32-byte chunks (destination margin verified above).
            const uint8_t* src_lit = l_ptr;
            uint8_t* dst_lit = d_ptr;
            zxc_copy32(dst_lit, src_lit);
            if (UNLIKELY(ll > ZXC_PAD_SIZE)) {
                dst_lit += ZXC_PAD_SIZE;
                src_lit += ZXC_PAD_SIZE;
                size_t rem = ll - ZXC_PAD_SIZE;
                while (rem > ZXC_PAD_SIZE) {
                    zxc_copy32(dst_lit, src_lit);
                    dst_lit += ZXC_PAD_SIZE;
                    src_lit += ZXC_PAD_SIZE;
                    rem -= ZXC_PAD_SIZE;
                }
                zxc_copy32(dst_lit, src_lit);
            }
            l_ptr += ll;
            d_ptr += ll;
            written += ll;
        }
        {
            // Skip check if written >= bounds_threshold (256 for 8-bit, 65536 for 16-bit)
            if (UNLIKELY(written < bounds_threshold && offset > written))
                return ZXC_ERROR_BAD_OFFSET;
            const uint8_t* match_src = d_ptr - offset;
            if (LIKELY(offset >= ZXC_PAD_SIZE)) {
                zxc_copy32(d_ptr, match_src);
                if (UNLIKELY(ml > ZXC_PAD_SIZE)) {
                    uint8_t* out = d_ptr + ZXC_PAD_SIZE;
                    const uint8_t* ref = match_src + ZXC_PAD_SIZE;
                    size_t rem = ml - ZXC_PAD_SIZE;
                    while (rem > ZXC_PAD_SIZE) {
                        zxc_copy32(out, ref);
                        out += ZXC_PAD_SIZE;
                        ref += ZXC_PAD_SIZE;
                        rem -= ZXC_PAD_SIZE;
                    }
                    zxc_copy32(out, ref);
                }
                d_ptr += ml;
                written += ml;
            } else if (offset == 1) {
                // Single-byte period: a run of one repeated byte.
                ZXC_MEMSET(d_ptr, match_src[0], ml);
                d_ptr += ml;
                written += ml;
            } else {
                // Short overlapping match: byte-by-byte copy preserves the overlap semantics.
                for (size_t i = 0; i < ml; i++) d_ptr[i] = match_src[i];
                d_ptr += ml;
                written += ml;
            }
        }
        n_seq--;
    }

    // --- Safe Path for Remaining Sequences ---
    // Fully bounds-checked scalar decoding; no wild copies past d_end.
    while (n_seq > 0) {
        uint8_t token = *t_ptr++;
        uint32_t ll = token >> ZXC_TOKEN_LIT_BITS;
        uint32_t ml = token & ZXC_TOKEN_ML_MASK;
        uint32_t offset = ZXC_LZ_OFFSET_BIAS;
        if (gh.enc_off == 1) {
            offset += *o_ptr++;  // 1-byte offset (biased)
        } else {
            offset += zxc_le16(o_ptr);  // 2-byte offset (biased)
            o_ptr += 2;
        }
        if (UNLIKELY(ll == ZXC_TOKEN_LL_MASK)) ll += zxc_read_varint(&e_ptr, e_end);
        if (UNLIKELY(ml == ZXC_TOKEN_ML_MASK)) ml += zxc_read_varint(&e_ptr, e_end);
        ml += ZXC_LZ_MIN_MATCH_LEN;
        if (UNLIKELY(d_ptr + ll > d_end || l_ptr + ll > l_end)) return ZXC_ERROR_OVERFLOW;
        ZXC_MEMCPY(d_ptr, l_ptr, ll);
        l_ptr += ll;
        d_ptr += ll;
        const uint8_t* match_src = d_ptr - offset;
        if (UNLIKELY(match_src < dst || d_ptr + ml > d_end)) return ZXC_ERROR_BAD_OFFSET;
        if (offset < ml) {
            // Overlapping match: must copy byte-by-byte.
            for (size_t i = 0; i < ml; i++) d_ptr[i] = match_src[i];
        } else {
            ZXC_MEMCPY(d_ptr, match_src, ml);
        }
        d_ptr += ml;
        n_seq--;
    }

    // --- Trailing Literals ---
    // Copy remaining literals from source stream (literal exhaustion)
    if (UNLIKELY(l_ptr > l_end)) return ZXC_ERROR_CORRUPT_DATA;
    const size_t remaining_literals = (size_t)(l_end - l_ptr);
    if (remaining_literals > 0) {
        if (UNLIKELY(d_ptr + remaining_literals > d_end)) return ZXC_ERROR_OVERFLOW;
        ZXC_MEMCPY(d_ptr, l_ptr, remaining_literals);
        d_ptr += remaining_literals;
    }
    return (int)(d_ptr - dst);
}

/**
 * @brief Decodes a GHI (General High) format compressed block.
 *
 * This function handles the decoding of a compressed block formatted with the
 * internal GHI structure. The decompressed size is derived from Section
 * Descriptors within the compressed payload.
 *
 * @param[in] ctx Pointer to the decompression context (unused in current implementation).
* @param[in] src Pointer to the source buffer containing compressed data. * @param[in] src_size Size of the source buffer in bytes. * @param[out] dst Pointer to the destination buffer for decompressed data. * @param[in] dst_capacity Capacity of the destination buffer in bytes. * @return int Returns the number of bytes written on success, or a negative zxc_error_t code on * failure. */ static int zxc_decode_block_ghi(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_size, uint8_t* RESTRICT dst, const size_t dst_capacity) { (void)ctx; zxc_gnr_header_t gh; zxc_section_desc_t desc[ZXC_GHI_SECTIONS]; if (UNLIKELY(zxc_read_ghi_header_and_desc(src, src_size, &gh, desc) != ZXC_OK)) return ZXC_ERROR_BAD_HEADER; const uint8_t* p_curr = src + ZXC_GHI_HEADER_BINARY_SIZE + ZXC_GHI_SECTIONS * ZXC_SECTION_DESC_BINARY_SIZE; // --- Stream Pointers & Validation --- const size_t sz_lit = (uint32_t)desc[0].sizes; const size_t sz_seqs = (uint32_t)desc[1].sizes; const size_t sz_exts = (uint32_t)desc[2].sizes; const uint8_t* l_ptr = p_curr; const uint8_t* l_end = l_ptr + sz_lit; p_curr += sz_lit; const uint8_t* seq_ptr = p_curr; const uint8_t* extras_ptr = p_curr + sz_seqs; const uint8_t* const extras_end = extras_ptr + sz_exts; // Validate streams don't overflow source buffer + // Validate sequence stream size matches sequence count if (UNLIKELY((extras_end != src + src_size) || (sz_seqs < (size_t)gh.n_sequences * 4))) return ZXC_ERROR_CORRUPT_DATA; uint8_t* d_ptr = dst; const uint8_t* const d_end = dst + dst_capacity; const uint8_t* const d_end_safe = d_end - (ZXC_PAD_SIZE * 4); // 128 // Safety margin for 4x unrolled loop: 4 * (ZXC_SEQ_LL_MASK LL + // ZXC_SEQ_ML_MASK+ZXC_LZ_MIN_MATCH_LEN ML) + ZXC_PAD_SIZE Pad = 4 x (255 + 255 + 5) + 32 = 2092 const uint8_t* const d_end_fast = d_end - (ZXC_PAD_SIZE * 66); // 2112 // Literal stream safe thresholds for GHI loops. // Without varint extension, max ll per sequence = ZXC_SEQ_LL_MASK - 1 = 254. 
// For 4 sequences: 4 * 254 = 1016. With this margin, l_ptr checks are only needed // on the cold varint path, keeping the hot path free of l_ptr overhead. const size_t ghi_margin_4x = 4 * (ZXC_SEQ_LL_MASK - 1); // 1016 const size_t ghi_margin_1x = ZXC_SEQ_LL_MASK - 1; // 254 const uint8_t* const l_end_safe_4x = (sz_lit > ghi_margin_4x) ? l_end - ghi_margin_4x : l_ptr; const uint8_t* const l_end_safe_1x = (sz_lit > ghi_margin_1x) ? l_end - ghi_margin_1x : l_ptr; uint32_t n_seq = gh.n_sequences; // Track bytes written for offset validation // For 1-byte offsets (enc_off==1): validate until 256 bytes written (max 8-bit offset) // For 2-byte offsets (enc_off==0): validate until 65536 bytes written (max 16-bit offset) // After threshold, all offsets are guaranteed valid (can't exceed written bytes) size_t written = 0; // Macro for copy literal + match (uses 32-byte wild copies) // SAFE version: validates offset against written bytes #define DECODE_SEQ_SAFE(ll, ml, off) \ do { \ { \ const uint8_t* src_lit = l_ptr; \ uint8_t* dst_lit = d_ptr; \ zxc_copy32(dst_lit, src_lit); \ if (UNLIKELY(ll > ZXC_PAD_SIZE)) { \ dst_lit += ZXC_PAD_SIZE; \ src_lit += ZXC_PAD_SIZE; \ size_t rem = ll - ZXC_PAD_SIZE; \ while (rem > ZXC_PAD_SIZE) { \ zxc_copy32(dst_lit, src_lit); \ dst_lit += ZXC_PAD_SIZE; \ src_lit += ZXC_PAD_SIZE; \ rem -= ZXC_PAD_SIZE; \ } \ zxc_copy32(dst_lit, src_lit); \ } \ l_ptr += ll; \ d_ptr += ll; \ written += ll; \ } \ { \ if (UNLIKELY(off > written)) return ZXC_ERROR_BAD_OFFSET; \ const uint8_t* match_src = d_ptr - off; \ if (LIKELY(off >= ZXC_PAD_SIZE)) { \ zxc_copy32(d_ptr, match_src); \ if (UNLIKELY(ml > ZXC_PAD_SIZE)) { \ uint8_t* out = d_ptr + ZXC_PAD_SIZE; \ const uint8_t* ref = match_src + ZXC_PAD_SIZE; \ size_t rem = ml - ZXC_PAD_SIZE; \ while (rem > ZXC_PAD_SIZE) { \ zxc_copy32(out, ref); \ out += ZXC_PAD_SIZE; \ ref += ZXC_PAD_SIZE; \ rem -= ZXC_PAD_SIZE; \ } \ zxc_copy32(out, ref); \ } \ d_ptr += ml; \ written += ml; \ } else if (off >= (ZXC_PAD_SIZE / 
2)) { \ zxc_copy16(d_ptr, match_src); \ if (UNLIKELY(ml > (ZXC_PAD_SIZE / 2))) { \ uint8_t* out = d_ptr + (ZXC_PAD_SIZE / 2); \ const uint8_t* ref = match_src + (ZXC_PAD_SIZE / 2); \ size_t rem = ml - (ZXC_PAD_SIZE / 2); \ while (rem > (ZXC_PAD_SIZE / 2)) { \ zxc_copy16(out, ref); \ out += (ZXC_PAD_SIZE / 2); \ ref += (ZXC_PAD_SIZE / 2); \ rem -= (ZXC_PAD_SIZE / 2); \ } \ zxc_copy16(out, ref); \ } \ d_ptr += ml; \ written += ml; \ } else if (off == 1) { \ ZXC_MEMSET(d_ptr, match_src[0], ml); \ d_ptr += ml; \ written += ml; \ } else { \ size_t copied = 0; \ while (copied < ml) { \ zxc_copy_overlap16(d_ptr + copied, off); \ copied += (ZXC_PAD_SIZE / 2); \ } \ d_ptr += ml; \ written += ml; \ } \ } \ } while (0) // FAST version: no offset validation (for use after written >= 256 or 65536) #define DECODE_SEQ_FAST(ll, ml, off) \ do { \ { \ const uint8_t* src_lit = l_ptr; \ uint8_t* dst_lit = d_ptr; \ zxc_copy32(dst_lit, src_lit); \ if (UNLIKELY(ll > ZXC_PAD_SIZE)) { \ dst_lit += ZXC_PAD_SIZE; \ src_lit += ZXC_PAD_SIZE; \ size_t rem = ll - ZXC_PAD_SIZE; \ while (rem > ZXC_PAD_SIZE) { \ zxc_copy32(dst_lit, src_lit); \ dst_lit += ZXC_PAD_SIZE; \ src_lit += ZXC_PAD_SIZE; \ rem -= ZXC_PAD_SIZE; \ } \ zxc_copy32(dst_lit, src_lit); \ } \ l_ptr += ll; \ d_ptr += ll; \ } \ { \ const uint8_t* match_src = d_ptr - off; \ if (LIKELY(off >= ZXC_PAD_SIZE)) { \ zxc_copy32(d_ptr, match_src); \ if (UNLIKELY(ml > ZXC_PAD_SIZE)) { \ uint8_t* out = d_ptr + ZXC_PAD_SIZE; \ const uint8_t* ref = match_src + ZXC_PAD_SIZE; \ size_t rem = ml - ZXC_PAD_SIZE; \ while (rem > ZXC_PAD_SIZE) { \ zxc_copy32(out, ref); \ out += ZXC_PAD_SIZE; \ ref += ZXC_PAD_SIZE; \ rem -= ZXC_PAD_SIZE; \ } \ zxc_copy32(out, ref); \ } \ d_ptr += ml; \ } else if (off >= (ZXC_PAD_SIZE / 2)) { \ zxc_copy16(d_ptr, match_src); \ if (UNLIKELY(ml > (ZXC_PAD_SIZE / 2))) { \ uint8_t* out = d_ptr + (ZXC_PAD_SIZE / 2); \ const uint8_t* ref = match_src + (ZXC_PAD_SIZE / 2); \ size_t rem = ml - (ZXC_PAD_SIZE / 2); \ while (rem > 
(ZXC_PAD_SIZE / 2)) { \ zxc_copy16(out, ref); \ out += (ZXC_PAD_SIZE / 2); \ ref += (ZXC_PAD_SIZE / 2); \ rem -= (ZXC_PAD_SIZE / 2); \ } \ zxc_copy16(out, ref); \ } \ d_ptr += ml; \ } else if (off == 1) { \ ZXC_MEMSET(d_ptr, match_src[0], ml); \ d_ptr += ml; \ } else { \ size_t copied = 0; \ while (copied < ml) { \ zxc_copy_overlap16(d_ptr + copied, off); \ copied += (ZXC_PAD_SIZE / 2); \ } \ d_ptr += ml; \ } \ } \ } while (0) // --- SAFE Loop: offset validation until threshold --- // Since offset is 16-bit, threshold is 65536. // For 1-byte offsets (enc_off==1): validate until 256 bytes written // For 2-byte offsets (enc_off==0): validate until 65536 bytes written const size_t bounds_threshold = (gh.enc_off == 1) ? (1U << 8) : (1U << 16); while (n_seq > 0 && d_ptr < d_end_safe && written < bounds_threshold) { uint32_t seq = zxc_le32(seq_ptr); seq_ptr += 4; uint32_t ll = (uint32_t)(seq >> 24); if (UNLIKELY(ll == ZXC_SEQ_LL_MASK)) ll += zxc_read_varint(&extras_ptr, extras_end); uint32_t m_bits = (uint32_t)((seq >> 16) & 0xFF); uint32_t ml = m_bits + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m_bits == ZXC_SEQ_ML_MASK)) ml += zxc_read_varint(&extras_ptr, extras_end); uint32_t offset = (uint32_t)(seq & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; // Strict bounds check: sequence must fit, AND wild copies must not overshoot // Check both destination (d_ptr) and source literal stream (l_ptr) if (UNLIKELY(d_ptr + ll + ml + ZXC_PAD_SIZE > d_end || l_ptr + ll + ZXC_PAD_SIZE > l_end)) { // Fallback to exact copy (slow but safe) if (UNLIKELY(d_ptr + ll > d_end || l_ptr + ll > l_end)) return ZXC_ERROR_OVERFLOW; ZXC_MEMCPY(d_ptr, l_ptr, ll); l_ptr += ll; d_ptr += ll; written += ll; if (UNLIKELY(offset > written || d_ptr + ml > d_end)) return ZXC_ERROR_BAD_OFFSET; const uint8_t* match_src = d_ptr - offset; if (offset < ml) { for (size_t i = 0; i < ml; i++) d_ptr[i] = match_src[i]; } else { ZXC_MEMCPY(d_ptr, match_src, ml); } d_ptr += ml; written += ml; } else { // Safe to process with wild copies 
DECODE_SEQ_SAFE(ll, ml, offset); } n_seq--; } // --- FAST Loop: After threshold, check large margin to avoid individual bounds checks --- while (n_seq >= 4 && d_ptr < d_end_fast && l_ptr < l_end_safe_4x) { uint32_t s1 = zxc_le32(seq_ptr); uint32_t s2 = zxc_le32(seq_ptr + 4); uint32_t s3 = zxc_le32(seq_ptr + 8); uint32_t s4 = zxc_le32(seq_ptr + 12); seq_ptr += 16; uint32_t ll1 = (uint32_t)(s1 >> 24); if (UNLIKELY(ll1 == ZXC_SEQ_LL_MASK)) { ll1 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(l_ptr + ll1 > l_end || d_ptr + ll1 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t m1b = (uint32_t)((s1 >> 16) & 0xFF); uint32_t ml1 = m1b + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m1b == ZXC_SEQ_ML_MASK)) { ml1 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(d_ptr + ll1 + ml1 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t off1 = (uint32_t)(s1 & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; DECODE_SEQ_FAST(ll1, ml1, off1); uint32_t ll2 = (uint32_t)(s2 >> 24); if (UNLIKELY(ll2 == ZXC_SEQ_LL_MASK)) { ll2 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(l_ptr + ll2 > l_end || d_ptr + ll2 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t m2b = (uint32_t)((s2 >> 16) & 0xFF); uint32_t ml2 = m2b + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m2b == ZXC_SEQ_ML_MASK)) { ml2 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(d_ptr + ll2 + ml2 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t off2 = (uint32_t)(s2 & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; DECODE_SEQ_FAST(ll2, ml2, off2); uint32_t ll3 = (uint32_t)(s3 >> 24); if (UNLIKELY(ll3 == ZXC_SEQ_LL_MASK)) { ll3 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(l_ptr + ll3 > l_end || d_ptr + ll3 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t m3b = (uint32_t)((s3 >> 16) & 0xFF); uint32_t ml3 = m3b + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m3b == ZXC_SEQ_ML_MASK)) { ml3 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(d_ptr + ll3 + ml3 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t off3 = (uint32_t)(s3 & 0xFFFF) + 
ZXC_LZ_OFFSET_BIAS; DECODE_SEQ_FAST(ll3, ml3, off3); uint32_t ll4 = (uint32_t)(s4 >> 24); if (UNLIKELY(ll4 == ZXC_SEQ_LL_MASK)) { ll4 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(l_ptr + ll4 > l_end || d_ptr + ll4 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t m4b = (uint32_t)((s4 >> 16) & 0xFF); uint32_t ml4 = m4b + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m4b == ZXC_SEQ_ML_MASK)) { ml4 += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(d_ptr + ll4 + ml4 > d_end)) return ZXC_ERROR_OVERFLOW; } uint32_t off4 = (uint32_t)(s4 & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; DECODE_SEQ_FAST(ll4, ml4, off4); n_seq -= 4; } #undef DECODE_SEQ_SAFE #undef DECODE_SEQ_FAST // --- Remaining 1 sequence (Fast Path) --- while (n_seq > 0 && d_ptr < d_end_safe && l_ptr < l_end_safe_1x) { // Save state for fallback const uint8_t* seq_save = seq_ptr; const uint8_t* ext_save = extras_ptr; const uint32_t seq = zxc_le32(seq_ptr); seq_ptr += 4; uint32_t ll = (uint32_t)(seq >> 24); if (UNLIKELY(ll == ZXC_SEQ_LL_MASK)) { ll += zxc_read_varint(&extras_ptr, extras_end); if (UNLIKELY(l_ptr + ll > l_end)) { seq_ptr = seq_save; extras_ptr = ext_save; break; } } uint32_t m_bits = (uint32_t)((seq >> 16) & 0xFF); uint32_t ml = m_bits + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m_bits == ZXC_SEQ_ML_MASK)) ml += zxc_read_varint(&extras_ptr, extras_end); // Strict bounds checks (including wild copy overrun safety) if (UNLIKELY(d_ptr + ll + ml + ZXC_PAD_SIZE > d_end)) { // Restore state and break to Safe Path seq_ptr = seq_save; extras_ptr = ext_save; break; } uint32_t offset = (uint32_t)(seq & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; { const uint8_t* src_lit = l_ptr; uint8_t* dst_lit = d_ptr; zxc_copy32(dst_lit, src_lit); if (UNLIKELY(ll > ZXC_PAD_SIZE)) { dst_lit += ZXC_PAD_SIZE; src_lit += ZXC_PAD_SIZE; size_t rem = ll - ZXC_PAD_SIZE; while (rem > ZXC_PAD_SIZE) { zxc_copy32(dst_lit, src_lit); dst_lit += ZXC_PAD_SIZE; src_lit += ZXC_PAD_SIZE; rem -= ZXC_PAD_SIZE; } zxc_copy32(dst_lit, src_lit); } l_ptr += ll; d_ptr += 
ll; written += ll; } { // Skip check if written >= bounds_threshold (256 for 8-bit, 65536 for 16-bit) if (UNLIKELY(written < bounds_threshold && offset > written)) return ZXC_ERROR_BAD_OFFSET; const uint8_t* match_src = d_ptr - offset; if (LIKELY(offset >= ZXC_PAD_SIZE)) { zxc_copy32(d_ptr, match_src); if (UNLIKELY(ml > ZXC_PAD_SIZE)) { uint8_t* out = d_ptr + ZXC_PAD_SIZE; const uint8_t* ref = match_src + ZXC_PAD_SIZE; size_t rem = ml - ZXC_PAD_SIZE; while (rem > ZXC_PAD_SIZE) { zxc_copy32(out, ref); out += ZXC_PAD_SIZE; ref += ZXC_PAD_SIZE; rem -= ZXC_PAD_SIZE; } zxc_copy32(out, ref); } d_ptr += ml; written += ml; } else if (offset == 1) { ZXC_MEMSET(d_ptr, match_src[0], ml); d_ptr += ml; written += ml; } else { for (size_t i = 0; i < ml; i++) d_ptr[i] = match_src[i]; d_ptr += ml; written += ml; } } n_seq--; } // --- Safe Path for Remaining Sequences --- while (n_seq > 0) { uint32_t seq = zxc_le32(seq_ptr); seq_ptr += 4; uint32_t ll = (uint32_t)(seq >> 24); if (UNLIKELY(ll == ZXC_SEQ_LL_MASK)) ll += zxc_read_varint(&extras_ptr, extras_end); uint32_t m_bits = (uint32_t)((seq >> 16) & 0xFF); uint32_t ml = m_bits + ZXC_LZ_MIN_MATCH_LEN; if (UNLIKELY(m_bits == ZXC_SEQ_ML_MASK)) ml += zxc_read_varint(&extras_ptr, extras_end); uint32_t offset = (uint32_t)(seq & 0xFFFF) + ZXC_LZ_OFFSET_BIAS; if (UNLIKELY(d_ptr + ll > d_end || l_ptr + ll > l_end)) return ZXC_ERROR_OVERFLOW; ZXC_MEMCPY(d_ptr, l_ptr, ll); l_ptr += ll; d_ptr += ll; const uint8_t* match_src = d_ptr - offset; if (UNLIKELY(match_src < dst || d_ptr + ml > d_end)) return ZXC_ERROR_BAD_OFFSET; if (offset < ml) { for (size_t i = 0; i < ml; i++) d_ptr[i] = match_src[i]; } else { ZXC_MEMCPY(d_ptr, match_src, ml); } d_ptr += ml; n_seq--; } // --- Trailing Literals --- // Copy remaining literals from source stream (literal exhaustion) if (UNLIKELY(l_ptr > l_end)) return ZXC_ERROR_CORRUPT_DATA; const size_t remaining_literals = (size_t)(l_end - l_ptr); if (remaining_literals > 0) { if (UNLIKELY(d_ptr + 
remaining_literals > d_end)) return ZXC_ERROR_OVERFLOW; ZXC_MEMCPY(d_ptr, l_ptr, remaining_literals); d_ptr += remaining_literals; } return (int)(d_ptr - dst); } // cppcheck-suppress unusedFunction int zxc_decompress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) { if (UNLIKELY(src_sz < ZXC_BLOCK_HEADER_SIZE)) return ZXC_ERROR_SRC_TOO_SMALL; const uint8_t type = src[0]; const uint32_t comp_sz = zxc_le32(src + 3); const int has_crc = ctx->checksum_enabled; // Check bounds: Header + Body + Checksum(if any) const size_t expected_sz = (size_t)ZXC_BLOCK_HEADER_SIZE + comp_sz + (has_crc ? ZXC_BLOCK_CHECKSUM_SIZE : 0); if (UNLIKELY(src_sz < expected_sz)) return ZXC_ERROR_SRC_TOO_SMALL; const uint8_t* data = src + ZXC_BLOCK_HEADER_SIZE; if (has_crc) { const uint32_t stored = zxc_le32(data + comp_sz); const uint32_t calc = zxc_checksum(data, comp_sz, ZXC_CHECKSUM_RAPIDHASH); if (UNLIKELY(stored != calc)) return ZXC_ERROR_BAD_CHECKSUM; } int decoded_sz = ZXC_ERROR_BAD_BLOCK_TYPE; switch (type) { case ZXC_BLOCK_GLO: decoded_sz = zxc_decode_block_glo(ctx, data, comp_sz, dst, dst_cap); break; case ZXC_BLOCK_GHI: decoded_sz = zxc_decode_block_ghi(ctx, data, comp_sz, dst, dst_cap); break; case ZXC_BLOCK_RAW: // For RAW blocks, comp_sz == raw_sz (uncompressed data stored as-is) if (UNLIKELY(comp_sz > dst_cap)) return ZXC_ERROR_DST_TOO_SMALL; ZXC_MEMCPY(dst, data, comp_sz); decoded_sz = (int)comp_sz; break; case ZXC_BLOCK_NUM: decoded_sz = zxc_decode_block_num(data, comp_sz, dst, dst_cap); break; case ZXC_BLOCK_EOF: // EOF should be handled by the dispatcher, not here return ZXC_ERROR_CORRUPT_DATA; default: return ZXC_ERROR_BAD_BLOCK_TYPE; } return decoded_sz; } zxc-0.9.1/src/lib/zxc_dispatch.c000066400000000000000000000755471515574030100165340ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
* SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_dispatch.c * @brief Runtime CPU feature detection and SIMD dispatch layer. * * Detects AVX2/AVX512/NEON at runtime and routes compress/decompress calls * to the best available implementation via lazy-initialised function pointers. * Also contains the public one-shot buffer API (@ref zxc_compress, * @ref zxc_decompress, @ref zxc_get_decompressed_size). */ #include "../../include/zxc_error.h" #include "zxc_internal.h" #if defined(_MSC_VER) #include #endif #if defined(__linux__) && (defined(__arm__) || defined(_M_ARM)) #include #include #endif /* * ============================================================================ * PROTOTYPES FOR MULTI-VERSIONED VARIANTS * ============================================================================ * These are compiled in separate translation units with different flags. */ // Decompression Prototypes int zxc_decompress_chunk_wrapper_default(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #ifndef ZXC_ONLY_DEFAULT #if defined(__x86_64__) || defined(_M_X64) int zxc_decompress_chunk_wrapper_avx2(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); int zxc_decompress_chunk_wrapper_avx512(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM) int zxc_decompress_chunk_wrapper_neon(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #endif #endif // Compression Prototypes int zxc_compress_chunk_wrapper_default(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #if defined(__x86_64__) || defined(_M_X64) int 
zxc_compress_chunk_wrapper_avx2(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); int zxc_compress_chunk_wrapper_avx512(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM) int zxc_compress_chunk_wrapper_neon(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); #endif /* * ============================================================================ * CPU DETECTION LOGIC * ============================================================================ */ /** * @enum zxc_cpu_feature_t * @brief Detected CPU SIMD capability level. */ typedef enum { ZXC_CPU_GENERIC = 0, /**< @brief Scalar-only fallback. */ ZXC_CPU_AVX2 = 1, /**< @brief x86-64 AVX2 available. */ ZXC_CPU_AVX512 = 2, /**< @brief x86-64 AVX-512F+BW available. */ ZXC_CPU_NEON = 3 /**< @brief ARM NEON available. */ } zxc_cpu_feature_t; /** * @brief Probes the running CPU for SIMD support. * * Uses CPUID on x86-64 (MSVC and GCC/Clang paths), `getauxval` on * 32-bit ARM Linux, and compile-time constants on AArch64. * * @return The highest @ref zxc_cpu_feature_t level supported. */ static zxc_cpu_feature_t zxc_detect_cpu_features(void) { #ifdef ZXC_ONLY_DEFAULT return ZXC_CPU_GENERIC; #else zxc_cpu_feature_t features = ZXC_CPU_GENERIC; #if defined(__x86_64__) || defined(_M_X64) #if defined(_MSC_VER) // MSVC detection using __cpuid // Function ID 1: EAX=1. ECX: Bit 28=AVX. // Function ID 7: EAX=7, ECX=0. EBX: Bit 5=AVX2, Bit 16=AVX512F, Bit 30=AVX512BW. 
int regs[4]; int avx = 0; int avx2 = 0; int avx512 = 0; __cpuid(regs, 1); if (regs[2] & (1 << 28)) avx = 1; if (avx) { __cpuidex(regs, 7, 0); if (regs[1] & (1 << 5)) avx2 = 1; if ((regs[1] & (1 << 16)) && (regs[1] & (1 << 30))) avx512 = 1; } if (avx512) { features = ZXC_CPU_AVX512; } else if (avx2) { features = ZXC_CPU_AVX2; } #else // GCC/Clang built-in detection __builtin_cpu_init(); if (__builtin_cpu_supports("avx512f") && __builtin_cpu_supports("avx512bw")) { features = ZXC_CPU_AVX512; } else if (__builtin_cpu_supports("avx2")) { features = ZXC_CPU_AVX2; } #endif #elif defined(__aarch64__) || defined(_M_ARM64) // ARM64 usually guarantees NEON features = ZXC_CPU_NEON; #elif defined(__arm__) || defined(_M_ARM) // ARM32 Runtime detection for Linux #if defined(__linux__) const unsigned long hwcaps = getauxval(AT_HWCAP); if (hwcaps & HWCAP_NEON) { features = ZXC_CPU_NEON; } #else // Fallback for non-Linux: rely on compiler flags. // If compiled with -mfpu=neon, we assume target supports it. // Otherwise, safe default is GENERIC. #if defined(__ARM_NEON) features = ZXC_CPU_NEON; #endif #endif #endif return features; #endif } /* * ============================================================================ * DISPATCHERS * ============================================================================ * We use a function pointer initialized on first use (lazy initialization). */ /** @brief Function pointer type for the chunk decompressor. */ typedef int (*zxc_decompress_func_t)(zxc_cctx_t* RESTRICT, const uint8_t* RESTRICT, const size_t, uint8_t* RESTRICT, const size_t); /** @brief Function pointer type for the chunk compressor. */ typedef int (*zxc_compress_func_t)(zxc_cctx_t* RESTRICT, const uint8_t* RESTRICT, const size_t, uint8_t* RESTRICT, const size_t); /** @brief Lazily-resolved pointer to the best decompression variant. 
*/ static ZXC_ATOMIC zxc_decompress_func_t zxc_decompress_ptr = (zxc_decompress_func_t)0; /** @brief Lazily-resolved pointer to the best compression variant. */ static ZXC_ATOMIC zxc_compress_func_t zxc_compress_ptr = (zxc_compress_func_t)0; /** * @brief First-call initialiser for the decompression dispatcher. * * Detects CPU features, selects the best implementation, stores the * pointer atomically, then tail-calls into it. */ static int zxc_decompress_dispatch_init(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) { const zxc_cpu_feature_t cpu = zxc_detect_cpu_features(); zxc_decompress_func_t zxc_decompress_ptr_local = NULL; #ifndef ZXC_ONLY_DEFAULT #if defined(__x86_64__) || defined(_M_X64) if (cpu == ZXC_CPU_AVX512) zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_avx512; else if (cpu == ZXC_CPU_AVX2) zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_avx2; else zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_default; #elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM) // cppcheck-suppress knownConditionTrueFalse if (cpu == ZXC_CPU_NEON) zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_neon; else zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_default; #else (void)cpu; zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_default; #endif #else (void)cpu; zxc_decompress_ptr_local = zxc_decompress_chunk_wrapper_default; #endif #if ZXC_USE_C11_ATOMICS atomic_store_explicit(&zxc_decompress_ptr, zxc_decompress_ptr_local, memory_order_release); #else zxc_decompress_ptr = zxc_decompress_ptr_local; #endif return zxc_decompress_ptr_local(ctx, src, src_sz, dst, dst_cap); } /** * @brief First-call initialiser for the compression dispatcher. * * Detects CPU features, selects the best implementation, stores the * pointer atomically, then tail-calls into it. 
*/ static int zxc_compress_dispatch_init(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) { const zxc_cpu_feature_t cpu = zxc_detect_cpu_features(); zxc_compress_func_t zxc_compress_ptr_local = NULL; #ifndef ZXC_ONLY_DEFAULT #if defined(__x86_64__) || defined(_M_X64) if (cpu == ZXC_CPU_AVX512) zxc_compress_ptr_local = zxc_compress_chunk_wrapper_avx512; else if (cpu == ZXC_CPU_AVX2) zxc_compress_ptr_local = zxc_compress_chunk_wrapper_avx2; else zxc_compress_ptr_local = zxc_compress_chunk_wrapper_default; #elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM) // cppcheck-suppress knownConditionTrueFalse if (cpu == ZXC_CPU_NEON) zxc_compress_ptr_local = zxc_compress_chunk_wrapper_neon; else zxc_compress_ptr_local = zxc_compress_chunk_wrapper_default; #else (void)cpu; zxc_compress_ptr_local = zxc_compress_chunk_wrapper_default; #endif #else (void)cpu; zxc_compress_ptr_local = zxc_compress_chunk_wrapper_default; #endif #if ZXC_USE_C11_ATOMICS atomic_store_explicit(&zxc_compress_ptr, zxc_compress_ptr_local, memory_order_release); #else zxc_compress_ptr = zxc_compress_ptr_local; #endif return zxc_compress_ptr_local(ctx, src, src_sz, dst, dst_cap); } /** * @brief Public decompression dispatcher (calls lazily-resolved implementation). * * @param[in,out] ctx Decompression context. * @param[in] src Compressed input chunk (header + payload + optional checksum). * @param[in] src_sz Size of @p src in bytes. * @param[out] dst Destination buffer for decompressed data. * @param[in] dst_cap Capacity of @p dst. * @return Decompressed size in bytes, or a negative @ref zxc_error_t code. 
*/ int zxc_decompress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) { #if ZXC_USE_C11_ATOMICS const zxc_decompress_func_t func = atomic_load_explicit(&zxc_decompress_ptr, memory_order_acquire); #else const zxc_decompress_func_t func = zxc_decompress_ptr; #endif if (UNLIKELY(!func)) return zxc_decompress_dispatch_init(ctx, src, src_sz, dst, dst_cap); return func(ctx, src, src_sz, dst, dst_cap); } /** * @brief Public compression dispatcher (calls lazily-resolved implementation). * * @param[in,out] ctx Compression context. * @param[in] src Uncompressed input chunk. * @param[in] src_sz Size of @p src in bytes. * @param[out] dst Destination buffer for compressed data. * @param[in] dst_cap Capacity of @p dst. * @return Compressed size in bytes, or a negative @ref zxc_error_t code. */ int zxc_compress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap) { #if ZXC_USE_C11_ATOMICS const zxc_compress_func_t func = atomic_load_explicit(&zxc_compress_ptr, memory_order_acquire); #else const zxc_compress_func_t func = zxc_compress_ptr; #endif if (UNLIKELY(!func)) return zxc_compress_dispatch_init(ctx, src, src_sz, dst, dst_cap); return func(ctx, src, src_sz, dst, dst_cap); } /* * ============================================================================ * PUBLIC UTILITY API * ============================================================================ * These wrapper functions provide a simplified interface by managing context * allocation and looping over blocks. They call the dispatched wrappers above. */ /** * @brief Compresses an entire buffer in one call. * * Manages context allocation internally, loops over blocks, writes the * file header / EOF block / footer, and accumulates the global checksum. * * @param[in] src Uncompressed input data. * @param[in] src_size Size of @p src in bytes. 
* @param[out] dst Destination buffer (use zxc_compress_bound() to size). * @param[in] dst_capacity Capacity of @p dst. * @param[in] level Compression level (1-5). * @param[in] checksum_enabled Non-zero to enable per-block and global checksums. * @return Total compressed size in bytes, or a negative @ref zxc_error_t code. */ // cppcheck-suppress unusedFunction int64_t zxc_compress(const void* RESTRICT src, const size_t src_size, void* RESTRICT dst, const size_t dst_capacity, const zxc_compress_opts_t* opts) { if (UNLIKELY(!src || !dst || src_size == 0 || dst_capacity == 0)) return ZXC_ERROR_NULL_INPUT; const int checksum_enabled = opts ? opts->checksum_enabled : 0; const int level = (opts && opts->level > 0) ? opts->level : ZXC_LEVEL_DEFAULT; const size_t block_size = (opts && opts->block_size > 0) ? opts->block_size : ZXC_BLOCK_SIZE_DEFAULT; if (UNLIKELY(!zxc_validate_block_size(block_size))) return ZXC_ERROR_BAD_BLOCK_SIZE; const uint8_t* ip = (const uint8_t*)src; uint8_t* op = (uint8_t*)dst; const uint8_t* op_start = op; const uint8_t* op_end = op + dst_capacity; uint32_t global_hash = 0; zxc_cctx_t ctx; if (UNLIKELY(zxc_cctx_init(&ctx, block_size, 1, level, checksum_enabled) != ZXC_OK)) return ZXC_ERROR_MEMORY; const int h_val = zxc_write_file_header(op, (size_t)(op_end - op), block_size, checksum_enabled); if (UNLIKELY(h_val < 0)) { zxc_cctx_free(&ctx); return h_val; } op += h_val; size_t pos = 0; while (pos < src_size) { const size_t chunk_len = (src_size - pos > block_size) ? 
block_size : (src_size - pos); const size_t rem_cap = (size_t)(op_end - op); const int res = zxc_compress_chunk_wrapper(&ctx, ip + pos, chunk_len, op, rem_cap); if (UNLIKELY(res < 0)) { zxc_cctx_free(&ctx); return res; } if (checksum_enabled) { // Update Global Hash (Rotation + XOR) // Block checksum is at the end of the written block data if (LIKELY(res >= ZXC_GLOBAL_CHECKSUM_SIZE)) { const uint32_t block_hash = zxc_le32(op + res - ZXC_GLOBAL_CHECKSUM_SIZE); global_hash = zxc_hash_combine_rotate(global_hash, block_hash); } } op += res; pos += chunk_len; } zxc_cctx_free(&ctx); // Write EOF Block (Checksum flag handled by Block Header, but we zero it out now) const size_t rem_cap = (size_t)(op_end - op); const zxc_block_header_t eof_bh = { .block_type = ZXC_BLOCK_EOF, .block_flags = 0, .reserved = 0, .comp_size = 0}; const int eof_val = zxc_write_block_header(op, rem_cap, &eof_bh); if (UNLIKELY(eof_val < 0)) return eof_val; op += eof_val; if (UNLIKELY(rem_cap < (size_t)eof_val + ZXC_FILE_FOOTER_SIZE)) return ZXC_ERROR_DST_TOO_SMALL; // Write 12-byte Footer: [Source Size (8)] + [Global Hash (4)] const int footer_val = zxc_write_file_footer(op, (size_t)(op_end - op), src_size, global_hash, checksum_enabled); if (UNLIKELY(footer_val < 0)) return footer_val; op += footer_val; return (int64_t)(op - op_start); } /** * @brief Decompresses an entire buffer in one call. * * Validates the file header and footer, loops over compressed blocks, * and verifies the global checksum when enabled. * * @param[in] src Compressed input data. * @param[in] src_size Size of @p src in bytes. * @param[out] dst Destination buffer for decompressed data. * @param[in] dst_capacity Capacity of @p dst. * @param[in] checksum_enabled Non-zero to verify per-block and global checksums. * @return Total decompressed size in bytes, or a negative @ref zxc_error_t code. 
 */
// cppcheck-suppress unusedFunction
int64_t zxc_decompress(const void* RESTRICT src, const size_t src_size, void* RESTRICT dst,
                       const size_t dst_capacity, const zxc_decompress_opts_t* opts) {
    if (UNLIKELY(!src || !dst || src_size < ZXC_FILE_HEADER_SIZE)) return ZXC_ERROR_NULL_INPUT;

    // Checksum verification is opt-in; with no options, checksums are skipped.
    const int checksum_enabled = opts ? opts->checksum_enabled : 0;

    const uint8_t* ip = (const uint8_t*)src;
    const uint8_t* ip_end = ip + src_size;
    uint8_t* op = (uint8_t*)dst;
    const uint8_t* op_start = op;
    const uint8_t* op_end = op + dst_capacity;
    size_t runtime_chunk_size = 0;
    zxc_cctx_t ctx;
    int file_has_checksums = 0;

    // File header verification and context initialization.
    // Per-block checksum handling is only armed when the file was written with
    // checksums AND the caller asked for verification.
    if (UNLIKELY(zxc_read_file_header(ip, src_size, &runtime_chunk_size, &file_has_checksums) !=
                     ZXC_OK ||
                 zxc_cctx_init(&ctx, runtime_chunk_size, 0, 0,
                               file_has_checksums && checksum_enabled) != ZXC_OK)) {
        return ZXC_ERROR_BAD_HEADER;
    }
    ip += ZXC_FILE_HEADER_SIZE;

    // GLO/GHI wild copies (zxc_copy32) overshoot by up to ZXC_PAD_SIZE bytes.
    // Decode into a padded scratch buffer, then memcpy the exact result out.
    const size_t work_sz = runtime_chunk_size + ZXC_PAD_SIZE;
    if (ctx.work_buf_cap < work_sz) {
        // Grow (never shrink) the bounce buffer; old contents are irrelevant.
        free(ctx.work_buf);
        ctx.work_buf = (uint8_t*)malloc(work_sz);
        if (UNLIKELY(!ctx.work_buf)) {
            zxc_cctx_free(&ctx);
            return ZXC_ERROR_MEMORY;
        }
        ctx.work_buf_cap = work_sz;
    }

    // Block decompression loop
    uint32_t global_hash = 0;
    while (ip < ip_end) {
        const size_t rem_src = (size_t)(ip_end - ip);
        zxc_block_header_t bh;

        // Read the block header to determine the compressed size
        if (UNLIKELY(zxc_read_block_header(ip, rem_src, &bh) != ZXC_OK)) {
            zxc_cctx_free(&ctx);
            return ZXC_ERROR_BAD_HEADER;
        }

        // Handle EOF block separately (not a real chunk to decompress)
        if (UNLIKELY(bh.block_type == ZXC_BLOCK_EOF)) {
            // Validate we have the footer after the header
            if (UNLIKELY(rem_src < ZXC_BLOCK_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE)) {
                zxc_cctx_free(&ctx);
                return ZXC_ERROR_SRC_TOO_SMALL;
            }
            const uint8_t* const footer = ip + ZXC_BLOCK_HEADER_SIZE;

            // Validate source size matches what we decompressed
            const uint64_t stored_size = zxc_le64(footer);
            if (UNLIKELY(stored_size != (uint64_t)(op - op_start))) {
                zxc_cctx_free(&ctx);
                return ZXC_ERROR_CORRUPT_DATA;
            }

            // Validate global checksum if enabled and file has checksums
            if (checksum_enabled && file_has_checksums) {
                const uint32_t stored_hash = zxc_le32(footer + sizeof(uint64_t));
                if (UNLIKELY(stored_hash != global_hash)) {
                    zxc_cctx_free(&ctx);
                    return ZXC_ERROR_BAD_CHECKSUM;
                }
            }
            break;  // EOF reached, exit loop
        }

        const size_t rem_cap = (size_t)(op_end - op);
        int res;
        if (LIKELY(rem_cap >= runtime_chunk_size + ZXC_PAD_SIZE)) {
            // Fast path: decode directly into dst (enough padding for wild copies).
            res = zxc_decompress_chunk_wrapper(&ctx, ip, rem_src, op, rem_cap);
        } else {
            // Safe path: decode into bounce buffer, then copy exact result.
            res = zxc_decompress_chunk_wrapper(&ctx, ip, rem_src, ctx.work_buf,
                                               runtime_chunk_size);
            if (LIKELY(res > 0)) {
                if (UNLIKELY((size_t)res > rem_cap)) {
                    zxc_cctx_free(&ctx);
                    return ZXC_ERROR_DST_TOO_SMALL;
                }
                ZXC_MEMCPY(op, ctx.work_buf, (size_t)res);
            }
        }
        if (UNLIKELY(res < 0)) {
            zxc_cctx_free(&ctx);
            return res;
        }

        // Update global hash from block checksum.
        // NOTE(review): this read at ip + header + comp_size assumes the chunk
        // wrapper already validated that comp_size (+ trailing checksum) fits
        // within rem_src — confirm, otherwise a corrupt comp_size could make
        // this read past the end of the input buffer.
        if (checksum_enabled && file_has_checksums) {
            const uint32_t block_hash = zxc_le32(ip + ZXC_BLOCK_HEADER_SIZE + bh.comp_size);
            global_hash = zxc_hash_combine_rotate(global_hash, block_hash);
        }

        // Advance past header, payload, and (when present) the block checksum.
        ip += ZXC_BLOCK_HEADER_SIZE + bh.comp_size +
              (file_has_checksums ? ZXC_BLOCK_CHECKSUM_SIZE : 0);
        op += res;
    }
    zxc_cctx_free(&ctx);
    return (int64_t)(op - op_start);
}

/**
 * @brief Reads the decompressed size from a ZXC-compressed buffer.
 *
 * The size is stored in the file footer (last @ref ZXC_FILE_FOOTER_SIZE bytes).
 *
 * @param[in] src Compressed data.
 * @param[in] src_size Size of @p src in bytes.
 * @return Original uncompressed size, or 0 on error (too small / bad magic).
 */
uint64_t zxc_get_decompressed_size(const void* src, const size_t src_size) {
    if (UNLIKELY(src_size < ZXC_FILE_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE)) return 0;
    const uint8_t* const p = (const uint8_t*)src;
    // Cheap sanity check: magic word must match before trusting the footer.
    if (UNLIKELY(zxc_le32(p) != ZXC_MAGIC_WORD)) return 0;
    const uint8_t* const footer = p + src_size - ZXC_FILE_FOOTER_SIZE;
    return zxc_le64(footer);
}

/*
 * ============================================================================
 * REUSABLE CONTEXT API (Opaque)
 * ============================================================================
 *
 * Provides heap-allocated, opaque contexts that integrators can reuse across
 * multiple compress / decompress calls, eliminating per-call malloc/free
 * overhead.
 */

/* --- Compression --------------------------------------------------------- */

struct zxc_cctx_s {
    zxc_cctx_t inner;       /* existing internal context */
    int initialized;        /* 1 if inner has live allocations */
    size_t last_block_size; /* block size used for last init */
    /* Sticky options (remembered from create or last compress call). */
    int stored_level;
    int stored_checksum;
    size_t stored_block_size;
};

zxc_cctx* zxc_create_cctx(const zxc_compress_opts_t* opts) {
    zxc_cctx* const cctx = (zxc_cctx*)calloc(1, sizeof(zxc_cctx));
    if (UNLIKELY(!cctx)) return NULL;

    /* Resolve and store sticky defaults. */
    cctx->stored_level = (opts && opts->level > 0) ? opts->level : ZXC_LEVEL_DEFAULT;
    cctx->stored_block_size =
        (opts && opts->block_size > 0) ? opts->block_size : ZXC_BLOCK_SIZE_DEFAULT;
    cctx->stored_checksum = opts ? opts->checksum_enabled : 0;

    /* With explicit options, allocate the inner context eagerly so the first
     * compress call pays no init cost; with opts == NULL, init is deferred to
     * the first zxc_compress_cctx() call. */
    if (opts) {
        if (UNLIKELY(!zxc_validate_block_size(cctx->stored_block_size) ||
                     zxc_cctx_init(&cctx->inner, cctx->stored_block_size, 1, cctx->stored_level,
                                   cctx->stored_checksum) != ZXC_OK)) {
            free(cctx);
            return NULL;
        }
        cctx->last_block_size = cctx->stored_block_size;
        cctx->initialized = 1;
    }
    return cctx;
}

void zxc_free_cctx(zxc_cctx* cctx) {
    if (!cctx) return;
    if (cctx->initialized) zxc_cctx_free(&cctx->inner);
    free(cctx);
}

int64_t zxc_compress_cctx(zxc_cctx* cctx, const void* RESTRICT src, const size_t src_size,
                          void* RESTRICT dst, const size_t dst_capacity,
                          const zxc_compress_opts_t* opts) {
    if (UNLIKELY(!cctx)) return ZXC_ERROR_NULL_INPUT;
    if (UNLIKELY(!src || !dst || src_size == 0 || dst_capacity == 0)) return ZXC_ERROR_NULL_INPUT;

    /* Per-call opts override the sticky values, and then become sticky. */
    const int checksum_enabled = opts ? opts->checksum_enabled : cctx->stored_checksum;
    const int level = (opts && opts->level > 0) ? opts->level : cctx->stored_level;
    const size_t block_size =
        (opts && opts->block_size > 0) ? opts->block_size : cctx->stored_block_size;
    cctx->stored_level = level;
    cctx->stored_block_size = block_size;
    cctx->stored_checksum = checksum_enabled;

    if (UNLIKELY(!zxc_validate_block_size(block_size))) return ZXC_ERROR_BAD_BLOCK_SIZE;

    /* Re-init only when block_size changed (it drives buffer sizes). */
    if (!cctx->initialized || cctx->last_block_size != block_size) {
        if (cctx->initialized) zxc_cctx_free(&cctx->inner);
        if (UNLIKELY(zxc_cctx_init(&cctx->inner, block_size, 1, level, checksum_enabled) !=
                     ZXC_OK))
            return ZXC_ERROR_MEMORY;
        cctx->last_block_size = block_size;
        cctx->initialized = 1;
    } else {
        /* Same block_size: update level + checksum without realloc. */
        cctx->inner.compression_level = level;
        cctx->inner.checksum_enabled = checksum_enabled;
    }

    zxc_cctx_t* const ctx = &cctx->inner;
    uint8_t* op = (uint8_t*)dst;
    const uint8_t* const op_start = op;
    const uint8_t* const op_end = op + dst_capacity;
    const uint8_t* const ip = (const uint8_t*)src;
    uint32_t global_hash = 0;

    const int h_val =
        zxc_write_file_header(op, (size_t)(op_end - op), block_size, checksum_enabled);
    if (UNLIKELY(h_val < 0)) return h_val;
    op += h_val;

    /* Compress block_size-sized chunks; the context persists across calls, so
     * error paths below return without freeing it (freed by zxc_free_cctx). */
    size_t pos = 0;
    while (pos < src_size) {
        const size_t chunk_len = (src_size - pos > block_size) ? block_size : (src_size - pos);
        const size_t rem_cap = (size_t)(op_end - op);
        const int res = zxc_compress_chunk_wrapper(ctx, ip + pos, chunk_len, op, rem_cap);
        if (UNLIKELY(res < 0)) return res;
        if (checksum_enabled) {
            /* Fold the per-block checksum (stored at the end of the written
             * block) into the running global hash. */
            if (LIKELY(res >= ZXC_GLOBAL_CHECKSUM_SIZE)) {
                const uint32_t block_hash = zxc_le32(op + res - ZXC_GLOBAL_CHECKSUM_SIZE);
                global_hash = zxc_hash_combine_rotate(global_hash, block_hash);
            }
        }
        op += res;
        pos += chunk_len;
    }

    /* EOF block */
    const size_t rem_cap = (size_t)(op_end - op);
    const zxc_block_header_t eof_bh = {
        .block_type = ZXC_BLOCK_EOF, .block_flags = 0, .reserved = 0, .comp_size = 0};
    const int eof_val = zxc_write_block_header(op, rem_cap, &eof_bh);
    if (UNLIKELY(eof_val < 0)) return eof_val;
    op += eof_val;
    if (UNLIKELY(rem_cap < (size_t)eof_val + ZXC_FILE_FOOTER_SIZE)) return ZXC_ERROR_DST_TOO_SMALL;

    const int footer_val = zxc_write_file_footer(op, (size_t)(op_end - op), src_size, global_hash,
                                                 checksum_enabled);
    if (UNLIKELY(footer_val < 0)) return footer_val;
    op += footer_val;
    return (int64_t)(op - op_start);
}

/* --- Decompression ------------------------------------------------------- */

struct zxc_dctx_s {
    zxc_cctx_t inner;       /* reuses the same internal context type */
    size_t last_block_size; /* block size from last header parse */
    int initialized;        /* 1 if inner has live allocations */
};

zxc_dctx* zxc_create_dctx(void) {
    /* Allocation is lazy: the inner context is sized on first decompress,
     * once the file header reveals the block size. */
    zxc_dctx* const dctx = (zxc_dctx*)calloc(1, sizeof(zxc_dctx));
    return dctx;
}

void zxc_free_dctx(zxc_dctx* dctx) {
    if (!dctx) return;
    if (dctx->initialized) zxc_cctx_free(&dctx->inner);
    free(dctx);
}

int64_t zxc_decompress_dctx(zxc_dctx* dctx, const void* RESTRICT src, const size_t src_size,
                            void* RESTRICT dst, const size_t dst_capacity,
                            const zxc_decompress_opts_t* opts) {
    if (UNLIKELY(!dctx)) return ZXC_ERROR_NULL_INPUT;
    if (UNLIKELY(!src || !dst || src_size < ZXC_FILE_HEADER_SIZE)) return ZXC_ERROR_NULL_INPUT;

    const int checksum_enabled = opts ? opts->checksum_enabled : 0;
    const uint8_t* ip = (const uint8_t*)src;
    const uint8_t* const ip_end = ip + src_size;
    uint8_t* op = (uint8_t*)dst;
    const uint8_t* const op_start = op;
    const uint8_t* const op_end = op + dst_capacity;
    size_t runtime_chunk_size = 0;
    int file_has_checksums = 0;

    if (UNLIKELY(zxc_read_file_header(ip, src_size, &runtime_chunk_size, &file_has_checksums) !=
                 ZXC_OK))
        return ZXC_ERROR_BAD_HEADER;

    /* Re-init only when block size changed. */
    if (!dctx->initialized || dctx->last_block_size != runtime_chunk_size) {
        if (dctx->initialized) zxc_cctx_free(&dctx->inner);
        if (UNLIKELY(zxc_cctx_init(&dctx->inner, runtime_chunk_size, 0, 0,
                                   file_has_checksums && checksum_enabled) != ZXC_OK))
            return ZXC_ERROR_MEMORY;
        dctx->last_block_size = runtime_chunk_size;
        dctx->initialized = 1;
    } else {
        dctx->inner.checksum_enabled = file_has_checksums && checksum_enabled;
    }

    zxc_cctx_t* const ctx = &dctx->inner;
    ip += ZXC_FILE_HEADER_SIZE;

    /* Ensure scratch buffer is large enough (padded for wild copies). */
    const size_t work_sz = runtime_chunk_size + ZXC_PAD_SIZE;
    if (ctx->work_buf_cap < work_sz) {
        free(ctx->work_buf);
        ctx->work_buf = (uint8_t*)malloc(work_sz);
        if (UNLIKELY(!ctx->work_buf)) return ZXC_ERROR_MEMORY;
        ctx->work_buf_cap = work_sz;
    }

    /* Block loop mirrors zxc_decompress(), but the context outlives the call
     * (freed by zxc_free_dctx), so error paths return directly. */
    uint32_t global_hash = 0;
    while (ip < ip_end) {
        const size_t rem_src = (size_t)(ip_end - ip);
        zxc_block_header_t bh;
        if (UNLIKELY(zxc_read_block_header(ip, rem_src, &bh) != ZXC_OK))
            return ZXC_ERROR_BAD_HEADER;
        if (UNLIKELY(bh.block_type == ZXC_BLOCK_EOF)) {
            if (UNLIKELY(rem_src < ZXC_BLOCK_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE))
                return ZXC_ERROR_SRC_TOO_SMALL;
            const uint8_t* const footer = ip + ZXC_BLOCK_HEADER_SIZE;
            const uint64_t stored_size = zxc_le64(footer);
            if (UNLIKELY(stored_size != (uint64_t)(op - op_start))) return ZXC_ERROR_CORRUPT_DATA;
            if (checksum_enabled && file_has_checksums) {
                const uint32_t stored_hash = zxc_le32(footer + sizeof(uint64_t));
                if (UNLIKELY(stored_hash != global_hash)) return ZXC_ERROR_BAD_CHECKSUM;
            }
            break;
        }
        const size_t rem_cap = (size_t)(op_end - op);
        int res;
        if (LIKELY(rem_cap >= runtime_chunk_size + ZXC_PAD_SIZE)) {
            // Fast path: decode directly into dst (enough padding for wild copies).
            res = zxc_decompress_chunk_wrapper(ctx, ip, rem_src, op, rem_cap);
        } else {
            // Safe path: decode into bounce buffer, then copy exact result.
            res = zxc_decompress_chunk_wrapper(ctx, ip, rem_src, ctx->work_buf,
                                               runtime_chunk_size);
            if (LIKELY(res > 0)) {
                if (UNLIKELY((size_t)res > rem_cap)) return ZXC_ERROR_DST_TOO_SMALL;
                ZXC_MEMCPY(op, ctx->work_buf, (size_t)res);
            }
        }
        if (UNLIKELY(res < 0)) return res;
        if (checksum_enabled && file_has_checksums) {
            const uint32_t block_hash = zxc_le32(ip + ZXC_BLOCK_HEADER_SIZE + bh.comp_size);
            global_hash = zxc_hash_combine_rotate(global_hash, block_hash);
        }
        ip += ZXC_BLOCK_HEADER_SIZE + bh.comp_size +
              (file_has_checksums ? ZXC_BLOCK_CHECKSUM_SIZE : 0);
        op += res;
    }
    return (int64_t)(op - op_start);
}
zxc-0.9.1/src/lib/zxc_driver.c000066400000000000000000000771311515574030100162170ustar00rootroot00000000000000/*
 * ZXC - High-performance lossless compression
 *
 * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors.
 * SPDX-License-Identifier: BSD-3-Clause
 */

/**
 * @file zxc_driver.c
 * @brief Multi-threaded streaming compression / decompression engine.
 *
 * Implements a ring-buffer producer-worker-consumer architecture that
 * parallelises block processing over @c FILE* streams. Also provides
 * the public @ref zxc_stream_compress, @ref zxc_stream_decompress, and
 * extended variants with progress callbacks.
*/ #include #include #include #include "../../include/zxc_buffer.h" #include "../../include/zxc_error.h" #include "../../include/zxc_sans_io.h" #include "../../include/zxc_stream.h" #include "zxc_internal.h" /* * ============================================================================ * WINDOWS THREADING EMULATION * ============================================================================ * Maps POSIX pthread calls to Windows Native API (CriticalSection, * ConditionVariable, Threads). Allows the same threading logic to compile on * Linux/macOS and Windows. */ #if defined(_WIN32) #include #include #include #include // Map POSIX file positioning functions to Windows equivalents #define fseeko _fseeki64 #define ftello _ftelli64 // Simple sysconf emulation to get core count static int zxc_get_num_procs(void) { SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; } typedef CRITICAL_SECTION pthread_mutex_t; typedef CONDITION_VARIABLE pthread_cond_t; typedef HANDLE pthread_t; #define pthread_mutex_init(m, a) InitializeCriticalSection(m) #define pthread_mutex_destroy(m) DeleteCriticalSection(m) #define pthread_mutex_lock(m) EnterCriticalSection(m) #define pthread_mutex_unlock(m) LeaveCriticalSection(m) #define pthread_cond_init(c, a) InitializeConditionVariable(c) #define pthread_cond_destroy(c) (void)(0) #define pthread_cond_wait(c, m) SleepConditionVariableCS(c, m, INFINITE) #define pthread_cond_signal(c) WakeConditionVariable(c) #define pthread_cond_broadcast(c) WakeAllConditionVariable(c) typedef struct { void* (*func)(void*); void* arg; } zxc_win_thread_arg_t; static unsigned __stdcall zxc_win_thread_entry(void* p) { zxc_win_thread_arg_t* a = (zxc_win_thread_arg_t*)p; void* (*f)(void*) = a->func; void* arg = a->arg; free(a); f(arg); return 0; } static int pthread_create(pthread_t* thread, const void* attr, void* (*start_routine)(void*), void* arg) { (void)attr; zxc_win_thread_arg_t* wrapper = malloc(sizeof(zxc_win_thread_arg_t)); 
if (UNLIKELY(!wrapper)) return ZXC_ERROR_MEMORY; wrapper->func = start_routine; wrapper->arg = arg; uintptr_t handle = _beginthreadex(NULL, 0, zxc_win_thread_entry, wrapper, 0, NULL); if (UNLIKELY(handle == 0)) { free(wrapper); return ZXC_ERROR_MEMORY; } *thread = (HANDLE)handle; return 0; } static int pthread_join(pthread_t thread, void** retval) { (void)retval; WaitForSingleObject(thread, INFINITE); CloseHandle(thread); return 0; } #define sysconf(x) zxc_get_num_procs() #define _SC_NPROCESSORS_ONLN 0 #else #include #include #endif /* * ============================================================================ * STREAMING ENGINE (Producer / Worker / Consumer) * ============================================================================ * Implements a Ring Buffer architecture to parallelize block processing. */ /** * @enum job_status_t * @brief Represents the lifecycle states of a processing job within the ring * buffer. * * @var JOB_STATUS_FREE * The job slot is empty and available to be filled with new data by the * writer. * @var JOB_STATUS_FILLED * The job slot has been populated with input data and is ready for * processing by a worker. * @var JOB_STATUS_PROCESSED * The worker has finished processing the data; the result is ready to be * consumed/written out. */ typedef enum { JOB_STATUS_FREE, JOB_STATUS_FILLED, JOB_STATUS_PROCESSED } job_status_t; /** * @struct zxc_stream_job_t * @brief Represents a single unit of work (a chunk of data) to be processed. * * This structure holds the input and output buffers for a specific chunk of * data, along with its processing status. It is padded to align with cache * lines to prevent false sharing in a multi-threaded environment. * * @var zxc_stream_job_t::in_buf * Pointer to the buffer containing raw input data. * @var zxc_stream_job_t::in_cap * The total allocated capacity of the input buffer. * @var zxc_stream_job_t::in_sz * The actual size of the valid data currently in the input buffer. 
 * @var zxc_stream_job_t::out_buf
 *   Pointer to the buffer where processed (compressed/decompressed) data is
 *   stored.
 * @var zxc_stream_job_t::out_cap
 *   The total allocated capacity of the output buffer.
 * @var zxc_stream_job_t::result_sz
 *   The actual size of the valid data produced in the output buffer.
 * @var zxc_stream_job_t::job_id
 *   A unique identifier for the job, often used for ordering or debugging.
 * @var zxc_stream_job_t::status
 *   The current state of this job (Free, Filled, or Processed).
 * @var zxc_stream_job_t::pad
 *   Padding bytes to ensure the structure size aligns with typical cache
 *   lines (64 bytes), minimizing cache contention between threads accessing
 *   adjacent jobs.
 */
typedef struct {
    uint8_t* in_buf;
    size_t in_cap, in_sz;
    uint8_t* out_buf;
    size_t out_cap, result_sz;
    int job_id; /* NOTE(review): assigned from a size_t loop index at init — narrowing, fine while ring_size fits in int */
    ZXC_ATOMIC job_status_t status;  // Atomic for lock-free status updates
    char pad[ZXC_CACHE_LINE_SIZE];   // Prevent False Sharing
} zxc_stream_job_t;

/**
 * @typedef zxc_chunk_processor_t
 * @brief Function pointer type for processing a chunk of data.
 *
 * This type defines the signature for internal functions responsible for
 * processing (compressing or transforming) a specific chunk of input data.
 *
 * @param ctx Pointer to the compression context containing state and
 * configuration.
 * @param in Pointer to the input data buffer.
 * @param in_sz Size of the input data in bytes.
 * @param out Pointer to the output buffer where processed data will be
 * written.
 * @param out_cap Capacity of the output buffer in bytes.
 *
 * @return The number of bytes written to the output buffer on success, or a
 * negative error code on failure.
 */
typedef int (*zxc_chunk_processor_t)(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT in,
                                     const size_t in_sz, uint8_t* RESTRICT out,
                                     const size_t out_cap);

/**
 * @struct zxc_stream_ctx_t
 * @brief The main context structure managing the streaming
 * compression/decompression state.
 *
 * This structure orchestrates the producer-consumer workflow. It manages the
 * ring buffer of jobs, the worker queue, synchronization primitives (mutexes
 * and condition variables), and configuration settings for the compression
 * algorithm.
 *
 * @var zxc_stream_ctx_t::jobs
 *   Array of job structures acting as the ring buffer.
 * @var zxc_stream_ctx_t::ring_size
 *   The total number of slots in the jobs array.
 * @var zxc_stream_ctx_t::worker_queue
 *   A circular queue containing indices of jobs ready to be picked up by
 *   worker threads.
 * @var zxc_stream_ctx_t::wq_head
 *   Index of the head of the worker queue (where the reader adds jobs).
 * @var zxc_stream_ctx_t::wq_tail
 *   Index of the tail of the worker queue (where workers take jobs).
 * @var zxc_stream_ctx_t::wq_count
 *   Current number of items in the worker queue.
 * @var zxc_stream_ctx_t::lock
 *   Mutex used to protect access to shared resources (queue indices, status
 *   changes).
 * @var zxc_stream_ctx_t::cond_reader
 *   Condition variable to signal the input thread (reader) that job slots
 *   are free.
 * @var zxc_stream_ctx_t::cond_worker
 *   Condition variable to signal worker threads that new work is available.
 * @var zxc_stream_ctx_t::cond_writer
 *   Condition variable to signal the output thread (writer) that processed
 *   data is available.
 * @var zxc_stream_ctx_t::shutdown_workers
 *   Flag indicating that worker threads should terminate.
 * @var zxc_stream_ctx_t::compression_mode
 *   Indicates the operation mode (1 = compression, 0 = decompression).
 * @var zxc_stream_ctx_t::io_error
 *   Atomic flag to signal if an I/O error occurred during processing.
 * @var zxc_stream_ctx_t::processor
 *   Function responsible for the actual chunk processing logic.
 * @var zxc_stream_ctx_t::write_idx
 *   The index of the next job slot the writer thread must emit (ordering).
 * @var zxc_stream_ctx_t::compression_level
 *   The configured level of compression (trading off speed vs. ratio).
 * @var zxc_stream_ctx_t::chunk_size
 *   The size of each data chunk to be processed.
 * @var zxc_stream_ctx_t::checksum_enabled
 *   Flag indicating whether checksum verification/generation is active.
 * @var zxc_stream_ctx_t::file_has_checksum
 *   Flag indicating whether the input file includes checksums.
 * @var zxc_stream_ctx_t::progress_cb
 *   Optional callback function for reporting progress during processing.
 * @var zxc_stream_ctx_t::progress_user_data
 *   User data pointer to be passed to the progress callback function.
 * @var zxc_stream_ctx_t::total_input_bytes
 *   Total size of the input data in bytes, used for progress tracking.
 */
typedef struct {
    zxc_stream_job_t* jobs;
    size_t ring_size;
    int* worker_queue;
    int wq_head, wq_tail, wq_count;
    pthread_mutex_t lock;
    pthread_cond_t cond_reader, cond_worker, cond_writer;
    int shutdown_workers;
    int compression_mode;
    ZXC_ATOMIC int io_error;
    zxc_chunk_processor_t processor;
    int write_idx;
    int compression_level;
    size_t chunk_size;
    int checksum_enabled;
    int file_has_checksum;
    zxc_progress_callback_t progress_cb;
    void* progress_user_data;
    uint64_t total_input_bytes;
} zxc_stream_ctx_t;

/**
 * @struct writer_args_t
 * @brief Structure containing arguments for the writer callback function.
 *
 * This structure is used to pass necessary context and state information
 * to the function responsible for writing compressed or decompressed data
 * to a file stream.
 *
 * @var writer_args_t::ctx
 *   Pointer to the ZXC stream context, holding the state of the
 *   compression/decompression stream.
 *
 * @var writer_args_t::f
 *   Pointer to the output file stream where data will be written.
 *
 * @var writer_args_t::total_bytes
 *   Accumulator for the total number of bytes written to the file so far.
 *
 * @var writer_args_t::global_hash
 *   The global hash accumulated during processing.
 *
 * @var writer_args_t::bytes_processed
 *   The number of bytes processed so far, used for progress reporting.
 */
typedef struct {
    zxc_stream_ctx_t* ctx;
    FILE* f;
    int64_t total_bytes;
    uint32_t global_hash;
    uint64_t bytes_processed;  // For progress callback
} writer_args_t;

/**
 * @brief Worker thread function for parallel stream processing.
 *
 * This function serves as the entry point for worker threads in the ZXC
 * streaming compression/decompression context. It continuously retrieves jobs
 * from a shared work queue, processes them using a thread-local compression
 * context (`zxc_cctx_t`), and signals the writer thread upon completion.
 *
 * **Worker Lifecycle & Synchronization:**
 * 1. **Initialization:** Allocates a thread-local `zxc_cctx_t` to avoid lock
 *    contention during compression/decompression.
 * 2. **Wait Loop:** Uses `pthread_cond_wait` on `cond_worker` to sleep until a
 *    job is available in the `worker_queue`.
 * 3. **Job Retrieval:** Dequeues a job ID from the ring buffer. The
 *    `worker_queue` acts as a load balancer.
 * 4. **Processing:** Calls `ctx->processor` (the compression/decompression
 *    function) on the job's data. This is the CPU-intensive part and runs in
 *    parallel.
 * 5. **Completion:** Updates `job->status` to `JOB_STATUS_PROCESSED`.
 * 6. **Signaling:** If the processed job is the *next* one expected by the
 *    writer (`jid == ctx->write_idx`), it signals `cond_writer`. This
 *    optimization prevents unnecessary wake-ups of the writer thread for
 *    out-of-order completions.
 *
 * @param[in] arg A pointer to the shared stream context (`zxc_stream_ctx_t`).
 * @return Always returns NULL.
 */
static void* zxc_stream_worker(void* arg) {
    zxc_stream_ctx_t* const ctx = (zxc_stream_ctx_t*)arg;
    zxc_cctx_t cctx;
    // Compression uses the caller's checksum flag directly; decompression
    // only verifies when the file actually carries checksums.
    const int unified_chk = (ctx->compression_mode == 1)
                                ? ctx->checksum_enabled
                                : (ctx->file_has_checksum && ctx->checksum_enabled);
    if (zxc_cctx_init(&cctx, ctx->chunk_size, ctx->compression_mode, ctx->compression_level,
                      unified_chk) != ZXC_OK) {
        zxc_cctx_free(&cctx);
        return NULL;
    }
    cctx.compression_level = ctx->compression_level;
    while (1) {
        zxc_stream_job_t* job = NULL;
        pthread_mutex_lock(&ctx->lock);
        while (ctx->wq_count == 0 && !ctx->shutdown_workers) {
            pthread_cond_wait(&ctx->cond_worker, &ctx->lock);
        }
        // Drain remaining queued jobs even after shutdown is requested.
        if (ctx->shutdown_workers && ctx->wq_count == 0) {
            pthread_mutex_unlock(&ctx->lock);
            break;
        }
        const int jid = ctx->worker_queue[ctx->wq_tail];
        ctx->wq_tail = (ctx->wq_tail + 1) % ctx->ring_size;
        ctx->wq_count--;
        job = &ctx->jobs[jid];
        pthread_mutex_unlock(&ctx->lock);

        // CPU-intensive part: runs outside the lock, in parallel with peers.
        const int res = ctx->processor(&cctx, job->in_buf, job->in_sz, job->out_buf, job->out_cap);
        job->result_sz = UNLIKELY(res < 0) ? 0 : (size_t)res;
        job->status = JOB_STATUS_PROCESSED;

        pthread_mutex_lock(&ctx->lock);
        if (UNLIKELY(res < 0)) {
            // Propagate failure: wake everyone so they observe io_error.
            ctx->io_error = 1;
            pthread_cond_broadcast(&ctx->cond_writer);
            pthread_cond_broadcast(&ctx->cond_reader);
        } else if (jid == ctx->write_idx) {
            // Only wake the writer when its next-in-order job just completed.
            pthread_cond_signal(&ctx->cond_writer);
        }
        pthread_mutex_unlock(&ctx->lock);
    }
    zxc_cctx_free(&cctx);
    return NULL;
}

/**
 * @brief Asynchronous writer thread function.
 *
 * This function runs as a separate thread responsible for writing processed
 * data chunks to the output file. It operates on a ring buffer of jobs shared
 * with the reader and worker threads.
 *
 * **Ordering Enforcement:**
 * The writer MUST write blocks in the exact order they were read. Even if
 * worker threads finish jobs out of order (e.g., job 2 finishes before job 1),
 * the writer waits for `ctx->write_idx` (job 1) to be `JOB_STATUS_PROCESSED`.
 *
 * **Workflow:**
 * 1. **Wait:** Sleeps on `cond_writer` until the job at `ctx->write_idx` is
 *    ready.
 * 2. **Write:** Writes the `out_buf` to the file.
 * 3.
 *    **Release:** Sets the job status to `JOB_STATUS_FREE` and signals
 *    `cond_reader`, allowing the main thread to reuse this slot for new input.
 * 4. **Advance:** Increments `ctx->write_idx` to wait for the next sequential
 *    block.
 *
 * @param[in] arg Pointer to a `writer_args_t` structure containing the stream
 * context, the output file handle, and a counter for total bytes written.
 * @return Always returns NULL.
 */
static void* zxc_async_writer(void* arg) {
    writer_args_t* const args = (writer_args_t*)arg;
    zxc_stream_ctx_t* const ctx = args->ctx;
    while (1) {
        // write_idx is only advanced by this thread, so taking the pointer
        // before locking is safe; the status field is what we synchronize on.
        zxc_stream_job_t* const job = &ctx->jobs[ctx->write_idx];
        pthread_mutex_lock(&ctx->lock);
        while (job->status != JOB_STATUS_PROCESSED)
            pthread_cond_wait(&ctx->cond_writer, &ctx->lock);
        // result_sz == (size_t)-1 is the sentinel the reader plants to say
        // "no more blocks": terminate the writer thread.
        if (job->result_sz == (size_t)-1) {
            pthread_mutex_unlock(&ctx->lock);
            break;
        }
        pthread_mutex_unlock(&ctx->lock);

        if (args->f && job->result_sz > 0) {
            if (fwrite(job->out_buf, 1, job->result_sz, args->f) != job->result_sz) {
                ctx->io_error = 1;
            } else if (ctx->checksum_enabled && ctx->compression_mode == 1) {
                // Update Global Hash (Rotation + XOR): the per-block checksum
                // sits at the very end of the written block data.
                if (LIKELY(job->result_sz >= ZXC_GLOBAL_CHECKSUM_SIZE)) {
                    uint32_t block_hash =
                        zxc_le32(job->out_buf + job->result_sz - ZXC_GLOBAL_CHECKSUM_SIZE);
                    args->global_hash = zxc_hash_combine_rotate(args->global_hash, block_hash);
                }
            }
        }

        if (UNLIKELY(ctx->io_error)) {
            // Free the slot so the reader can observe the error and unwind.
            pthread_mutex_lock(&ctx->lock);
            job->status = JOB_STATUS_FREE;
            pthread_cond_signal(&ctx->cond_reader);
            pthread_mutex_unlock(&ctx->lock);
            break;
        }

        args->total_bytes += (int64_t)job->result_sz;

        // Update progress callback: report consumed input bytes when
        // compressing, produced output bytes when decompressing.
        if (ctx->progress_cb) {
            args->bytes_processed += ctx->compression_mode == 1 ? job->in_sz : job->result_sz;
            ctx->progress_cb(args->bytes_processed, ctx->total_input_bytes,
                             ctx->progress_user_data);
        }

        pthread_mutex_lock(&ctx->lock);
        job->status = JOB_STATUS_FREE;
        ctx->write_idx = (ctx->write_idx + 1) % ctx->ring_size;
        pthread_cond_signal(&ctx->cond_reader);
        pthread_mutex_unlock(&ctx->lock);
    }
    return NULL;
}

/**
 * @brief Orchestrates the multithreaded streaming compression or decompression
 * engine.
 *
 * This function initializes the stream context, allocates the necessary ring
 * buffer memory for jobs and I/O buffers, and spawns the worker threads and the
 * asynchronous writer thread. It acts as the main "producer" (reader) loop.
 *
 * **Architecture: Producer-Consumer with Ring Buffer**
 * - **Ring Buffer:** A fixed-size array of `zxc_stream_job_t` structures.
 * - **Producer (Main Thread):** Reads chunks from `f_in` and fills "Free" slots
 *   in the ring buffer. It blocks if no slots are free (backpressure).
 * - **Workers:** Pick up "Filled" jobs from a queue, process them, and mark
 *   them as "Processed".
 * - **Consumer (Writer Thread):** Waits for the *next sequential* job to be
 *   "Processed", writes it to `f_out`, and marks the slot as "Free".
 *
 * **Double-Buffering & Zero-Copy:**
 * We allocate `alloc_in` and `alloc_out` buffers for each job. The reader reads
 * directly into `in_buf`, and the writer writes directly from `out_buf`,
 * minimizing memory copies.
 *
 * @param[in] f_in Pointer to the input file stream (source).
 * @param[out] f_out Pointer to the output file stream (destination).
 * @param[in] n_threads Number of worker threads to spawn. If set to 0 or less,
 * the function automatically detects the number of online processors.
 * @param[in] mode Operation mode: 1 for compression, 0 for decompression.
 * @param[in] level Compression level to be applied (relevant for compression
 * mode).
 * @param[in] checksum_enabled Flag indicating whether to enable checksum
 * generation/verification.
* @param[in] func Function pointer to the chunk processor (compression or * decompression logic). * * @return The total number of bytes written to the output stream on success, or * -1 if an initialization or I/O error occurred. */ static int64_t zxc_stream_engine_run(FILE* f_in, FILE* f_out, const int n_threads, const int mode, const int level, const size_t block_size, const int checksum_enabled, zxc_chunk_processor_t func, zxc_progress_callback_t progress_cb, void* user_data) { zxc_stream_ctx_t ctx; ZXC_MEMSET(&ctx, 0, sizeof(ctx)); size_t runtime_chunk_sz = (block_size > 0) ? block_size : ZXC_BLOCK_SIZE_DEFAULT; int file_has_chk = 0; // Try to get input file size for progress tracking (compression mode only) // For decompression, the CLI precomputes the size and passes it via user_data uint64_t total_file_size = 0; if (mode == 1 && progress_cb) { const long long saved_pos = ftello(f_in); if (saved_pos >= 0) { if (fseeko(f_in, 0, SEEK_END) == 0) { const long long size = ftello(f_in); if (size > 0) total_file_size = (uint64_t)size; fseeko(f_in, saved_pos, SEEK_SET); } } } if (mode == 0) { // Decompression Mode: Read and validate file header uint8_t h[ZXC_FILE_HEADER_SIZE]; if (UNLIKELY(fread(h, 1, ZXC_FILE_HEADER_SIZE, f_in) != ZXC_FILE_HEADER_SIZE || zxc_read_file_header(h, ZXC_FILE_HEADER_SIZE, &runtime_chunk_sz, &file_has_chk) != ZXC_OK)) return ZXC_ERROR_BAD_HEADER; } int num_threads = (n_threads > 0) ? n_threads : (int)sysconf(_SC_NPROCESSORS_ONLN); if (num_threads > ZXC_MAX_THREADS) num_threads = ZXC_MAX_THREADS; // Reserve 1 thread for Writer/Reader overhead if possible const int num_workers = (num_threads > 1) ? num_threads - 1 : 1; ctx.compression_mode = mode; ctx.processor = func; ctx.io_error = 0; ctx.compression_level = level; ctx.ring_size = (size_t)num_workers * 4U; ctx.chunk_size = runtime_chunk_sz; ctx.checksum_enabled = checksum_enabled; ctx.file_has_checksum = mode == 1 ? 
checksum_enabled : file_has_chk; ctx.progress_cb = progress_cb; ctx.progress_user_data = user_data; ctx.total_input_bytes = total_file_size; uint32_t d_global_hash = 0; const uint64_t max_out = zxc_compress_bound(runtime_chunk_sz); const size_t raw_alloc_in = ((mode) ? runtime_chunk_sz : max_out) + ZXC_PAD_SIZE; const size_t alloc_in = (raw_alloc_in + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; const size_t raw_alloc_out = ((mode) ? max_out : runtime_chunk_sz) + ZXC_PAD_SIZE; const size_t alloc_out = (raw_alloc_out + ZXC_ALIGNMENT_MASK) & ~ZXC_ALIGNMENT_MASK; size_t alloc_size = ctx.ring_size * (sizeof(zxc_stream_job_t) + sizeof(int) + alloc_in + alloc_out); uint8_t* const mem_block = zxc_aligned_malloc(alloc_size, ZXC_CACHE_LINE_SIZE); if (UNLIKELY(!mem_block)) return ZXC_ERROR_MEMORY; uint8_t* ptr = mem_block; ctx.jobs = (zxc_stream_job_t*)ptr; ptr += ctx.ring_size * sizeof(zxc_stream_job_t); ctx.worker_queue = (int*)ptr; ptr += ctx.ring_size * sizeof(int); uint8_t* buf_in = ptr; ptr += ctx.ring_size * alloc_in; uint8_t* buf_out = ptr; ZXC_MEMSET(buf_in, 0, ctx.ring_size * alloc_in); for (size_t i = 0; i < ctx.ring_size; i++) { ctx.jobs[i].job_id = i; ctx.jobs[i].status = JOB_STATUS_FREE; ctx.jobs[i].in_buf = buf_in + (i * alloc_in); ctx.jobs[i].in_cap = alloc_in - ZXC_PAD_SIZE; ctx.jobs[i].in_sz = 0; ctx.jobs[i].out_buf = buf_out + (i * alloc_out); ctx.jobs[i].out_cap = alloc_out - ZXC_PAD_SIZE; ctx.jobs[i].result_sz = 0; } pthread_mutex_init(&ctx.lock, NULL); pthread_cond_init(&ctx.cond_reader, NULL); pthread_cond_init(&ctx.cond_worker, NULL); pthread_cond_init(&ctx.cond_writer, NULL); pthread_t* const workers = malloc((size_t)num_workers * sizeof(pthread_t)); if (UNLIKELY(!workers)) { zxc_aligned_free(mem_block); return ZXC_ERROR_MEMORY; } for (int i = 0; i < num_workers; i++) pthread_create(&workers[i], NULL, zxc_stream_worker, &ctx); writer_args_t w_args = {&ctx, f_out, 0, 0, 0}; if (mode == 1 && f_out) { uint8_t h[ZXC_FILE_HEADER_SIZE]; 
zxc_write_file_header(h, ZXC_FILE_HEADER_SIZE, runtime_chunk_sz, checksum_enabled); if (UNLIKELY(fwrite(h, 1, ZXC_FILE_HEADER_SIZE, f_out) != ZXC_FILE_HEADER_SIZE)) ctx.io_error = 1; w_args.total_bytes = ZXC_FILE_HEADER_SIZE; } pthread_t writer_th; pthread_create(&writer_th, NULL, zxc_async_writer, &w_args); int read_idx = 0; int read_eof = 0; uint64_t total_src_bytes = 0; // Reader Loop: Reads from file, prepares jobs, pushes to worker queue. while (!read_eof && !ctx.io_error) { zxc_stream_job_t* const job = &ctx.jobs[read_idx]; pthread_mutex_lock(&ctx.lock); while (job->status != JOB_STATUS_FREE) pthread_cond_wait(&ctx.cond_reader, &ctx.lock); pthread_mutex_unlock(&ctx.lock); if (UNLIKELY(ctx.io_error)) break; size_t read_sz = 0; if (mode == 1) { read_sz = fread(job->in_buf, 1, runtime_chunk_sz, f_in); total_src_bytes += read_sz; if (UNLIKELY(read_sz == 0)) read_eof = 1; } else { uint8_t bh_buf[ZXC_BLOCK_HEADER_SIZE]; size_t h_read = fread(bh_buf, 1, ZXC_BLOCK_HEADER_SIZE, f_in); if (UNLIKELY(h_read < ZXC_BLOCK_HEADER_SIZE)) { read_eof = 1; } else { zxc_block_header_t bh; if (UNLIKELY(zxc_read_block_header(bh_buf, ZXC_BLOCK_HEADER_SIZE, &bh) != ZXC_OK)) { read_eof = 1; goto _job_prepared; } if (bh.block_type == ZXC_BLOCK_EOF) { if (UNLIKELY(bh.comp_size != 0)) { ctx.io_error = 1; goto _job_prepared; } read_eof = 1; read_sz = 0; goto _job_prepared; } const int has_crc = ctx.file_has_checksum; const size_t checksum_sz = (has_crc ? 
ZXC_BLOCK_CHECKSUM_SIZE : 0); const size_t body_total = bh.comp_size + checksum_sz; const size_t total_len = ZXC_BLOCK_HEADER_SIZE + body_total; if (UNLIKELY(total_len > job->in_cap)) { ctx.io_error = 1; break; } ZXC_MEMCPY(job->in_buf, bh_buf, ZXC_BLOCK_HEADER_SIZE); // Single fread for body + checksum (reduces syscalls) const size_t body_read = fread(job->in_buf + ZXC_BLOCK_HEADER_SIZE, 1, body_total, f_in); if (UNLIKELY(body_read != body_total)) { read_eof = 1; } else if (has_crc) { // Update Global Hash for Decompression const uint32_t b_crc = zxc_le32(job->in_buf + ZXC_BLOCK_HEADER_SIZE + bh.comp_size); d_global_hash = zxc_hash_combine_rotate(d_global_hash, b_crc); } read_sz = ZXC_BLOCK_HEADER_SIZE + body_read; } } _job_prepared: if (UNLIKELY(read_eof && read_sz == 0)) break; job->in_sz = read_sz; pthread_mutex_lock(&ctx.lock); job->status = JOB_STATUS_FILLED; ctx.worker_queue[ctx.wq_head] = read_idx; ctx.wq_head = (ctx.wq_head + 1) % ctx.ring_size; ctx.wq_count++; read_idx = (read_idx + 1) % ctx.ring_size; pthread_cond_signal(&ctx.cond_worker); pthread_mutex_unlock(&ctx.lock); if (UNLIKELY(read_sz < runtime_chunk_sz && mode == 1)) read_eof = 1; } zxc_stream_job_t* const end_job = &ctx.jobs[read_idx]; pthread_mutex_lock(&ctx.lock); while (end_job->status != JOB_STATUS_FREE) pthread_cond_wait(&ctx.cond_reader, &ctx.lock); end_job->result_sz = -1; end_job->status = JOB_STATUS_PROCESSED; pthread_cond_broadcast(&ctx.cond_writer); pthread_mutex_unlock(&ctx.lock); pthread_join(writer_th, NULL); pthread_mutex_lock(&ctx.lock); ctx.shutdown_workers = 1; pthread_cond_broadcast(&ctx.cond_worker); pthread_mutex_unlock(&ctx.lock); for (int i = 0; i < num_workers; i++) pthread_join(workers[i], NULL); // Write EOF Block if compression and no error if (mode == 1 && !ctx.io_error && w_args.total_bytes >= 0) { uint8_t final_buf[ZXC_BLOCK_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE]; uint8_t* const eof_buf = final_buf; uint8_t* const footer = final_buf + ZXC_BLOCK_HEADER_SIZE; 
zxc_block_header_t eof_bh = { .block_type = ZXC_BLOCK_EOF, .block_flags = 0, .reserved = 0, .comp_size = 0}; zxc_write_block_header(eof_buf, ZXC_BLOCK_HEADER_SIZE, &eof_bh); zxc_write_file_footer(footer, ZXC_FILE_FOOTER_SIZE, total_src_bytes, w_args.global_hash, checksum_enabled); if (UNLIKELY(f_out && fwrite(final_buf, 1, sizeof(final_buf), f_out) != sizeof(final_buf))) return ZXC_ERROR_IO; w_args.total_bytes += sizeof(final_buf); } else if (mode == 0 && !ctx.io_error) { // Verification: Expect 12-byte footer uint8_t footer[ZXC_FILE_FOOTER_SIZE]; if (UNLIKELY(fread(footer, 1, ZXC_FILE_FOOTER_SIZE, f_in) != ZXC_FILE_FOOTER_SIZE)) { ctx.io_error = 1; } else { // Verify Footer Content: Source Size and Global Checksum int valid = (zxc_le64(footer) == (uint64_t)w_args.total_bytes); if (valid && checksum_enabled && ctx.file_has_checksum) valid = (zxc_le32(footer + sizeof(uint64_t)) == d_global_hash); if (UNLIKELY(!valid)) ctx.io_error = 1; } } free(workers); zxc_aligned_free(mem_block); if (UNLIKELY(ctx.io_error)) return ZXC_ERROR_IO; return w_args.total_bytes; } int64_t zxc_stream_compress(FILE* f_in, FILE* f_out, const zxc_compress_opts_t* opts) { if (UNLIKELY(!f_in)) return ZXC_ERROR_NULL_INPUT; const int n_threads = opts ? opts->n_threads : 0; const int checksum_enabled = opts ? opts->checksum_enabled : 0; const int level = (opts && opts->level > 0) ? opts->level : ZXC_LEVEL_DEFAULT; const size_t block_size = (opts && opts->block_size > 0) ? opts->block_size : ZXC_BLOCK_SIZE_DEFAULT; zxc_progress_callback_t cb = opts ? opts->progress_cb : NULL; void* ud = opts ? opts->user_data : NULL; if (UNLIKELY(!zxc_validate_block_size(block_size))) return ZXC_ERROR_BAD_BLOCK_SIZE; return zxc_stream_engine_run(f_in, f_out, n_threads, 1, level, block_size, checksum_enabled, zxc_compress_chunk_wrapper, cb, ud); } int64_t zxc_stream_decompress(FILE* f_in, FILE* f_out, const zxc_decompress_opts_t* opts) { if (UNLIKELY(!f_in)) return ZXC_ERROR_NULL_INPUT; const int n_threads = opts ? 
opts->n_threads : 0; const int checksum_enabled = opts ? opts->checksum_enabled : 0; zxc_progress_callback_t cb = opts ? opts->progress_cb : NULL; void* ud = opts ? opts->user_data : NULL; return zxc_stream_engine_run(f_in, f_out, n_threads, 0, 0, 0, checksum_enabled, (zxc_chunk_processor_t)zxc_decompress_chunk_wrapper, cb, ud); } int64_t zxc_stream_get_decompressed_size(FILE* f_in) { if (UNLIKELY(!f_in)) return ZXC_ERROR_NULL_INPUT; const long long saved_pos = ftello(f_in); if (UNLIKELY(saved_pos < 0)) return ZXC_ERROR_IO; // Get file size if (fseeko(f_in, 0, SEEK_END) != 0) return ZXC_ERROR_IO; const long long file_size = ftello(f_in); if (UNLIKELY(file_size < (long long)(ZXC_FILE_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE))) { fseeko(f_in, saved_pos, SEEK_SET); return ZXC_ERROR_SRC_TOO_SMALL; } uint8_t header[ZXC_FILE_HEADER_SIZE]; if (UNLIKELY(fseeko(f_in, 0, SEEK_SET) != 0 || fread(header, 1, ZXC_FILE_HEADER_SIZE, f_in) != ZXC_FILE_HEADER_SIZE)) { fseeko(f_in, saved_pos, SEEK_SET); return ZXC_ERROR_IO; } if (UNLIKELY(zxc_le32(header) != ZXC_MAGIC_WORD)) { fseeko(f_in, saved_pos, SEEK_SET); return ZXC_ERROR_BAD_MAGIC; } uint8_t footer[ZXC_FILE_FOOTER_SIZE]; if (UNLIKELY(fseeko(f_in, file_size - ZXC_FILE_FOOTER_SIZE, SEEK_SET) != 0 || fread(footer, 1, ZXC_FILE_FOOTER_SIZE, f_in) != ZXC_FILE_FOOTER_SIZE)) { fseeko(f_in, saved_pos, SEEK_SET); return ZXC_ERROR_IO; } fseeko(f_in, saved_pos, SEEK_SET); return (int64_t)zxc_le64(footer); } zxc-0.9.1/src/lib/zxc_internal.h000066400000000000000000001356111515574030100165430ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** * @file zxc_internal.h * @brief Internal definitions, constants, SIMD helpers, and utility functions. * * This header is **not** part of the public API. It is shared across the * library's translation units and contains: * - Platform detection and SIMD intrinsic includes. 
* - Compiler-abstraction macros (LIKELY, PREFETCH, MEMCPY, ALIGN, ...). * - Endianness detection and byte-swap helpers. * - File-format constants (magic word, header sizes, block sizes, ...). * - Inline helpers for hashing, endian-safe loads/stores, bit manipulation, * ZigZag encoding, aligned allocation, and bitstream reading. * - Internal function prototypes for chunk-level compression/decompression. * * @warning Do not include this header from user code; use the public headers * zxc_buffer.h, zxc_stream.h, or zxc_sans_io.h instead. */ #ifndef ZXC_INTERNAL_H #define ZXC_INTERNAL_H #include #include #include #include #include #include #include "../../include/zxc_buffer.h" #include "../../include/zxc_constants.h" #include "../../include/zxc_sans_io.h" #include "rapidhash.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup internal Internal Helpers * @brief Platform abstractions, constants, and utility functions (private). * @{ */ /** * @name Atomic Qualifier * @brief Provides a portable atomic / volatile qualifier. * * If C11 atomics are available, @c ZXC_ATOMIC expands to @c _Atomic; * otherwise it falls back to @c volatile. * @{ */ #if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && \ !defined(__STDC_NO_ATOMICS__) #include #define ZXC_ATOMIC _Atomic #define ZXC_USE_C11_ATOMICS 1 #else #define ZXC_ATOMIC volatile #define ZXC_USE_C11_ATOMICS 0 #endif /** @} */ /* end of Atomic Qualifier */ /** * @name SIMD Intrinsics & Compiler Macros * @brief Auto-detected SIMD feature macros for x86 (SSE/AVX) and ARM (NEON). * * Depending on the target architecture and compiler flags the following macros * may be defined: * - @c ZXC_USE_AVX512 - AVX-512F + AVX-512BW available. * - @c ZXC_USE_AVX2 - AVX2 available. * - @c ZXC_USE_NEON64 - AArch64 NEON available. * - @c ZXC_USE_NEON32 - ARMv7 NEON available. 
* @{ */ #if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) #include #include #if defined(__AVX512F__) && defined(__AVX512BW__) #ifndef ZXC_USE_AVX512 #define ZXC_USE_AVX512 #endif #endif #if defined(__AVX2__) #ifndef ZXC_USE_AVX2 #define ZXC_USE_AVX2 #endif #endif #elif (defined(__ARM_NEON) || defined(__ARM_NEON__) || defined(_M_ARM64) || \ defined(ZXC_USE_NEON32) || defined(ZXC_USE_NEON64)) #if !defined(_MSC_VER) #include #endif #include #if defined(__aarch64__) || defined(_M_ARM64) #ifndef ZXC_USE_NEON64 #define ZXC_USE_NEON64 #endif #else #ifndef ZXC_USE_NEON32 #define ZXC_USE_NEON32 #endif #endif #endif /** @} */ /* end of SIMD Intrinsics */ /** * @name Compiler Abstractions * @brief Portable wrappers for branch hints, prefetch, memory ops, alignment, * and forced inlining. * @{ */ #if defined(__GNUC__) || defined(__clang__) /** @def LIKELY * @brief Branch prediction hint: expression is likely true. * @param x Expression to evaluate. */ #define LIKELY(x) (__builtin_expect(!!(x), 1)) /** @def UNLIKELY * @brief Branch prediction hint: expression is unlikely to be true. * @param x Expression to evaluate. */ #define UNLIKELY(x) (__builtin_expect(!!(x), 0)) /** @def RESTRICT * @brief Pointer aliasing hint (maps to __restrict__). */ #define RESTRICT __restrict__ /** @def ZXC_PREFETCH_READ * @brief Prefetch data for reading. * @param ptr Pointer to data to prefetch. */ #define ZXC_PREFETCH_READ(ptr) __builtin_prefetch((const void*)(ptr), 0, 3) /** @def ZXC_PREFETCH_WRITE * @brief Prefetch data for writing. * @param ptr Pointer to data to prefetch. */ #define ZXC_PREFETCH_WRITE(ptr) __builtin_prefetch((const void*)(ptr), 1, 3) /** @def ZXC_MEMCPY * @brief Optimized memory copy using compiler built-in. */ #define ZXC_MEMCPY(dst, src, n) __builtin_memcpy(dst, src, n) /** @def ZXC_MEMSET * @brief Optimized memory set using compiler built-in. 
*/ #define ZXC_MEMSET(dst, val, n) __builtin_memset(dst, val, n) /** @def ZXC_ALIGN * @brief Specifies memory alignment for a variable or structure. * @param x Alignment boundary in bytes (must be a power of 2). */ #define ZXC_ALIGN(x) __attribute__((aligned(x))) /** @def ZXC_ALWAYS_INLINE * @brief Forces a function to be inlined at all optimization levels. */ #define ZXC_ALWAYS_INLINE inline __attribute__((always_inline)) #elif defined(_MSC_VER) #include #if defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) #include #define ZXC_PREFETCH_READ(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) #define ZXC_PREFETCH_WRITE(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) #else #define ZXC_PREFETCH_READ(ptr) __prefetch((const void*)(ptr)) #define ZXC_PREFETCH_WRITE(ptr) __prefetch((const void*)(ptr)) #endif #define LIKELY(x) (x) #define UNLIKELY(x) (x) #define RESTRICT __restrict #pragma intrinsic(memcpy, memset) #define ZXC_MEMCPY(dst, src, n) memcpy(dst, src, n) #define ZXC_MEMSET(dst, val, n) memset(dst, val, n) /** @def ZXC_ALIGN * @brief Specifies memory alignment for a variable or structure (MSVC). * @param x Alignment boundary in bytes (must be a power of 2). */ #define ZXC_ALIGN(x) __declspec(align(x)) /** @def ZXC_ALWAYS_INLINE * @brief Forces a function to be inlined at all optimization levels (MSVC). */ #define ZXC_ALWAYS_INLINE __forceinline #pragma intrinsic(_BitScanReverse) #else #define LIKELY(x) (x) #define UNLIKELY(x) (x) #define RESTRICT #define ZXC_PREFETCH_READ(ptr) #define ZXC_PREFETCH_WRITE(ptr) #define ZXC_MEMCPY(dst, src, n) memcpy(dst, src, n) #define ZXC_MEMSET(dst, val, n) memset(dst, val, n) /** @def ZXC_ALWAYS_INLINE * @brief Forces a function to be inlined (fallback for non-GCC/Clang/MSVC compilers). */ #define ZXC_ALWAYS_INLINE inline #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L #include /** @def ZXC_ALIGN * @brief Specifies memory alignment using C11 _Alignas. 
* @param x Alignment boundary in bytes (must be a power of 2). */ #define ZXC_ALIGN(x) _Alignas(x) #else /** @def ZXC_ALIGN * @brief No-op alignment macro for compilers without alignment support. * @param x Ignored (alignment not supported). */ #define ZXC_ALIGN(x) #endif #endif /** @} */ /* end of Compiler Abstractions */ /** * @name Endianness Detection * @brief Compile-time detection of host byte order. * * Defines exactly one of @c ZXC_LITTLE_ENDIAN or @c ZXC_BIG_ENDIAN. * @{ */ #ifndef ZXC_LITTLE_ENDIAN #if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || \ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define ZXC_LITTLE_ENDIAN #elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) #define ZXC_BIG_ENDIAN #else #warning "Endianness not detected, defaulting to little-endian" #define ZXC_LITTLE_ENDIAN #endif #endif /** @} */ /* end of Endianness Detection */ /** * @name Byte-Swap Helpers * @brief 16/32/64-bit byte-swap macros (only defined under @c ZXC_BIG_ENDIAN). * @{ */ #ifdef ZXC_BIG_ENDIAN #if defined(__GNUC__) || defined(__clang__) #define ZXC_BSWAP16(x) __builtin_bswap16(x) #define ZXC_BSWAP32(x) __builtin_bswap32(x) #define ZXC_BSWAP64(x) __builtin_bswap64(x) #elif defined(_MSC_VER) #define ZXC_BSWAP16(x) _byteswap_ushort(x) #define ZXC_BSWAP32(x) _byteswap_ulong(x) #define ZXC_BSWAP64(x) _byteswap_uint64(x) #else #define ZXC_BSWAP16(x) ((uint16_t)(((x) >> 8) | ((x) << 8))) #define ZXC_BSWAP32(x) \ ((uint32_t)(((x) >> 24) | (((x) >> 8) & 0xFF00) | (((x) << 8) & 0xFF0000) | ((x) << 24))) #define ZXC_BSWAP64(x) \ ((uint64_t)(((uint64_t)ZXC_BSWAP32((uint32_t)(x)) << 32) | ZXC_BSWAP32((uint32_t)((x) >> 32)))) #endif #endif /** @} */ /* end of Byte-Swap Helpers */ /** * @name File Format Constants * @brief Magic words, header sizes, block sizes, and related constants. * @{ */ /** @brief Magic word identifying ZXC files (little-endian 0x9CB02EF5). 
*/ #define ZXC_MAGIC_WORD 0x9CB02EF5U /** @brief Current on-disk file format version. */ #define ZXC_FILE_FORMAT_VERSION 5 /** @brief Size of stdio I/O buffers (1 MB). */ #define ZXC_IO_BUFFER_SIZE (1024 * 1024) /** @brief Maximum number of threads allowed for streaming operations. */ #define ZXC_MAX_THREADS 1024 /** @brief Safety padding appended to buffers to tolerate overruns. */ #define ZXC_PAD_SIZE 32 /** @brief Number of bits per byte (constant 8). */ #define ZXC_BITS_PER_BYTE 8 /** @brief Assumed CPU cache line size for alignment. */ #define ZXC_CACHE_LINE_SIZE 64 /** @brief Bitmask for cache-line alignment checks. */ #define ZXC_ALIGNMENT_MASK (ZXC_CACHE_LINE_SIZE - 1) /** @brief Maximum byte length of a variable-byte encoded integer. */ #define ZXC_VBYTE_MAX_LEN 5 /** @brief Allocation-safe max vbyte length (sufficient for < 2 MB blocks). */ #define ZXC_VBYTE_ALLOC_LEN 3 /** @brief File header size: Magic(4)+Version(1)+Chunk(1)+Flags(1)+Reserved(7)+CRC(2). */ #define ZXC_FILE_HEADER_SIZE 16 /** @brief Bit flag in the Flags byte indicating checksum presence (bit 7). */ #define ZXC_FILE_FLAG_HAS_CHECKSUM 0x80U /** @brief Mask for the checksum algorithm id (bits 0-3). */ #define ZXC_FILE_CHECKSUM_ALGO_MASK 0x0FU /** @brief Block header size: Type(1)+Flags(1)+Reserved(1)+CRC(1)+CompSize(4). */ #define ZXC_BLOCK_HEADER_SIZE 8 /** @brief Size of the per-block checksum field in bytes. */ #define ZXC_BLOCK_CHECKSUM_SIZE 4 /** @brief Binary size of a NUM block sub-header. */ #define ZXC_NUM_HEADER_BINARY_SIZE 16 /** @brief Binary size of a GLO block sub-header. */ #define ZXC_GLO_HEADER_BINARY_SIZE 16 /** @brief Binary size of a GHI block sub-header. */ #define ZXC_GHI_HEADER_BINARY_SIZE 16 /** @brief Binary size of a NUM chunk sub-frame header (nvals + bits + base + psize). */ #define ZXC_NUM_CHUNK_HEADER_SIZE 16 /** @brief Number of numeric values to decode in a single SIMD batch (NUM block). 
*/ #define ZXC_NUM_DEC_BATCH 32 /** @brief Maximum number of frames that can be processed in a single compression operation (NUM * block). */ #define ZXC_NUM_FRAME_SIZE 128 /** @brief Binary size of a section descriptor (comp_size + raw_size). */ #define ZXC_SECTION_DESC_BINARY_SIZE 8 /** @brief 32-bit mask for extracting sizes from a section descriptor. */ #define ZXC_SECTION_SIZE_MASK 0xFFFFFFFFU /** @brief Number of sections in a GLO block. */ #define ZXC_GLO_SECTIONS 4 /** @brief Number of sections in a GHI block. */ #define ZXC_GHI_SECTIONS 3 /** @brief Checksum algorithm id for rapidhash (default). */ #define ZXC_CHECKSUM_RAPIDHASH 0x00U /** @brief Size of the global checksum appended after EOF block (4 bytes). */ #define ZXC_GLOBAL_CHECKSUM_SIZE 4 /** @brief File footer size: original_size(8) + global_checksum(4). */ #define ZXC_FILE_FOOTER_SIZE 12 /** @name GLO Token Constants * @brief 4-bit literal length / 4-bit match length / 16-bit offset. * @{ */ /** @brief Bits for Literal Length in a GLO token. */ #define ZXC_TOKEN_LIT_BITS 4 /** @brief Bits for Match Length in a GLO token. */ #define ZXC_TOKEN_ML_BITS 4 /** @brief Mask to extract Literal Length from a GLO token. */ #define ZXC_TOKEN_LL_MASK ((1U << ZXC_TOKEN_LIT_BITS) - 1) /** @brief Mask to extract Match Length from a GLO token. */ #define ZXC_TOKEN_ML_MASK ((1U << ZXC_TOKEN_ML_BITS) - 1) /** @} */ /** @name GHI Sequence Constants * @brief 8-bit literal length / 8-bit match length / 16-bit offset. * @{ */ /** @brief Bits for Literal Length in a GHI sequence. */ #define ZXC_SEQ_LL_BITS 8 /** @brief Bits for Match Length in a GHI sequence. */ #define ZXC_SEQ_ML_BITS 8 /** @brief Bits for Offset in a GHI sequence. */ #define ZXC_SEQ_OFF_BITS 16 /** @brief Mask to extract Literal Length from a GHI sequence. */ #define ZXC_SEQ_LL_MASK ((1U << ZXC_SEQ_LL_BITS) - 1) /** @brief Mask to extract Match Length from a GHI sequence. 
*/ #define ZXC_SEQ_ML_MASK ((1U << ZXC_SEQ_ML_BITS) - 1) /** @brief Mask to extract Offset from a GHI sequence. */ #define ZXC_SEQ_OFF_MASK ((1U << ZXC_SEQ_OFF_BITS) - 1) /** @} */ /** @name Literal Stream Encoding * @{ */ /** @brief Flag bit indicating an RLE run in the literal stream (0x80). */ #define ZXC_LIT_RLE_FLAG 0x80U /** @brief Mask to extract the run/literal length (lower 7 bits). */ #define ZXC_LIT_LEN_MASK (ZXC_LIT_RLE_FLAG - 1) /** @} */ /** @name LZ77 Constants * @brief Hash table geometry, sliding window, and match parameters. * * The hash table uses 14 bits for addressing (16 384 entries), doubled to * keep the load factor below 0.5. Each entry stores * `(epoch << 18) | offset`, totalling 64 KB of memory. * The 64 KB sliding window allows `chain_table` to use `uint16_t`. * @{ */ /** @brief Address bits for the LZ77 hash table (2^14 = 16 384 max). */ #define ZXC_LZ_HASH_BITS 14 /** @brief Marsaglia multiplicative hash constant for 4-byte hashing. */ #define ZXC_LZ_HASH_PRIME1 0x2D35182DU /** @brief Marsaglia/Vigna xorshift* multiplier for 5-byte hashing. */ #define ZXC_LZ_HASH_PRIME2 0x2545F4914F6CDD1DULL /** @brief Maximum number of entries in the hash table. */ #define ZXC_LZ_HASH_SIZE (1U << ZXC_LZ_HASH_BITS) /** @brief Sliding window size (64 KB). */ #define ZXC_LZ_WINDOW_SIZE (1U << 16) /** @brief Minimum match length for an LZ77 match. */ #define ZXC_LZ_MIN_MATCH_LEN 5 /** @brief Base bias added to encoded offsets (stored = actual - bias). */ #define ZXC_LZ_OFFSET_BIAS 1 /** @brief Maximum allowed offset distance. */ #define ZXC_LZ_MAX_DIST (ZXC_LZ_WINDOW_SIZE - 1) /** * @brief Number of bits reserved for epoch tracking in compressed pointers. * Derived from chunk size: 2^18 = ZXC_BLOCK_SIZE_DEFAULT => 32 - 18 = 14 bits. */ #define ZXC_EPOCH_BITS 14 /** @brief Mask to extract the offset bits from a compressed pointer. 
*/ #define ZXC_OFFSET_MASK ((1U << (32 - ZXC_EPOCH_BITS)) - 1) /** @brief Maximum number of epochs supported by the compression system. */ #define ZXC_MAX_EPOCH (1U << ZXC_EPOCH_BITS) /** @} */ /** @name Hash Prime Constants * @brief Mixing primes used by internal hash functions. * @{ */ /** @brief Hash prime 1. */ #define ZXC_HASH_PRIME1 0x9E3779B97F4A7C15ULL /** @brief Hash prime 2. */ #define ZXC_HASH_PRIME2 0xD2D84A61D2D84A61ULL /** @} */ /** @name Block Size Helpers * @brief Runtime helpers for variable block sizes. * @{ */ /** * @brief Integer log-base-2 for a 32-bit value. * @param v Must be a power of two (undefined for zero). * @return Floor of log2(v). */ static ZXC_ALWAYS_INLINE uint32_t zxc_log2_u32(uint32_t v) { uint32_t r = 0; while (v >>= 1) r++; return r; } /** * @brief Validates a block size. * Must be a power of two in [ZXC_BLOCK_SIZE_MIN, ZXC_BLOCK_SIZE_MAX]. * @return 1 if valid, 0 otherwise. */ static ZXC_ALWAYS_INLINE int zxc_validate_block_size(const size_t bs) { return bs >= ZXC_BLOCK_SIZE_MIN && bs <= ZXC_BLOCK_SIZE_MAX && (bs & (bs - 1)) == 0; } /** @} */ /** @} */ /* end of File Format Constants */ /** * @struct zxc_lz77_params_t * @brief Search parameters for LZ77 compression levels. */ typedef struct { int search_depth; /**< Maximum number of matches to check in hash chain. */ int sufficient_len; /**< Stop searching if match length >= this value. */ int use_lazy; /**< Enable lazy matching (check next position for better match). */ int lazy_attempts; /**< Maximum matches to check during lazy matching. */ uint32_t step_base; /**< Base step size for literal advancement. */ uint32_t step_shift; /**< Shift amount for distance-based stepping. */ } zxc_lz77_params_t; /** * @brief Retrieves LZ77 compression parameters based on the specified compression level. * * This inline function returns the appropriate LZ77 parameters configuration * for the given compression level. 
* * @param[in] level The compression level to use for determining LZ77 parameters. * @return zxc_lz77_params_t The LZ77 parameters structure corresponding to the specified level. */ static ZXC_ALWAYS_INLINE zxc_lz77_params_t zxc_get_lz77_params(const int level) { if (level >= 5) return (zxc_lz77_params_t){64, 256, 1, 16, 1, 31}; // search_depth, sufficient_len, use_lazy, lazy_attempts, step_base, step_shift static const zxc_lz77_params_t table[5] = { {4, 16, 0, 0, 4, 4}, // fallback {4, 16, 0, 0, 4, 4}, // level 1 {6, 24, 0, 0, 3, 6}, // level 2 {3, 18, 1, 4, 1, 4}, // level 3 {3, 18, 1, 4, 1, 5} // level 4 }; return table[level < 1 ? 1 : level]; } /** * @enum zxc_block_type_t * @brief Defines the different types of data blocks supported by the ZXC * format. * * This enumeration categorizes blocks based on the compression strategy * applied: * - `ZXC_BLOCK_RAW` (0): No compression. Used when data is incompressible (high * entropy) or when compression would expand the data size. * - `ZXC_BLOCK_GLO` (1): General-purpose compression (LZ77 + Bitpacking). This * is the default for most data (text, binaries, JSON, etc.). Includes 4 sections descriptors. * - `ZXC_BLOCK_NUM` (2): Specialized compression for arrays of 32-bit integers. * Uses Delta Encoding + ZigZag + Bitpacking. * - `ZXC_BLOCK_GHI` (3): General-purpose high-velocity mode using LZ77 with advanced * techniques (lazy matching, step skipping) for maximum ratio. Includes 3 sections descriptors. * - `ZXC_BLOCK_EOF` (255): End of file marker. */ typedef enum { ZXC_BLOCK_RAW = 0, ZXC_BLOCK_GLO = 1, ZXC_BLOCK_NUM = 2, ZXC_BLOCK_GHI = 3, ZXC_BLOCK_EOF = 255 } zxc_block_type_t; /** * @enum zxc_section_encoding_t * @brief Specifies the encoding methods used for internal data sections. * * These modes determine how specific components (like literals, match lengths, * or offsets) are stored within a block. * - `ZXC_SECTION_ENCODING_RAW`: Data is stored uncompressed. * - `ZXC_SECTION_ENCODING_RLE`: Run-Length Encoding. 
* - `ZXC_SECTION_ENCODING_BITPACK`: Bitpacking for integer values. * - `ZXC_SECTION_ENCODING_FSE`: Finite State Entropy (Reserved). * - `ZXC_SECTION_ENCODING_BITPACK_FSE`: Combined Bitpacking and FSE (Reserved). */ typedef enum { ZXC_SECTION_ENCODING_RAW = 0, ZXC_SECTION_ENCODING_RLE = 1, ZXC_SECTION_ENCODING_BITPACK = 2, ZXC_SECTION_ENCODING_FSE = 3, // Reserved ZXC_SECTION_ENCODING_BITPACK_FSE = 4 // Reserved } zxc_section_encoding_t; /** * @struct zxc_gnr_header_t * @brief Header specific to General (LZ-based) compression blocks. * * This header follows the main block header when the block type is GLO/GHI. It * describes the layout of sequences and literals. * * @var zxc_gnr_header_t::n_sequences * The total count of LZ sequences in the block. * @var zxc_gnr_header_t::n_literals * The total count of literal bytes. * @var zxc_gnr_header_t::enc_lit * Encoding method used for the literal stream. * @var zxc_gnr_header_t::enc_litlen * Encoding method used for the literal lengths stream. * @var zxc_gnr_header_t::enc_mlen * Encoding method used for the match lengths stream. * @var zxc_gnr_header_t::enc_off * Encoding method used for the offset stream. */ typedef struct { uint32_t n_sequences; // Number of sequences uint32_t n_literals; // Number of literals uint8_t enc_lit; // Literal encoding uint8_t enc_litlen; // Literal lengths encoding uint8_t enc_mlen; // Match lengths encoding uint8_t enc_off; // Offset encoding (Unused in Token format, kept for alignment) } zxc_gnr_header_t; /** * @struct zxc_section_desc_t * @brief Describes the size attributes of a specific data section. * * Used to track the compressed and uncompressed sizes of sub-components * (e.g., a literal stream or offset stream) within a block. */ typedef struct { uint64_t sizes; /**< Packed sizes: compressed size (low 32 bits) | raw size (high 32 bits). */ } zxc_section_desc_t; /** * @struct zxc_num_header_t * @brief Header specific to Numeric compression blocks. 
* * This header follows the main block header when the block type is NUM. * * @var zxc_num_header_t::n_values * The total number of numeric values encoded in the block. * @var zxc_num_header_t::frame_size * The size of the frame used for processing. */ typedef struct { uint64_t n_values; uint16_t frame_size; } zxc_num_header_t; /** * @struct zxc_bit_reader_t * @brief Internal bit reader structure for ZXC compression/decompression. * * This structure maintains the state of the bit stream reading operation. * It buffers bits from the input byte stream into an accumulator to allow * reading variable-length bit sequences. */ typedef struct { const uint8_t* ptr; /**< Pointer to the current position in the input byte stream. */ const uint8_t* end; /**< Pointer to the end of the input byte stream. */ uint64_t accum; /**< Bit accumulator holding buffered bits (64-bit buffer). */ int bits; /**< Number of valid bits currently in the accumulator. */ } zxc_bit_reader_t; /** * ============================================================================ * MEMORY & ENDIANNESS HELPERS * ============================================================================ * Functions to handle unaligned memory access and Little Endian conversion. */ /** * @brief Reads a 16-bit unsigned integer from memory in little-endian format. * * This function interprets the bytes at the given memory address as a * little-endian 16-bit integer, regardless of the host system's endianness. * It is marked as always inline for performance critical paths. * * @param[in] p Pointer to the memory location to read from. * @return The 16-bit unsigned integer value read from memory. */ static ZXC_ALWAYS_INLINE uint16_t zxc_le16(const void* p) { uint16_t v; ZXC_MEMCPY(&v, p, sizeof(v)); #ifdef ZXC_BIG_ENDIAN return ZXC_BSWAP16(v); #else return v; #endif } /** * @brief Reads a 32-bit unsigned integer from memory in little-endian format. 
* * This function interprets the bytes at the given pointer address as a * little-endian 32-bit integer, regardless of the host system's endianness. * It is marked as always inline for performance critical paths. * * @param[in] p Pointer to the memory location to read from. * @return The 32-bit unsigned integer value read from memory. */ static ZXC_ALWAYS_INLINE uint32_t zxc_le32(const void* p) { uint32_t v; ZXC_MEMCPY(&v, p, sizeof(v)); #ifdef ZXC_BIG_ENDIAN return ZXC_BSWAP32(v); #else return v; #endif } /** * @brief Reads a 64-bit unsigned integer from memory in little-endian format. * * This function interprets the bytes at the given memory address as a * little-endian 64-bit integer, regardless of the host system's endianness. * It is marked as always inline for performance critical paths. * * @param[in] p Pointer to the memory location to read from. * @return The 64-bit unsigned integer value read from memory. */ static ZXC_ALWAYS_INLINE uint64_t zxc_le64(const void* p) { uint64_t v; ZXC_MEMCPY(&v, p, sizeof(v)); #ifdef ZXC_BIG_ENDIAN return ZXC_BSWAP64(v); #else return v; #endif } /** * @brief Stores a 16-bit integer in memory using little-endian byte order. * * This function copies the value of a 16-bit unsigned integer to the specified * memory location. It uses memcpy to avoid strict aliasing violations and * potential unaligned access issues. * * @note This function assumes the system is little-endian or that the compiler * optimizes the memcpy to a store instruction that handles endianness if necessary * (though the implementation shown is a direct copy). * * @param[out] p Pointer to the destination memory where the value will be stored. * Must point to a valid memory region of at least 2 bytes. * @param[in] v The 16-bit unsigned integer value to store. 
*/ static ZXC_ALWAYS_INLINE void zxc_store_le16(void* p, const uint16_t v) { #ifdef ZXC_BIG_ENDIAN const uint16_t s = ZXC_BSWAP16(v); ZXC_MEMCPY(p, &s, sizeof(s)); #else ZXC_MEMCPY(p, &v, sizeof(v)); #endif } /** * @brief Stores a 32-bit unsigned integer in little-endian format at the specified memory location. * * This function writes the 32-bit value `v` to the memory pointed to by `p`. * It uses `ZXC_MEMCPY` to ensure safe memory access, avoiding potential alignment issues * that could occur with direct pointer casting on some architectures. * * @note This function is marked as `ZXC_ALWAYS_INLINE` to minimize function call overhead. * * @param[out] p Pointer to the destination memory where the value will be stored. * @param[in] v The 32-bit unsigned integer value to store. */ static ZXC_ALWAYS_INLINE void zxc_store_le32(void* p, const uint32_t v) { #ifdef ZXC_BIG_ENDIAN const uint32_t s = ZXC_BSWAP32(v); ZXC_MEMCPY(p, &s, sizeof(s)); #else ZXC_MEMCPY(p, &v, sizeof(v)); #endif } /** * @brief Stores a 64-bit unsigned integer in little-endian format at the specified memory location. * * This function copies the 64-bit value `v` to the memory pointed to by `p`. * It uses `ZXC_MEMCPY` to ensure safe memory access, avoiding potential alignment issues * that might occur with direct pointer dereferencing on some architectures. * * @note This function assumes the system is little-endian or that the compiler optimizes * the memcpy to a store instruction that handles endianness correctly if `ZXC_MEMCPY` * is defined appropriately. * * @param[out] p Pointer to the destination memory where the value will be stored. * @param[in] v The 64-bit unsigned integer value to store. */ static ZXC_ALWAYS_INLINE void zxc_store_le64(void* p, const uint64_t v) { #ifdef ZXC_BIG_ENDIAN const uint64_t s = ZXC_BSWAP64(v); ZXC_MEMCPY(p, &s, sizeof(s)); #else ZXC_MEMCPY(p, &v, sizeof(v)); #endif } /** * @brief Computes the 1-byte checksum for block headers. 
* * Implementation based on Marsaglia's Xorshift (PRNG) principles. * * @param[in] p Pointer to the input data to be hashed (8 bytes) * @return uint8_t The computed hash value. */ static ZXC_ALWAYS_INLINE uint8_t zxc_hash8(const uint8_t* p) { const uint64_t v = zxc_le64(p); uint64_t h = v ^ ZXC_HASH_PRIME1; h ^= h << 13; h ^= h >> 7; h ^= h << 17; return (uint8_t)((h >> 32) ^ h); } /** * @brief Computes the 2-byte checksum for file headers. * * This function generates a hash value by reading data from the given pointer. * The result is a 16-bit hash. * Implementation based on Marsaglia's Xorshift (PRNG) principles. * * @param[in] p Pointer to the input data to be hashed (16 bytes) * @return uint16_t The computed hash value. */ static ZXC_ALWAYS_INLINE uint16_t zxc_hash16(const uint8_t* p) { const uint64_t h1 = zxc_le64(p); const uint64_t h2 = zxc_le64(p + 8); uint64_t h = h1 ^ h2 ^ ZXC_HASH_PRIME2; h ^= h << 13; h ^= h >> 7; h ^= h << 17; uint32_t res = (uint32_t)((h >> 32) ^ h); return (uint16_t)((res >> 16) ^ res); } /** * @brief Copies 16 bytes from the source memory location to the destination memory location. * * This function is forced to be inlined and uses SIMD intrinsics when available. * SSE2 on x86/x64, NEON on ARM, or memcpy as fallback. * * @param[out] dst Pointer to the destination memory block. * @param[in] src Pointer to the source memory block. */ static ZXC_ALWAYS_INLINE void zxc_copy16(void* dst, const void* src) { #if defined(ZXC_USE_AVX2) || defined(ZXC_USE_AVX512) // AVX2/AVX512: Single 128-bit unaligned load/store _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src)); #elif defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32) vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); #else ZXC_MEMCPY(dst, src, 16); #endif } /** * @brief Copies 32 bytes from source to destination using SIMD when available. * * Uses AVX2 on x86, NEON on ARM64/ARM32, or two 16-byte copies as fallback. 
* * @param[out] dst Pointer to the destination memory block. * @param[in] src Pointer to the source memory block. */ static ZXC_ALWAYS_INLINE void zxc_copy32(void* dst, const void* src) { #if defined(ZXC_USE_AVX2) || defined(ZXC_USE_AVX512) // AVX2/AVX512: Single 256-bit (32 byte) unaligned load/store _mm256_storeu_si256((__m256i*)dst, _mm256_loadu_si256((const __m256i*)src)); #elif defined(ZXC_USE_NEON64) || defined(ZXC_USE_NEON32) // NEON: Two 128-bit (16 byte) unaligned load/stores vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); vst1q_u8((uint8_t*)dst + 16, vld1q_u8((const uint8_t*)src + 16)); #else ZXC_MEMCPY(dst, src, 32); #endif } /** * @brief Counts trailing zeros in a 32-bit unsigned integer. * * This function returns the number of contiguous zero bits starting from the * least significant bit (LSB). If the input is 0, it returns 32. * * It utilizes compiler-specific built-ins for GCC/Clang (`__builtin_ctz`) and * MSVC (`_BitScanForward`) for optimal performance. If no supported compiler * is detected, it falls back to a portable De Bruijn sequence implementation. * * @param[in] x The 32-bit unsigned integer to scan. * @return The number of trailing zeros (0-32). */ static ZXC_ALWAYS_INLINE int zxc_ctz32(const uint32_t x) { if (x == 0) return 32; #if defined(__GNUC__) || defined(__clang__) return __builtin_ctz(x); #elif defined(_MSC_VER) unsigned long r; _BitScanForward(&r, x); return (int)r; #else // Fallback De Bruijn (32 bits) static const int DeBruijn32[32] = {0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; return DeBruijn32[((uint32_t)((x & (0U - x)) * 0x077CB531U)) >> 27]; #endif } /** * @brief Counts the number of trailing zeros in a 64-bit unsigned integer. * * This function determines the number of zero bits following the least significant * one bit in the binary representation of `x`. * * @param[in] x The 64-bit unsigned integer to scan. 
* @return The number of trailing zeros. Returns 64 if `x` is 0. * * @note This implementation uses compiler built-ins for GCC/Clang (`__builtin_ctzll`) * and MSVC (`_BitScanForward64`) when available for optimal performance. * It falls back to a De Bruijn sequence multiplication method for other compilers. */ static ZXC_ALWAYS_INLINE int zxc_ctz64(const uint64_t x) { if (x == 0) return 64; #if defined(__GNUC__) || defined(__clang__) return __builtin_ctzll(x); #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) unsigned long r; _BitScanForward64(&r, x); return (int)r; #else // Fallback De Bruijn static const int Debruijn64[64] = { 0, 1, 48, 2, 57, 49, 28, 3, 61, 58, 50, 42, 38, 29, 17, 4, 62, 55, 59, 36, 53, 51, 43, 22, 45, 39, 33, 30, 24, 18, 12, 5, 63, 47, 56, 27, 60, 41, 37, 16, 54, 35, 52, 21, 44, 32, 23, 11, 46, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6}; return Debruijn64[((x & (0ULL - x)) * 0x03F79D71B4CA8B09ULL) >> 58]; #endif } /** * @brief Calculates the index of the highest set bit (most significant bit) in a 32-bit integer. * * This function determines the position of the most significant bit that is set to 1. * * @param[in] n The 32-bit unsigned integer to analyze. * @return The 0-based index of the highest set bit. If n is 0, the behavior is undefined. */ static ZXC_ALWAYS_INLINE uint8_t zxc_highbit32(const uint32_t n) { #ifdef _MSC_VER unsigned long index; return (n == 0) ? 0 : (_BitScanReverse(&index, n) ? (uint8_t)(index + 1) : 0); #else return (n == 0) ? 0 : (32 - __builtin_clz(n)); #endif } /** * @brief Encodes a signed 32-bit integer using ZigZag encoding. * * ZigZag encoding maps signed integers to unsigned integers so that numbers with a small * absolute value (for instance, -1) have a small variant encoded value too. 
It does this * by "zig-zagging" back and forth through the positive and negative integers: * * 0 => 0 * -1 => 1 * 1 => 2 * -2 => 3 * 2 => 4 * * This is particularly useful for variable-length encoding (varint) of signed integers, * as standard varint encoding is inefficient for negative numbers (which are interpreted * as very large unsigned integers). * * @param[in] n The signed 32-bit integer to encode. * @return The ZigZag encoded unsigned 32-bit integer. */ static ZXC_ALWAYS_INLINE uint32_t zxc_zigzag_encode(const int32_t n) { return ((uint32_t)n << 1) ^ (uint32_t)(-(int32_t)((uint32_t)n >> 31)); } /** * @brief Decodes a 32-bit unsigned integer using ZigZag decoding. * * ZigZag encoding maps signed integers to unsigned integers so that numbers with a small * absolute value (for instance, -1) have a small variant encoded value too. It does this * by "zig-zagging" back and forth through the positive and negative integers: * 0 => 0, -1 => 1, 1 => 2, -2 => 3, 2 => 4, etc. * * This function reverses that process, converting the unsigned representation back into * the original signed 32-bit integer. * * @param[in] n The unsigned 32-bit integer to decode. * @return The decoded signed 32-bit integer. */ static ZXC_ALWAYS_INLINE int32_t zxc_zigzag_decode(const uint32_t n) { return (int32_t)(n >> 1) ^ -(int32_t)(n & 1); } /** * @brief Allocates aligned memory in a cross-platform manner. * * This function provides a unified interface for allocating memory with a specific * alignment requirement. It wraps `_aligned_malloc` for Windows * environments and `posix_memalign` for POSIX-compliant systems. * * @param[in] size The size of the memory block to allocate, in bytes. * @param[in] alignment The alignment value, which must be a power of two and a multiple * of `sizeof(void *)`. * @return A pointer to the allocated memory block, or NULL if the allocation fails. * The returned pointer must be freed using the corresponding aligned free function. 
*/ void* zxc_aligned_malloc(const size_t size, const size_t alignment); /** * @brief Frees memory previously allocated with an aligned allocation function. * * This function provides a cross-platform wrapper for freeing aligned memory. * On Windows, it calls `_aligned_free`. * On other platforms, it falls back to the standard `free` function. * * @param[in] ptr A pointer to the memory block to be freed. If ptr is NULL, no operation is * performed. */ void zxc_aligned_free(void* ptr); /* * ============================================================================ * COMPRESSION CONTEXT & STRUCTS * ============================================================================ */ /* * INTERNAL API * ------------ */ /** * @brief Calculates a 32-bit hash for a given input buffer. * @param[in] input Pointer to the data buffer. * @param[in] len Length of the data in bytes. * @param[in] hash_method Checksum algorithm identifier (e.g., ZXC_CHECKSUM_RAPIDHASH). * @return The calculated 32-bit hash value. */ static ZXC_ALWAYS_INLINE uint32_t zxc_checksum(const void* RESTRICT input, const size_t len, const uint8_t hash_method) { uint64_t hash; if (LIKELY(hash_method == ZXC_CHECKSUM_RAPIDHASH)) hash = rapidhash(input, len); else // Default fallthrough to rapidhash for unknown types (safe default) hash = rapidhash(input, len); return (uint32_t)(hash ^ (hash >> (sizeof(uint32_t) * ZXC_BITS_PER_BYTE))); } /** * @brief Combines a running hash with a new block hash using rotate-left and XOR. * * This function updates a global checksum by rotating the current hash left by 1 bit * (with wraparound) and XORing with the new block hash. This provides a simple but * effective rolling hash that depends on the order of blocks. * * Formula: result = ((hash << 1) | (hash >> 31)) ^ block_hash * * @param[in] hash The current running hash value. * @param[in] block_hash The hash of the new block to combine. * @return The updated combined hash value. 
*/ static ZXC_ALWAYS_INLINE uint32_t zxc_hash_combine_rotate(const uint32_t hash, const uint32_t block_hash) { return ((hash << 1) | (hash >> 31)) ^ block_hash; } /** * @brief Loads up to 7 bytes from memory in little-endian order into a uint64_t. * * This is used for partial reads at stream boundaries where fewer than 8 bytes * remain. Unlike ZXC_MEMCPY into a uint64_t (which is endian-dependent), this * function always produces a value with byte 0 in the least-significant bits. * * @param[in] p Pointer to the source bytes. * @param[in] n Number of bytes to read (must be < 8). * @return The loaded value in native host order, with bytes arranged as if * read from a little-endian stream. */ static ZXC_ALWAYS_INLINE uint64_t zxc_le_partial(const uint8_t* p, size_t n) { #ifdef ZXC_BIG_ENDIAN uint64_t v = 0; for (size_t i = 0; i < n; i++) v |= (uint64_t)p[i] << (i * 8); return v; #else uint64_t v = 0; n = n > sizeof(v) ? sizeof(v) : n; ZXC_MEMCPY(&v, p, n); return v; #endif } /** * @brief Initializes a bit reader structure. * * Sets up the internal state of the bit reader to read from the specified * source buffer. * * @param[out] br Pointer to the bit reader structure to initialize. * @param[in] src Pointer to the source buffer containing the data to read. * @param[in] size The size of the source buffer in bytes. */ static ZXC_ALWAYS_INLINE void zxc_br_init(zxc_bit_reader_t* RESTRICT br, const uint8_t* RESTRICT src, const size_t size) { br->ptr = src; br->end = src + size; // Safety check: ensure we have at least 8 bytes to fill the accumulator if (UNLIKELY(size < sizeof(uint64_t))) { br->accum = zxc_le_partial(src, size); br->ptr += size; br->bits = (int)(size * 8); } else { br->accum = zxc_le64(br->ptr); br->ptr += sizeof(uint64_t); br->bits = sizeof(uint64_t) * 8; } } /** * @brief Ensures that the bit reader buffer contains at least the specified * number of bits. 
* * This function checks if the internal buffer of the bit reader has enough bits * available to satisfy a subsequent read operation of `needed` bits. If not, it * refills the buffer from the source. * * @param[in,out] br Pointer to the bit reader context. * @param[in] needed The number of bits required to be available in the buffer. */ static ZXC_ALWAYS_INLINE void zxc_br_ensure(zxc_bit_reader_t* RESTRICT br, const int needed) { if (UNLIKELY(br->bits < needed)) { const int safe_bits = (br->bits < 0) ? 0 : br->bits; br->bits = safe_bits; // Mask out garbage bits (retain only valid existing bits) #if defined(__BMI2__) && (defined(__x86_64__) || defined(_M_X64)) br->accum = _bzhi_u64(br->accum, safe_bits); #else br->accum &= ((1ULL << safe_bits) - 1); #endif // Calculate how many bytes we can read // We want to fill up to the accumulation capability (64 bits for uint64_t) // Bytes needed = (capacity_bits - safe_bits) / 8 const int bytes_needed = ((int)(sizeof(uint64_t) * 8) - safe_bits) >> 3; // Bounds check: zxc_le64 always reads 8 bytes, so we need at least 8 const size_t bytes_left = (size_t)(br->end - br->ptr); if (UNLIKELY(bytes_left < sizeof(uint64_t))) { // Partial read (slow path / end of stream) const size_t to_read = (bytes_left < (size_t)bytes_needed) ? bytes_left : (size_t)bytes_needed; const uint64_t raw = zxc_le_partial(br->ptr, to_read); br->accum |= (raw << safe_bits); br->ptr += to_read; br->bits = safe_bits + (int)to_read * 8; } else { // Fast path: full 8-byte read is safe const uint64_t raw = zxc_le64(br->ptr); br->accum |= (raw << safe_bits); br->ptr += bytes_needed; br->bits = safe_bits + bytes_needed * 8; } } } /** * @brief Bit-packs a stream of 32-bit integers into a destination buffer. * * Compresses an array of 32-bit integers by packing them using a specified * number of bits per integer. * * @param[in] src Pointer to the source array of 32-bit integers. * @param[in] count The number of integers to pack. 
* @param[out] dst Pointer to the destination buffer where packed data will be * written. * @param[in] dst_cap The capacity of the destination buffer in bytes. * @param[in] bits The number of bits to use for each integer during packing. * @return int The number of bytes written to the destination buffer, or a negative * error code on failure. */ int zxc_bitpack_stream_32(const uint32_t* RESTRICT src, const size_t count, uint8_t* RESTRICT dst, const size_t dst_cap, const uint8_t bits); /** * @brief Writes a numeric header structure to a destination buffer. * * Serializes the `zxc_num_header_t` structure into the output stream. * * @param[out] dst Pointer to the destination buffer. * @param[in] rem The remaining space in the destination buffer. * @param[in] nh Pointer to the numeric header structure to write. * @return int The number of bytes written, or a negative error code if the buffer * is too small. */ int zxc_write_num_header(uint8_t* RESTRICT dst, const size_t rem, const zxc_num_header_t* RESTRICT nh); /** * @brief Reads a numeric header structure from a source buffer. * * Deserializes data from the input stream into a `zxc_num_header_t` structure. * * @param[in] src Pointer to the source buffer. * @param[in] src_size The size of the source buffer available for reading. * @param[out] nh Pointer to the numeric header structure to populate. * @return int The number of bytes read from the source, or a negative error code on * failure. */ int zxc_read_num_header(const uint8_t* RESTRICT src, const size_t src_size, zxc_num_header_t* RESTRICT nh); /** * @brief Writes a generic header and section descriptors to a destination * buffer. * * Serializes the `zxc_gnr_header_t` and an array of 4 section descriptors. * * @param[out] dst Pointer to the destination buffer. * @param[in] rem The remaining space in the destination buffer. * @param[in] gh Pointer to the generic header structure to write. * @param[in] desc Array of 4 section descriptors to write. 
* @return int The number of bytes written, or a negative error code if the buffer * is too small. */ int zxc_write_glo_header_and_desc(uint8_t* RESTRICT dst, const size_t rem, const zxc_gnr_header_t* RESTRICT gh, const zxc_section_desc_t desc[ZXC_GLO_SECTIONS]); /** * @brief Reads a generic header and section descriptors from a source buffer. * * Deserializes data into a `zxc_gnr_header_t` and an array of 4 section * descriptors. * * @param[in] src Pointer to the source buffer. * @param[in] len The length of the source buffer available for reading. * @param[out] gh Pointer to the generic header structure to populate. * @param[out] desc Array of 4 section descriptors to populate. * * @return int Returns ZXC_OK on success, or a negative zxc_error_t code on failure. */ int zxc_read_glo_header_and_desc(const uint8_t* RESTRICT src, const size_t len, zxc_gnr_header_t* RESTRICT gh, zxc_section_desc_t desc[ZXC_GLO_SECTIONS]); /** * @brief Writes a record header and description to the destination buffer. * * @param dst Pointer to the destination buffer where the header and description will be written. * @param rem Remaining size available in the destination buffer. * @param gh Pointer to the GNR header structure containing header information. * @param desc Array of 3 section descriptors to be written along with the header. * * @return int Returns the number of bytes written on success, or a negative error code on failure. */ int zxc_write_ghi_header_and_desc(uint8_t* RESTRICT dst, const size_t rem, const zxc_gnr_header_t* RESTRICT gh, const zxc_section_desc_t desc[ZXC_GHI_SECTIONS]); /** * @brief Reads a record header and section descriptors from a buffer. * * This function parses the source buffer to extract a general header and * up to three section descriptors from a ZXC record. * * @param[in] src Pointer to the source buffer containing the record data. * @param[in] len Length of the source buffer in bytes. 
* @param[out] gh Pointer to a zxc_gnr_header_t structure to store the parsed header. * @param[out] desc Array of 3 zxc_section_desc_t structures to store the parsed section * descriptors. * * @return int Returns ZXC_OK on success, or a negative zxc_error_t code on failure. */ int zxc_read_ghi_header_and_desc(const uint8_t* RESTRICT src, const size_t len, zxc_gnr_header_t* RESTRICT gh, zxc_section_desc_t desc[ZXC_GHI_SECTIONS]); /** * @brief Internal wrapper function to decompress a single chunk of data. * * This function handles the decompression of a specific chunk from the source * buffer into the destination buffer using the provided compression context. It * serves as an abstraction layer over the core decompression logic. * * @param[in,out] ctx Pointer to the ZXC compression context structure containing * internal state and configuration. * @param[in] src Pointer to the source buffer containing compressed data. * @param[in] src_sz Size of the compressed data in the source buffer (in bytes). * @param[out] dst Pointer to the destination buffer where decompressed data will * be written. * @param[in] dst_cap Capacity of the destination buffer (maximum bytes that can be * written). * * @return int Returns ZXC_OK on success, or a negative zxc_error_t code on failure. * Specific error codes depend on the underlying ZXC * implementation. */ int zxc_decompress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT src, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); /** * @brief Wraps the internal chunk compression logic. * * This function acts as a wrapper to compress a single chunk of data using the * provided compression context. It handles the interaction with the underlying * compression algorithm for a specific block of memory. * * @param[in,out] ctx Pointer to the ZXC compression context containing configuration * and state. * @param[in] chunk Pointer to the source buffer containing the raw data to * compress. 
* @param[in] src_sz The size of the source chunk in bytes. * @param[out] dst Pointer to the destination buffer where compressed data will be * written. * @param[in] dst_cap The capacity of the destination buffer (maximum bytes to write). * * @return int The number of bytes written to the destination buffer on success, * or a negative error code on failure. */ int zxc_compress_chunk_wrapper(zxc_cctx_t* RESTRICT ctx, const uint8_t* RESTRICT chunk, const size_t src_sz, uint8_t* RESTRICT dst, const size_t dst_cap); /** @} */ /* end of internal */ #ifdef __cplusplus } #endif #endif // ZXC_INTERNAL_Hzxc-0.9.1/tests/000077500000000000000000000000001515574030100134705ustar00rootroot00000000000000zxc-0.9.1/tests/fuzz_decompress.c000066400000000000000000000012401515574030100170530ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ #include #include #include #include "../include/zxc_buffer.h" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { static uint8_t out_buf[1048576]; /* 1 MB max for fuzzer */ size_t out_capacity = sizeof(out_buf); uint64_t expected_size = zxc_get_decompressed_size(data, size); if (expected_size > 0 && expected_size <= out_capacity) out_capacity = (size_t)expected_size; zxc_decompress(data, size, out_buf, out_capacity, 0); return 0; }zxc-0.9.1/tests/fuzz_roundtrip.c000066400000000000000000000025311515574030100167410ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
* SPDX-License-Identifier: BSD-3-Clause */ #include #include #include #include #include #include "../include/zxc_buffer.h" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { static void* comp_buf = NULL; static size_t comp_cap = 0; static void* decomp_buf = NULL; static size_t decomp_cap = 0; const size_t bound = zxc_compress_bound(size); if (bound > comp_cap) { void* new_buf = realloc(comp_buf, bound); if (!new_buf) return 0; comp_buf = new_buf; comp_cap = bound; } const int level = size > 0 ? (data[0] % 5) + 1 : 1; zxc_compress_opts_t copts = {.level = level}; const int64_t csize = zxc_compress(data, size, comp_buf, bound, &copts); if (csize < 0) return 0; if (size == 0) return 0; if (size > decomp_cap) { void* new_buf = realloc(decomp_buf, size); if (!new_buf) return 0; decomp_buf = new_buf; decomp_cap = size; } const int64_t dsize = zxc_decompress(comp_buf, (size_t)csize, decomp_buf, size, NULL); if (dsize >= 0) { assert((size_t)dsize == size); assert(memcmp(data, decomp_buf, size) == 0); } return 0; }zxc-0.9.1/tests/test.c000066400000000000000000002326541515574030100146270ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ #include #include #include #include #include "../include/zxc_buffer.h" #include "../include/zxc_error.h" #include "../include/zxc_stream.h" #include "../src/lib/zxc_internal.h" // --- Helpers --- // Generates a buffer of random data (To force RAW) void gen_random_data(uint8_t* const buf, const size_t size) { for (size_t i = 0; i < size; i++) buf[i] = rand() & 0xFF; } // Generates repetitive data (To force GLO/GHI/LZ) void gen_lz_data(uint8_t* const buf, const size_t size) { const char* const pattern = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod " "tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim " "veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea " "commodo consequat. Duis aute irure dolor in reprehenderit in voluptate " "velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint " "occaecat cupidatat non proident, sunt in culpa qui officia deserunt " "mollit anim id est laborum."; const size_t pat_len = strlen(pattern); for (size_t i = 0; i < size; i++) buf[i] = pattern[i % pat_len]; } // Generates a regular numeric sequence (To force NUM) void gen_num_data(uint8_t* const buf, const size_t size) { // Fill with 32-bit integers uint32_t* const ptr = (uint32_t*)buf; const size_t count = size / 4; uint32_t val = 0; for (size_t i = 0; i < count; i++) { // Arithmetic sequence: 0, 100, 200... // Deltas are constant (100), perfect for NUM ptr[i] = val; val += 100; } } // Generates numeric sequence with 0 deltas (all identical) void gen_num_data_zero(uint8_t* const buf, const size_t size) { uint32_t* const ptr = (uint32_t*)buf; const size_t count = size / 4; for (size_t i = 0; i < count; i++) { ptr[i] = 42; } } // Generates numeric data with alternating small deltas (+1, -1) void gen_num_data_small(uint8_t* const buf, const size_t size) { uint32_t* const ptr = (uint32_t*)buf; const size_t count = size / 4; uint32_t val = 1000; for (size_t i = 0; i < count; i++) { ptr[i] = val; val += (i % 2 == 0) ? 1 : -1; } } // Generates numeric data with very large deltas to maximize bit width void gen_num_data_large(uint8_t* const buf, const size_t size) { uint32_t* const ptr = (uint32_t*)buf; const size_t count = size / 4; for (size_t i = 0; i < count; i++) { // Alternate between 0 and 0xFFFFFFFF (delta is huge) ptr[i] = (i % 2 == 0) ? 
0 : 0xFFFFFFFF; } } void gen_binary_data(uint8_t* const buf, const size_t size) { // Pattern with problematic bytes that could be corrupted in text mode: // 0x0A (LF), 0x0D (CR), 0x00 (NULL), 0x1A (EOF/CTRL-Z), 0xFF const uint8_t pattern[] = { 0x5A, 0x58, 0x43, 0x00, // "ZXC" + NULL 0x0A, 0x0D, 0x0A, 0x00, // LF, CR, LF, NULL 0xFF, 0xFE, 0x0A, 0x0D, // High bytes + LF/CR 0x1A, 0x00, 0x0A, 0x0D, // EOF marker + NULL + LF/CR 0x00, 0x00, 0x0A, 0x0A, // Multiple NULLs and LFs }; const size_t pat_len = sizeof(pattern); for (size_t i = 0; i < size; i++) { buf[i] = pattern[i % pat_len]; } } // Generates data with small offsets (<=255 bytes) to force 1-byte offset encoding // This creates short repeating patterns with matches very close to each other void gen_small_offset_data(uint8_t* const buf, const size_t size) { // Create short repeating patterns with very short distances. // Uses a 5-byte period (not aligned to uint32_t) to avoid being // classified as NUM data by zxc_probe_is_numeric(). // LZ will match at offset=5 (< 255), exercising 8-bit offset encoding. const uint8_t pattern[] = "ABCDE"; for (size_t i = 0; i < size; i++) { buf[i] = pattern[i % 5]; } } // Generates data with large offsets (>255 bytes) to force 2-byte offset encoding // This creates patterns where matches are far apart void gen_large_offset_data(uint8_t* const buf, const size_t size) { // First 300 bytes: unique random data (no matches possible) for (size_t i = 0; i < 300 && i < size; i++) { buf[i] = (uint8_t)((i * 7 + 13) % 256); } // Then: repeat patterns from the beginning (offset > 255) for (size_t i = 300; i < size; i++) { buf[i] = buf[i - 300]; // Offset of 300 bytes (requires 2-byte encoding) } } // Generic Round-Trip test function (Compress -> Decompress -> Compare) int test_round_trip(const char* test_name, const uint8_t* input, size_t size, int level, int checksum_enabled) { printf("=== TEST: %s (Sz: %zu, Lvl: %d, CRC: %s) ===\n", test_name, size, level, checksum_enabled ? 
"Enabled" : "Disabled"); FILE* const f_in = tmpfile(); FILE* const f_comp = tmpfile(); FILE* const f_decomp = tmpfile(); if (!f_in || !f_comp || !f_decomp) { perror("tmpfile"); if (f_in) fclose(f_in); if (f_comp) fclose(f_comp); if (f_decomp) fclose(f_decomp); return 0; } fwrite(input, 1, size, f_in); fseek(f_in, 0, SEEK_SET); zxc_compress_opts_t _sco1 = { .n_threads = 1, .level = level, .checksum_enabled = checksum_enabled}; if (zxc_stream_compress(f_in, f_comp, &_sco1) < 0) { printf("Compression Failed!\n"); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 0; } long comp_size = ftell(f_comp); printf("Compressed Size: %ld (Ratio: %.2f)\n", comp_size, (double)size / (comp_size > 0 ? comp_size : 1)); fseek(f_comp, 0, SEEK_SET); zxc_decompress_opts_t _sdo2 = {.n_threads = 1, .checksum_enabled = checksum_enabled}; if (zxc_stream_decompress(f_comp, f_decomp, &_sdo2) < 0) { printf("Decompression Failed!\n"); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 0; } long decomp_size = ftell(f_decomp); if (decomp_size != (long)size) { printf("Size Mismatch! Expected %zu, got %ld\n", size, decomp_size); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 0; } fseek(f_decomp, 0, SEEK_SET); uint8_t* out_buf = malloc(size > 0 ? 
size : 1); if (fread(out_buf, 1, size, f_decomp) != size) { printf("Read validation failed (incomplete read)!\n"); free(out_buf); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 0; } if (size > 0 && memcmp(input, out_buf, size) != 0) { printf("Data Mismatch (Content Corruption)!\n"); free(out_buf); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 0; } printf("PASS\n\n"); free(out_buf); fclose(f_in); fclose(f_comp); fclose(f_decomp); return 1; } // Checks that the stream decompression can accept NULL output (Integrity Check Mode) int test_null_output_decompression() { printf("=== TEST: Unit - NULL Output Decompression (Integrity Check) ===\n"); size_t size = 64 * 1024; uint8_t* input = malloc(size); if (!input) return 0; gen_lz_data(input, size); FILE* f_in = tmpfile(); FILE* f_comp = tmpfile(); if (!f_in || !f_comp) { if (f_in) fclose(f_in); if (f_comp) fclose(f_comp); free(input); return 0; } fwrite(input, 1, size, f_in); fseek(f_in, 0, SEEK_SET); // Compress with checksum zxc_compress_opts_t _sco3 = {.n_threads = 1, .level = 3, .checksum_enabled = 1}; if (zxc_stream_compress(f_in, f_comp, &_sco3) < 0) { printf("Compression Failed!\n"); fclose(f_in); fclose(f_comp); free(input); return 0; } fseek(f_comp, 0, SEEK_SET); // Decompress with NULL output // This should return the decompressed size but write nothing zxc_decompress_opts_t _sdo4 = {.n_threads = 1, .checksum_enabled = 1}; int64_t d_sz = zxc_stream_decompress(f_comp, NULL, &_sdo4); if (d_sz != (int64_t)size) { printf("Failed: Expected size %zu, got %lld\n", size, (long long)d_sz); fclose(f_in); fclose(f_comp); free(input); return 0; } printf("PASS\n\n"); fclose(f_in); fclose(f_comp); free(input); return 1; } // Checks that the utility function calculates a sufficient size int test_max_compressed_size_logic() { printf("=== TEST: Unit - zxc_compress_bound ===\n"); // Case 1: 0 bytes (must at least contain the header) size_t sz0 = zxc_compress_bound(0); if (sz0 == 0) { printf("Failed: Size for 0 
bytes should not be 0 (headers required)\n"); return 0; } // Case 2: Small input size_t input_val = 100; size_t sz100 = zxc_compress_bound(input_val); if (sz100 < input_val) { printf("Failed: Output buffer size (%zu) too small for input (%zu)\n", sz100, input_val); return 0; } // Case 3: Consistency (size should not decrease arbitrarily) if (zxc_compress_bound(2000) < zxc_compress_bound(1000)) { printf("Failed: Max size function is not monotonic\n"); return 0; } printf("PASS\n\n"); return 1; } // Checks API robustness against invalid arguments int test_invalid_arguments() { printf("=== TEST: Unit - Invalid Arguments ===\n"); FILE* f = tmpfile(); if (!f) return 0; FILE* f_valid = tmpfile(); if (!f_valid) { fclose(f); return 0; } // Prepare a valid compressed stream for decompression tests zxc_compress_opts_t _sco5 = {.n_threads = 1, .level = 1, .checksum_enabled = 0}; zxc_stream_compress(f, f_valid, &_sco5); rewind(f_valid); // 1. Input NULL -> Must fail zxc_compress_opts_t _sco6 = {.n_threads = 1, .level = 5, .checksum_enabled = 0}; if (zxc_stream_compress(NULL, f, &_sco6) >= 0) { printf("Failed: Should return < 0 when Input is NULL\n"); fclose(f); return 0; } // 2. Output NULL -> Must SUCCEED (Benchmark / Dry-Run Mode) zxc_compress_opts_t _sco7 = {.n_threads = 1, .level = 5, .checksum_enabled = 0}; if (zxc_stream_compress(f, NULL, &_sco7) < 0) { printf("Failed: Should allow NULL Output (Benchmark mode support)\n"); fclose(f); return 0; } // 3. Decompression Input NULL -> Must fail zxc_decompress_opts_t _sdo8 = {.n_threads = 1, .checksum_enabled = 0}; if (zxc_stream_decompress(NULL, f, &_sdo8) >= 0) { printf("Failed: Decompress should return < 0 when Input is NULL\n"); fclose(f); return 0; } // 3b. 
Decompression Output NULL -> Must SUCCEED (Benchmark mode) zxc_decompress_opts_t _sdo9 = {.n_threads = 1, .checksum_enabled = 0}; if (zxc_stream_decompress(f_valid, NULL, &_sdo9) < 0) { printf("Failed: Decompress should allow NULL Output (Benchmark mode support)\n"); fclose(f_valid); return 0; } // 4. zxc_compress NULL checks zxc_compress_opts_t _co10 = {.level = 3, .checksum_enabled = 0}; if (zxc_compress(NULL, 100, (void*)1, 100, &_co10) >= 0) { printf("Failed: zxc_compress should return < 0 when src is NULL\n"); fclose(f); return 0; } zxc_compress_opts_t _co11 = {.level = 3, .checksum_enabled = 0}; if (zxc_compress((void*)1, 100, NULL, 100, &_co11) >= 0) { printf("Failed: zxc_compress should return < 0 when dst is NULL\n"); fclose(f); return 0; } // 5. zxc_decompress NULL checks zxc_decompress_opts_t _do12 = {.checksum_enabled = 0}; if (zxc_decompress(NULL, 100, (void*)1, 100, &_do12) >= 0) { printf("Failed: zxc_decompress should return < 0 when src is NULL\n"); fclose(f); return 0; } zxc_decompress_opts_t _do13 = {.checksum_enabled = 0}; if (zxc_decompress((void*)1, 100, NULL, 100, &_do13) >= 0) { printf("Failed: zxc_decompress should return < 0 when dst is NULL\n"); fclose(f); return 0; } // 6. 
zxc_compress_bound overflow check if (zxc_compress_bound(SIZE_MAX) != 0) { printf("Failed: zxc_compress_bound should return 0 on overflow\n"); fclose(f); return 0; } printf("PASS\n\n"); fclose(f); return 1; } // Checks behavior with truncated compressed input int test_truncated_input() { printf("=== TEST: Unit - Truncated Input (Stream) ===\n"); const size_t SRC_SIZE = 1024; uint8_t src[1024]; gen_lz_data(src, SRC_SIZE); size_t cap = zxc_compress_bound(SRC_SIZE); uint8_t* compressed = malloc(cap); uint8_t* decomp_buf = malloc(SRC_SIZE); if (!compressed || !decomp_buf) { free(compressed); free(decomp_buf); return 0; } zxc_compress_opts_t _co14 = {.level = 3, .checksum_enabled = 1}; int64_t comp_sz = zxc_compress(src, SRC_SIZE, compressed, cap, &_co14); if (comp_sz <= 0) { printf("Prepare failed\n"); free(compressed); free(decomp_buf); return 0; } // Try decompressing with progressively cropped size // 1. Cut off the Footer (last ZXC_FILE_FOOTER_SIZE bytes) if (comp_sz > ZXC_FILE_FOOTER_SIZE) { zxc_decompress_opts_t _do15 = {.checksum_enabled = 1}; if (zxc_decompress(compressed, comp_sz - ZXC_FILE_FOOTER_SIZE, decomp_buf, SRC_SIZE, &_do15) >= 0) { printf("Failed: Should fail when footer is missing\n"); free(compressed); free(decomp_buf); return 0; } } // 2. Cut off half the file zxc_decompress_opts_t _do16 = {.checksum_enabled = 1}; if (zxc_decompress(compressed, comp_sz / 2, decomp_buf, SRC_SIZE, &_do16) >= 0) { printf("Failed: Should fail when stream is truncated by half\n"); free(compressed); free(decomp_buf); return 0; } // 3. 
Cut off just 1 byte zxc_decompress_opts_t _do17 = {.checksum_enabled = 1}; if (zxc_decompress(compressed, comp_sz - 1, decomp_buf, SRC_SIZE, &_do17) >= 0) { printf("Failed: Should fail when stream is truncated by 1 byte\n"); free(compressed); free(decomp_buf); return 0; } printf("PASS\n\n"); free(compressed); free(decomp_buf); return 1; } // Checks behavior if writing fails int test_io_failures() { printf("=== TEST: Unit - I/O Failures ===\n"); FILE* f_in = tmpfile(); if (!f_in) return 0; // Create a dummy file to simulate failure // Open it in "rb" (read-only) and pass it as "wb" output file. // fwrite should return 0 and trigger the error. const char* bad_filename = "zxc_test_readonly.tmp"; FILE* f_dummy = fopen(bad_filename, "w"); if (f_dummy) fclose(f_dummy); FILE* f_out = fopen(bad_filename, "rb"); if (!f_out) { perror("fopen readonly"); fclose(f_in); return 0; } // Write some data to input fputs("test data to compress", f_in); fseek(f_in, 0, SEEK_SET); // This should fail cleanly (return < 0) because writing to f_out is impossible zxc_compress_opts_t _sco18 = {.n_threads = 1, .level = 5, .checksum_enabled = 0}; if (zxc_stream_compress(f_in, f_out, &_sco18) >= 0) { printf("Failed: Should detect write error on read-only stream\n"); fclose(f_in); fclose(f_out); remove(bad_filename); return 0; } printf("PASS\n\n"); fclose(f_in); fclose(f_out); remove(bad_filename); return 1; } // Checks thread selector behavior int test_thread_params() { printf("=== TEST: Unit - Thread Parameters ===\n"); FILE* f_in = tmpfile(); FILE* f_out = tmpfile(); if (!f_in || !f_out) { if (f_in) fclose(f_in); if (f_out) fclose(f_out); return 0; } // Test with 0 (Auto) and negative value - must not crash zxc_compress_opts_t _sco19 = {.n_threads = 0, .level = 5, .checksum_enabled = 0}; zxc_stream_compress(f_in, f_out, &_sco19); fseek(f_in, 0, SEEK_SET); fseek(f_out, 0, SEEK_SET); zxc_compress_opts_t _sco20 = {.n_threads = -5, .level = 5, .checksum_enabled = 0}; zxc_stream_compress(f_in, 
f_out, &_sco20); printf("PASS (No crash observed)\n\n"); fclose(f_in); fclose(f_out); return 1; } // Multi-threaded round-trip test for TSan coverage int test_multithread_roundtrip() { printf("=== TEST: Multi-Thread Round-Trip (TSan Coverage) ===\n"); const size_t SIZE = 4 * 1024 * 1024; // 4MB to ensure multiple chunks const int ITERATIONS = 3; // Multiple runs increase race detection int result = 0; uint8_t* input = malloc(SIZE); uint8_t* output = malloc(SIZE); if (!input || !output) goto cleanup; gen_lz_data(input, SIZE); for (int iter = 0; iter < ITERATIONS; iter++) { FILE* f_in = tmpfile(); FILE* f_comp = tmpfile(); FILE* f_decomp = tmpfile(); if (!f_in || !f_comp || !f_decomp) { if (f_in) fclose(f_in); if (f_comp) fclose(f_comp); if (f_decomp) fclose(f_decomp); goto cleanup; } fwrite(input, 1, SIZE, f_in); fseek(f_in, 0, SEEK_SET); // Vary thread count: 2, 4, 8 int num_threads = 2 << iter; zxc_compress_opts_t _sco21 = {.n_threads = num_threads, .level = 3, .checksum_enabled = 1}; if (zxc_stream_compress(f_in, f_comp, &_sco21) < 0) { printf("Compression failed (threads=%d)!\n", num_threads); fclose(f_in); fclose(f_comp); fclose(f_decomp); goto cleanup; } fseek(f_comp, 0, SEEK_SET); zxc_decompress_opts_t _sdo22 = {.n_threads = num_threads, .checksum_enabled = 1}; if (zxc_stream_decompress(f_comp, f_decomp, &_sdo22) < 0) { printf("Decompression failed (threads=%d)!\n", num_threads); fclose(f_in); fclose(f_comp); fclose(f_decomp); goto cleanup; } long decomp_size = ftell(f_decomp); fseek(f_decomp, 0, SEEK_SET); if (decomp_size != (long)SIZE || fread(output, 1, SIZE, f_decomp) != SIZE || memcmp(input, output, SIZE) != 0) { printf("Verification failed (threads=%d)!\n", num_threads); fclose(f_in); fclose(f_comp); fclose(f_decomp); goto cleanup; } fclose(f_in); fclose(f_comp); fclose(f_decomp); printf(" Iteration %d: PASS (%d threads)\n", iter + 1, num_threads); } printf("PASS (3 iterations, 2/4/8 threads)\n\n"); result = 1; cleanup: free(input); free(output); return 
result; } // Checks the buffer-based API (zxc_compress / zxc_decompress) int test_buffer_api() { printf("=== TEST: Unit - Buffer API (zxc_compress/zxc_decompress) ===\n"); size_t src_size = 128 * 1024; uint8_t* src = malloc(src_size); gen_lz_data(src, src_size); // 1. Calculate max compressed size size_t max_dst_size = zxc_compress_bound(src_size); uint8_t* compressed = malloc(max_dst_size); int checksum_enabled = 1; // 2. Compress zxc_compress_opts_t _co23 = {.level = 3, .checksum_enabled = checksum_enabled}; int64_t compressed_size = zxc_compress(src, src_size, compressed, max_dst_size, &_co23); if (compressed_size <= 0) { printf("Failed: zxc_compress returned %lld\n", (long long)compressed_size); free(src); free(compressed); return 0; } printf("Compressed %zu bytes to %lld bytes\n", src_size, (long long)compressed_size); // 3. Decompress uint8_t* decompressed = malloc(src_size); zxc_decompress_opts_t _do24 = {.checksum_enabled = checksum_enabled}; int64_t decompressed_size = zxc_decompress(compressed, compressed_size, decompressed, src_size, &_do24); if (decompressed_size != (int64_t)src_size) { printf("Failed: zxc_decompress returned %lld, expected %zu\n", (long long)decompressed_size, src_size); free(src); free(compressed); free(decompressed); return 0; } // 4. Verify content if (memcmp(src, decompressed, src_size) != 0) { printf("Failed: Content mismatch after decompression\n"); free(src); free(compressed); free(decompressed); return 0; } // 5. 
Test error case: Destination too small size_t small_capacity = compressed_size / 2; zxc_compress_opts_t _co25 = {.level = 3, .checksum_enabled = checksum_enabled}; int64_t small_res = zxc_compress(src, src_size, compressed, small_capacity, &_co25); if (small_res >= 0) { printf("Failed: zxc_compress should fail with small buffer (returned %lld)\n", (long long)small_res); free(src); free(compressed); free(decompressed); return 0; } printf("PASS\n\n"); free(src); free(compressed); free(decompressed); return 1; } /* * Test for zxc_br_init and zxc_br_ensure */ int test_bit_reader() { printf("=== TEST: Unit - Bit Reader (zxc_br_init / zxc_br_ensure) ===\n"); // Case 1: Normal initialization uint8_t buffer[16]; for (int i = 0; i < 16; i++) buffer[i] = (uint8_t)i; zxc_bit_reader_t br; zxc_br_init(&br, buffer, 16); if (br.bits != 64) return 0; if (br.ptr != buffer + 8) return 0; if (br.accum != zxc_le64(buffer)) return 0; printf(" [PASS] Normal init\n"); // Case 2: Small buffer initialization (should not crash) uint8_t small_buffer[4] = {0xAA, 0xBB, 0xCC, 0xDD}; zxc_br_init(&br, small_buffer, 4); // Should have read 4 bytes safely (in LE order, matching zxc_le_partial) uint64_t expected_accum = (uint64_t)small_buffer[0] | ((uint64_t)small_buffer[1] << 8) | ((uint64_t)small_buffer[2] << 16) | ((uint64_t)small_buffer[3] << 24); if (br.accum != expected_accum) return 0; if (br.ptr != small_buffer + 4) return 0; printf(" [PASS] Small buffer init\n"); // Case 3: zxc_br_ensure (Normal refill) zxc_br_init(&br, buffer, 16); br.bits = 10; // Simulate consumption br.accum >>= 54; // Simulate shift zxc_br_ensure(&br, 32); // Should have refilled if (br.bits < 32) return 0; printf(" [PASS] Ensure normal refill\n"); // Case 4: zxc_br_ensure (End of stream) // Init with full buffer but advanced pointer near end zxc_br_init(&br, buffer, 16); br.ptr = buffer + 16; // At end br.bits = 0; // Try to ensure bits, should not read past end zxc_br_ensure(&br, 10); // The key is it didn't crash. 
printf(" [PASS] Ensure EOF safety\n"); printf("PASS\n\n"); return 1; } /* * Test for zxc_bitpack_stream_32 */ int test_bitpack() { printf("=== TEST: Unit - Bit Packing (zxc_bitpack_stream_32) ===\n"); const uint32_t src[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; uint8_t dst[16]; // Pack 4 values with 4 bits each. // Input is 0xFFFFFFFF, but should be masked to 0xF (1111). // Result should be 2 bytes: 0xFF, 0xFF int len = zxc_bitpack_stream_32(src, 4, dst, 16, 4); if (len != 2) return 0; if (dst[0] != 0xFF || dst[1] != 0xFF) return 0; printf(" [PASS] Bitpack overflow masking\n"); // Edge case: bits = 32 const uint32_t src32[1] = {0x12345678}; len = zxc_bitpack_stream_32(src32, 1, dst, 16, 32); if (len != 4) return 0; if (zxc_le32(dst) != 0x12345678) return 0; printf(" [PASS] Bitpack 32 bits\n"); printf("PASS\n\n"); return 1; } // Checks that the EOF block is correctly appended int test_eof_block_structure() { printf("=== TEST: Unit - EOF Block Structure ===\n"); const char* input = "test"; size_t src_size = 4; size_t max_dst_size = zxc_compress_bound(src_size); uint8_t* compressed = malloc(max_dst_size); if (!compressed) return 0; zxc_compress_opts_t _co26 = {.level = 1, .checksum_enabled = 0}; int64_t comp_size = zxc_compress(input, src_size, compressed, max_dst_size, &_co26); if (comp_size <= 0) { printf("Failed: Compression returned 0\n"); free(compressed); return 0; } // Validating Footer and EOF Block // Total Overhead: 12 bytes (Footer) + 8 bytes (EOF Header) = 20 bytes if (comp_size < 20) { printf("Failed: Compressed size too small for Footer + EOF (%lld)\n", (long long)comp_size); free(compressed); return 0; } // 1. 
Verify 12-byte Footer // Structure: [SrcSize (8)] + [Hash (4)] const uint8_t* footer_ptr = compressed + comp_size - 12; uint32_t f_src_low = zxc_le32(footer_ptr); // Should be 4 uint32_t f_src_high = zxc_le32(footer_ptr + 4); // Should be 0 uint32_t f_hash = zxc_le32(footer_ptr + 8); // Should be 0 (checksum disabled) if (f_src_low != 4 || f_src_high != 0 || f_hash != 0) { printf("Failed: Footer mismatch. Src: %u, Hash: %u\n", f_src_low, f_hash); free(compressed); return 0; } // 2. Verify EOF Block Header (8 bytes) // Should be immediately before the footer const uint8_t* eof_ptr = compressed + comp_size - 20; uint8_t expected[8] = {0xFF, 0, 0, 0, 0, 0, 0, 0}; expected[7] = zxc_hash8(expected); if (memcmp(eof_ptr, expected, 8) != 0) { printf( "Failed: EOF block mismatch.\nExpected: %02X %02X %02X ... %02X\nGot: %02X %02X " "%02X ... %02X\n", expected[0], expected[1], expected[2], expected[7], eof_ptr[0], eof_ptr[1], eof_ptr[2], eof_ptr[7]); free(compressed); return 0; } printf("PASS\n\n"); free(compressed); return 1; } int test_header_checksum() { printf("Running test_header_checksum...\n"); uint8_t header_buf[ZXC_BLOCK_HEADER_SIZE]; zxc_block_header_t bh_in = {.block_type = ZXC_BLOCK_GLO, .block_flags = 0, .reserved = 0, .header_crc = 0, .comp_size = 1024}; // 1. Write Header if (zxc_write_block_header(header_buf, ZXC_BLOCK_HEADER_SIZE, &bh_in) != ZXC_BLOCK_HEADER_SIZE) { printf(" [FAIL] zxc_write_block_header failed\n"); return 0; } // Verify manually that checksum byte is non-zero (highly likely) if (header_buf[7] == 0) { // It's technically possible but very unlikely with a good hash printf(" [WARN] Checksum is 0 (unlikely but possible)\n"); } // 2. 
Read Header (Valid) zxc_block_header_t bh_out; if (zxc_read_block_header(header_buf, ZXC_BLOCK_HEADER_SIZE, &bh_out) != 0) { printf(" [FAIL] zxc_read_block_header failed on valid input\n"); return 0; } if (bh_out.block_type != bh_in.block_type || bh_out.comp_size != bh_in.comp_size || bh_out.header_crc != header_buf[7]) { printf(" [FAIL] Read data mismatch\n"); return 0; } // 3. Corrupt Header Checksum uint8_t original_crc = header_buf[7]; header_buf[7] = ~original_crc; // Flip bits if (zxc_read_block_header(header_buf, ZXC_BLOCK_HEADER_SIZE, &bh_out) == 0) { printf(" [FAIL] zxc_read_block_header should have failed on corrupted CRC\n"); return 0; } header_buf[7] = original_crc; // Restore // 4. Corrupt Header Content header_buf[0] = ZXC_BLOCK_RAW; // Change type if (zxc_read_block_header(header_buf, ZXC_BLOCK_HEADER_SIZE, &bh_out) == 0) { printf(" [FAIL] zxc_read_block_header should have failed on corrupted content\n"); return 0; } printf("PASS\n\n"); return 1; } // 5. Test Global Checksum Order Sensitivity // Ensures that swapping two blocks (even if valid individually) triggers a global checksum failure. int test_global_checksum_order() { printf("TEST: Global Checksum Order Sensitivity... "); // 1. Create input data withDISTINCT patterns for 2 blocks (so blocks are different) // ZXC_BLOCK_SIZE_DEFAULT is 256KB. We need > 256KB. Let's use 600KB. size_t input_sz = 600 * 1024; uint8_t* val_buf = malloc(input_sz); if (!val_buf) return 0; // Fill Block 1 with 0xAA, Block 2 with 0xBB, Block 3 with 0xCC... memset(val_buf, 0xAA, 256 * 1024); memset(val_buf + 256 * 1024, 0xBB, 256 * 1024); memset(val_buf + 512 * 1024, 0xCC, input_sz - 512 * 1024); FILE* f_in = tmpfile(); FILE* f_comp = tmpfile(); fwrite(val_buf, 1, input_sz, f_in); rewind(f_in); // 2. Compress with Checksum Enabled zxc_compress_opts_t _sco27 = {.n_threads = 1, .level = 1, .checksum_enabled = 1}; zxc_stream_compress(f_in, f_comp, &_sco27); // 3. 
Read compressed data to memory long comp_sz = ftell(f_comp); rewind(f_comp); uint8_t* comp_buf = malloc(comp_sz); if (fread(comp_buf, 1, comp_sz, f_comp) != (size_t)comp_sz) { printf("[FAIL] Failed to read compressed data\n"); free(val_buf); free(comp_buf); fclose(f_in); fclose(f_comp); return 0; } // 4. Parse Blocks to identify Block 1 and Block 2 // File Header: ZXC_FILE_HEADER_SIZE bytes size_t off1 = ZXC_FILE_HEADER_SIZE; // Parse Block 1 Header zxc_block_header_t bh1; zxc_read_block_header(comp_buf + off1, ZXC_BLOCK_HEADER_SIZE, &bh1); size_t len1 = ZXC_BLOCK_HEADER_SIZE + bh1.comp_size + ZXC_BLOCK_CHECKSUM_SIZE; size_t off2 = off1 + len1; // Parse Block 2 Header zxc_block_header_t bh2; zxc_read_block_header(comp_buf + off2, ZXC_BLOCK_HEADER_SIZE, &bh2); size_t len2 = ZXC_BLOCK_HEADER_SIZE + bh2.comp_size + ZXC_BLOCK_CHECKSUM_SIZE; // Ensure we have at least 2 full blocks + EOF + Global Checksum if (off2 + len2 > (size_t)comp_sz) { printf("[FAIL] Compressed size too small for test\n"); free(val_buf); free(comp_buf); fclose(f_in); fclose(f_comp); return 0; } // 5. Swap Block 1 and Block 2 // To safely swap, we need a new buffer uint8_t* swapped_buf = malloc(comp_sz); // Copy File Header // Copy File Header memcpy(swapped_buf, comp_buf, ZXC_FILE_HEADER_SIZE); size_t w_off = ZXC_FILE_HEADER_SIZE; // Write Block 2 first memcpy(swapped_buf + w_off, comp_buf + off2, len2); w_off += len2; // Write Block 1 second memcpy(swapped_buf + w_off, comp_buf + off1, len1); w_off += len1; // Write remaining data (EOF block + Global Checksum) size_t remaining_off = off2 + len2; size_t remaining_len = comp_sz - remaining_off; memcpy(swapped_buf + w_off, comp_buf + remaining_off, remaining_len); // 6. Write to File for Decompression FILE* f_bad = tmpfile(); fwrite(swapped_buf, 1, comp_sz, f_bad); rewind(f_bad); // 7. 
Attempt Decompression FILE* f_out = tmpfile(); zxc_decompress_opts_t _sdo28 = {.n_threads = 1, .checksum_enabled = 1}; int64_t res = zxc_stream_decompress(f_bad, f_out, &_sdo28); fclose(f_in); fclose(f_comp); fclose(f_bad); fclose(f_out); free(val_buf); free(comp_buf); free(swapped_buf); if (res >= 0) { printf(" [FAIL] zxc_stream_decompress unexpectedly succeeded on swapped blocks\n"); return 0; } printf("PASS\n\n"); return 1; } // Test zxc_get_decompressed_size int test_get_decompressed_size() { printf("=== TEST: Unit - zxc_get_decompressed_size ===\n"); // 1. Compress some data, then check decompressed size size_t src_size = 64 * 1024; uint8_t* src = malloc(src_size); gen_lz_data(src, src_size); size_t max_dst = zxc_compress_bound(src_size); uint8_t* compressed = malloc(max_dst); zxc_compress_opts_t _co29 = {.level = 3, .checksum_enabled = 0}; int64_t comp_size = zxc_compress(src, src_size, compressed, max_dst, &_co29); if (comp_size <= 0) { printf("Failed: Compression returned 0\n"); free(src); free(compressed); return 0; } size_t reported = zxc_get_decompressed_size(compressed, comp_size); if (reported != src_size) { printf("Failed: Expected %zu, got %zu\n", src_size, reported); free(src); free(compressed); return 0; } printf(" [PASS] Valid compressed data\n"); // 2. Too-small buffer if (zxc_get_decompressed_size(compressed, 4) != 0) { printf("Failed: Should return 0 for too-small buffer\n"); free(src); free(compressed); return 0; } printf(" [PASS] Too-small buffer\n"); // 3. 
Invalid magic word uint8_t bad_buf[64] = {0}; if (zxc_get_decompressed_size(bad_buf, sizeof(bad_buf)) != 0) { printf("Failed: Should return 0 for invalid magic\n"); free(src); free(compressed); return 0; } printf(" [PASS] Invalid magic word\n"); printf("PASS\n\n"); free(src); free(compressed); return 1; } int test_error_name() { printf("--- Test: zxc_error_name ---\n"); struct { int code; const char* expected; } cases[] = { {ZXC_OK, "ZXC_OK"}, {ZXC_ERROR_MEMORY, "ZXC_ERROR_MEMORY"}, {ZXC_ERROR_DST_TOO_SMALL, "ZXC_ERROR_DST_TOO_SMALL"}, {ZXC_ERROR_SRC_TOO_SMALL, "ZXC_ERROR_SRC_TOO_SMALL"}, {ZXC_ERROR_BAD_MAGIC, "ZXC_ERROR_BAD_MAGIC"}, {ZXC_ERROR_BAD_VERSION, "ZXC_ERROR_BAD_VERSION"}, {ZXC_ERROR_BAD_HEADER, "ZXC_ERROR_BAD_HEADER"}, {ZXC_ERROR_BAD_CHECKSUM, "ZXC_ERROR_BAD_CHECKSUM"}, {ZXC_ERROR_CORRUPT_DATA, "ZXC_ERROR_CORRUPT_DATA"}, {ZXC_ERROR_BAD_OFFSET, "ZXC_ERROR_BAD_OFFSET"}, {ZXC_ERROR_OVERFLOW, "ZXC_ERROR_OVERFLOW"}, {ZXC_ERROR_IO, "ZXC_ERROR_IO"}, {ZXC_ERROR_NULL_INPUT, "ZXC_ERROR_NULL_INPUT"}, {ZXC_ERROR_BAD_BLOCK_TYPE, "ZXC_ERROR_BAD_BLOCK_TYPE"}, {ZXC_ERROR_BAD_BLOCK_SIZE, "ZXC_ERROR_BAD_BLOCK_SIZE"}, }; const int n = sizeof(cases) / sizeof(cases[0]); for (int i = 0; i < n; i++) { const char* name = zxc_error_name(cases[i].code); if (strcmp(name, cases[i].expected) != 0) { printf(" [FAIL] zxc_error_name(%d) = \"%s\", expected \"%s\"\n", cases[i].code, name, cases[i].expected); return 0; } } printf(" [PASS] All %d known error codes\n", n); // Unknown codes should return "ZXC_UNKNOWN_ERROR" const char* unk = zxc_error_name(-999); if (strcmp(unk, "ZXC_UNKNOWN_ERROR") != 0) { printf(" [FAIL] zxc_error_name(-999) = \"%s\", expected \"ZXC_UNKNOWN_ERROR\"\n", unk); return 0; } unk = zxc_error_name(42); if (strcmp(unk, "ZXC_UNKNOWN_ERROR") != 0) { printf(" [FAIL] zxc_error_name(42) = \"%s\", expected \"ZXC_UNKNOWN_ERROR\"\n", unk); return 0; } printf(" [PASS] Unknown error codes\n"); printf("PASS\n\n"); return 1; } int test_legacy_header() { printf("=== TEST: 
Legacy header (chunk_size_code=64) ===\n"); // Build a valid file header with legacy chunk_size_code = 64 (= 256 KB) uint8_t hdr[ZXC_FILE_HEADER_SIZE]; memset(hdr, 0, sizeof(hdr)); // Magic word (LE) hdr[0] = 0xF5; hdr[1] = 0x2E; hdr[2] = 0xB0; hdr[3] = 0x9C; // Version hdr[4] = ZXC_FILE_FORMAT_VERSION; // Legacy chunk size code hdr[5] = 64; // Flags: no checksum hdr[6] = 0; // Compute CRC16 (bytes 14-15 zeroed, then hash) hdr[14] = 0; hdr[15] = 0; uint16_t crc = zxc_hash16(hdr); hdr[14] = (uint8_t)(crc & 0xFF); hdr[15] = (uint8_t)(crc >> 8); size_t block_size = 0; int has_checksum = -1; int rc = zxc_read_file_header(hdr, sizeof(hdr), &block_size, &has_checksum); if (rc != ZXC_OK) { printf(" [FAIL] zxc_read_file_header returned %d (%s)\n", rc, zxc_error_name(rc)); return 0; } if (block_size != 256 * 1024) { printf(" [FAIL] block_size = %zu, expected %d\n", block_size, 256 * 1024); return 0; } if (has_checksum != 0) { printf(" [FAIL] has_checksum = %d, expected 0\n", has_checksum); return 0; } printf(" [PASS] Legacy code 64 -> block_size = 256 KB\n"); // Verify that invalid codes are rejected hdr[5] = 99; // Not a valid exponent nor legacy value hdr[14] = 0; hdr[15] = 0; crc = zxc_hash16(hdr); hdr[14] = (uint8_t)(crc & 0xFF); hdr[15] = (uint8_t)(crc >> 8); rc = zxc_read_file_header(hdr, sizeof(hdr), &block_size, &has_checksum); if (rc != ZXC_ERROR_BAD_BLOCK_SIZE) { printf(" [FAIL] invalid code 99: expected %d, got %d\n", ZXC_ERROR_BAD_BLOCK_SIZE, rc); return 0; } printf(" [PASS] Invalid code 99 -> ZXC_ERROR_BAD_BLOCK_SIZE\n"); printf("PASS\n\n"); return 1; } int test_buffer_error_codes() { printf("=== TEST: Unit - Buffer API Error Codes ===\n"); /* ------------------------------------------------------------------ */ /* zxc_compress error paths */ /* ------------------------------------------------------------------ */ // 1. 
NULL src zxc_compress_opts_t _co30 = {.level = 3, .checksum_enabled = 0}; int64_t r = zxc_compress(NULL, 100, (void*)1, 100, &_co30); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] NULL src: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_compress NULL src -> ZXC_ERROR_NULL_INPUT\n"); // 2. NULL dst zxc_compress_opts_t _co31 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress((void*)1, 100, NULL, 100, &_co31); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] NULL dst: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_compress NULL dst -> ZXC_ERROR_NULL_INPUT\n"); // 3. src_size == 0 uint8_t dummy[16]; zxc_compress_opts_t _co32 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress(dummy, 0, dummy, sizeof(dummy), &_co32); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] src_size==0: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_compress src_size==0 -> ZXC_ERROR_NULL_INPUT\n"); // 4. dst_capacity == 0 zxc_compress_opts_t _co33 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress(dummy, sizeof(dummy), dummy, 0, &_co33); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] dst_cap==0: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_compress dst_capacity==0 -> ZXC_ERROR_NULL_INPUT\n"); // 5. dst too small for file header (< 16 bytes) { uint8_t src[64]; uint8_t dst[8]; // Too small for file header (16 bytes) gen_lz_data(src, sizeof(src)); zxc_compress_opts_t _co34 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress(src, sizeof(src), dst, sizeof(dst), &_co34); if (r >= 0) { printf(" [FAIL] dst too small for header: expected < 0, got %lld\n", (long long)r); return 0; } } printf(" [PASS] zxc_compress dst too small for header -> negative\n"); // 6. 
dst too small for data (fits header but not chunk) { const size_t src_sz = 4096; uint8_t* src = malloc(src_sz); const size_t small_dst = 128; uint8_t* dst = malloc(small_dst); gen_lz_data(src, src_sz); zxc_compress_opts_t _co35 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress(src, src_sz, dst, small_dst, &_co35); if (r >= 0) { printf(" [FAIL] dst too small for chunk: expected < 0, got %lld\n", (long long)r); free(src); free(dst); return 0; } free(src); free(dst); } printf(" [PASS] zxc_compress dst too small for chunk -> negative\n"); // 7. dst too small for EOF + footer { // Compress first to find the exact compressed size, then retry with // just enough for the data blocks but not for the EOF + footer. const size_t src_sz = 256; uint8_t* src = malloc(src_sz); gen_lz_data(src, src_sz); const size_t full_cap = zxc_compress_bound(src_sz); uint8_t* full_dst = malloc(full_cap); zxc_compress_opts_t _co36 = {.level = 3, .checksum_enabled = 0}; const int64_t full_sz = zxc_compress(src, src_sz, full_dst, full_cap, &_co36); if (full_sz <= 0) { printf(" [SKIP] Cannot prepare for EOF test\n"); free(src); free(full_dst); } else { // EOF header(8) + footer(12) = 20 bytes at the end. // Try with a buffer that's just a few bytes too small. const size_t tight = (size_t)full_sz - 5; uint8_t* tight_dst = malloc(tight); zxc_compress_opts_t _co37 = {.level = 3, .checksum_enabled = 0}; r = zxc_compress(src, src_sz, tight_dst, tight, &_co37); if (r >= 0) { printf(" [FAIL] dst too small for EOF+footer: expected < 0, got %lld\n", (long long)r); free(src); free(full_dst); free(tight_dst); return 0; } free(src); free(full_dst); free(tight_dst); } } printf(" [PASS] zxc_compress dst too small for EOF+footer -> negative\n"); /* ------------------------------------------------------------------ */ /* zxc_decompress error paths */ /* ------------------------------------------------------------------ */ // 8. 
NULL src zxc_decompress_opts_t _do38 = {.checksum_enabled = 0}; r = zxc_decompress(NULL, 100, (void*)1, 100, &_do38); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] decompress NULL src: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_decompress NULL src -> ZXC_ERROR_NULL_INPUT\n"); // 9. NULL dst zxc_decompress_opts_t _do39 = {.checksum_enabled = 0}; r = zxc_decompress((void*)1, 100, NULL, 100, &_do39); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] decompress NULL dst: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] zxc_decompress NULL dst -> ZXC_ERROR_NULL_INPUT\n"); // 10. src too small for file header { uint8_t tiny[4] = {0}; uint8_t out[64]; zxc_decompress_opts_t _do40 = {.checksum_enabled = 0}; r = zxc_decompress(tiny, sizeof(tiny), out, sizeof(out), &_do40); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] src too small: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } } printf(" [PASS] zxc_decompress src too small -> ZXC_ERROR_NULL_INPUT\n"); // 11. 
Bad file header (invalid magic) { uint8_t bad_src[64]; memset(bad_src, 0, sizeof(bad_src)); uint8_t out[64]; zxc_decompress_opts_t _do41 = {.checksum_enabled = 0}; r = zxc_decompress(bad_src, sizeof(bad_src), out, sizeof(out), &_do41); if (r != ZXC_ERROR_BAD_HEADER) { printf(" [FAIL] bad magic: expected %d, got %lld\n", ZXC_ERROR_BAD_HEADER, (long long)r); return 0; } } printf(" [PASS] zxc_decompress bad magic -> ZXC_ERROR_BAD_HEADER\n"); // Prepare a valid compressed buffer for subsequent decompress error tests const size_t test_src_sz = 1024; uint8_t* test_src = malloc(test_src_sz); gen_lz_data(test_src, test_src_sz); const size_t comp_cap = zxc_compress_bound(test_src_sz); uint8_t* comp_buf = malloc(comp_cap); zxc_compress_opts_t _co42 = {.level = 3, .checksum_enabled = 1}; const int64_t comp_sz = zxc_compress(test_src, test_src_sz, comp_buf, comp_cap, &_co42); if (comp_sz <= 0) { printf(" [FAIL] Could not prepare compressed data\n"); free(test_src); free(comp_buf); return 0; } // 12. Corrupt block header (damage the first block header byte after file header) { uint8_t* corrupt = malloc(comp_sz); memcpy(corrupt, comp_buf, comp_sz); // Corrupt the block type byte at offset ZXC_FILE_HEADER_SIZE corrupt[ZXC_FILE_HEADER_SIZE] = 0xFF; // Invalid block type uint8_t* out = malloc(test_src_sz); zxc_decompress_opts_t _do43 = {.checksum_enabled = 1}; r = zxc_decompress(corrupt, comp_sz, out, test_src_sz, &_do43); if (r >= 0) { printf(" [FAIL] corrupt block header: expected < 0, got %lld\n", (long long)r); free(corrupt); free(out); free(test_src); free(comp_buf); return 0; } free(corrupt); free(out); } printf(" [PASS] zxc_decompress corrupt block header -> negative\n"); // 13. 
Truncated at EOF (missing footer) { // Find the EOF block: it ends with the footer(12 bytes) // Truncate so the footer is missing const size_t trunc_sz = (size_t)comp_sz - ZXC_FILE_FOOTER_SIZE + 2; // Cut most of footer uint8_t* out = malloc(test_src_sz); zxc_decompress_opts_t _do44 = {.checksum_enabled = 1}; r = zxc_decompress(comp_buf, trunc_sz, out, test_src_sz, &_do44); if (r >= 0) { printf(" [FAIL] truncated footer: expected < 0, got %lld\n", (long long)r); free(out); free(test_src); free(comp_buf); return 0; } free(out); } printf(" [PASS] zxc_decompress truncated footer -> negative\n"); // 14. Stored size mismatch (corrupt the source size in footer) { uint8_t* corrupt = malloc(comp_sz); memcpy(corrupt, comp_buf, comp_sz); // Footer is at end: last 12 bytes = [src_size(8)] + [global_hash(4)] // Corrupt the source size field (add 1 to the first byte) const size_t footer_offset = (size_t)comp_sz - ZXC_FILE_FOOTER_SIZE; corrupt[footer_offset] ^= 0x01; // Flip a bit in the stored source size uint8_t* out = malloc(test_src_sz); zxc_decompress_opts_t _do45 = {.checksum_enabled = 1}; r = zxc_decompress(corrupt, comp_sz, out, test_src_sz, &_do45); if (r >= 0) { printf(" [FAIL] size mismatch: expected < 0, got %lld\n", (long long)r); free(corrupt); free(out); free(test_src); free(comp_buf); return 0; } free(corrupt); free(out); } printf(" [PASS] zxc_decompress stored size mismatch -> negative\n"); // 15. 
Global checksum failure (corrupt the global hash in footer) { uint8_t* corrupt = malloc(comp_sz); memcpy(corrupt, comp_buf, comp_sz); // Global hash is the last 4 bytes of the file corrupt[comp_sz - 1] ^= 0xFF; uint8_t* out = malloc(test_src_sz); zxc_decompress_opts_t _do46 = {.checksum_enabled = 1}; r = zxc_decompress(corrupt, comp_sz, out, test_src_sz, &_do46); if (r != ZXC_ERROR_BAD_CHECKSUM) { printf(" [FAIL] bad global checksum: expected %d, got %lld\n", ZXC_ERROR_BAD_CHECKSUM, (long long)r); free(corrupt); free(out); free(test_src); free(comp_buf); return 0; } free(corrupt); free(out); } printf(" [PASS] zxc_decompress global checksum -> ZXC_ERROR_BAD_CHECKSUM\n"); // 16. dst too small for decompression { uint8_t* out = malloc(test_src_sz / 4); // Way too small zxc_decompress_opts_t _do47 = {.checksum_enabled = 0}; r = zxc_decompress(comp_buf, comp_sz, out, test_src_sz / 4, &_do47); if (r >= 0) { printf(" [FAIL] dst too small for decompress: expected < 0, got %lld\n", (long long)r); free(out); free(test_src); free(comp_buf); return 0; } free(out); } printf(" [PASS] zxc_decompress dst too small -> negative\n"); free(test_src); free(comp_buf); printf("PASS\n\n"); return 1; } int test_stream_get_decompressed_size_errors() { printf("=== TEST: Unit - zxc_stream_get_decompressed_size Error Codes ===\n"); // 1. NULL FILE* int64_t r = zxc_stream_get_decompressed_size(NULL); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] NULL FILE*: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); return 0; } printf(" [PASS] NULL FILE* -> ZXC_ERROR_NULL_INPUT\n"); // 2. 
File too small (less than header + footer) { FILE* f = tmpfile(); if (!f) { printf(" [SKIP] tmpfile failed\n"); return 0; } fwrite("tiny", 1, 4, f); fseek(f, 0, SEEK_SET); r = zxc_stream_get_decompressed_size(f); if (r != ZXC_ERROR_SRC_TOO_SMALL) { printf(" [FAIL] file too small: expected %d, got %lld\n", ZXC_ERROR_SRC_TOO_SMALL, (long long)r); fclose(f); return 0; } fclose(f); } printf(" [PASS] file too small -> ZXC_ERROR_SRC_TOO_SMALL\n"); // 3. Bad magic word { FILE* f = tmpfile(); if (!f) { printf(" [SKIP] tmpfile failed\n"); return 0; } // Write enough bytes but with wrong magic uint8_t garbage[ZXC_FILE_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE]; memset(garbage, 0, sizeof(garbage)); fwrite(garbage, 1, sizeof(garbage), f); fseek(f, 0, SEEK_SET); r = zxc_stream_get_decompressed_size(f); if (r != ZXC_ERROR_BAD_MAGIC) { printf(" [FAIL] bad magic: expected %d, got %lld\n", ZXC_ERROR_BAD_MAGIC, (long long)r); fclose(f); return 0; } fclose(f); } printf(" [PASS] bad magic -> ZXC_ERROR_BAD_MAGIC\n"); // 4. Valid file returns correct size { // Create a valid compressed file in memory const size_t src_sz = 512; uint8_t* src = malloc(src_sz); gen_lz_data(src, src_sz); const size_t cap = zxc_compress_bound(src_sz); uint8_t* comp = malloc(cap); zxc_compress_opts_t _co48 = {.level = 3, .checksum_enabled = 0}; int64_t comp_sz = zxc_compress(src, src_sz, comp, cap, &_co48); if (comp_sz <= 0) { printf(" [SKIP] compress failed\n"); free(src); free(comp); return 0; } FILE* f = tmpfile(); fwrite(comp, 1, (size_t)comp_sz, f); fseek(f, 0, SEEK_SET); r = zxc_stream_get_decompressed_size(f); if (r != (int64_t)src_sz) { printf(" [FAIL] valid file: expected %zu, got %lld\n", src_sz, (long long)r); fclose(f); free(src); free(comp); return 0; } fclose(f); free(src); free(comp); } printf(" [PASS] valid file -> correct size\n"); printf("PASS\n\n"); return 1; } int test_stream_engine_errors() { printf("=== TEST: Unit - Stream Engine Error Codes ===\n"); // 1. 
zxc_stream_compress with NULL f_in { FILE* f_out = tmpfile(); zxc_compress_opts_t _sco49 = {.n_threads = 1, .level = 3, .checksum_enabled = 0}; int64_t r = zxc_stream_compress(NULL, f_out, &_sco49); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] compress NULL f_in: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); if (f_out) fclose(f_out); return 0; } if (f_out) fclose(f_out); } printf(" [PASS] zxc_stream_compress NULL f_in -> ZXC_ERROR_NULL_INPUT\n"); // 2. zxc_stream_decompress with NULL f_in { FILE* f_out = tmpfile(); zxc_decompress_opts_t _sdo50 = {.n_threads = 1, .checksum_enabled = 0}; int64_t r = zxc_stream_decompress(NULL, f_out, &_sdo50); if (r != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] decompress NULL f_in: expected %d, got %lld\n", ZXC_ERROR_NULL_INPUT, (long long)r); if (f_out) fclose(f_out); return 0; } if (f_out) fclose(f_out); } printf(" [PASS] zxc_stream_decompress NULL f_in -> ZXC_ERROR_NULL_INPUT\n"); // 3. zxc_stream_decompress with bad header (invalid file) { FILE* f_in = tmpfile(); FILE* f_out = tmpfile(); if (!f_in || !f_out) { if (f_in) fclose(f_in); if (f_out) fclose(f_out); printf(" [SKIP] tmpfile failed\n"); return 0; } // Write garbage data (bad magic) uint8_t garbage[64]; memset(garbage, 0xAA, sizeof(garbage)); fwrite(garbage, 1, sizeof(garbage), f_in); fseek(f_in, 0, SEEK_SET); zxc_decompress_opts_t _sdo51 = {.n_threads = 1, .checksum_enabled = 0}; int64_t r = zxc_stream_decompress(f_in, f_out, &_sdo51); if (r != ZXC_ERROR_BAD_HEADER) { printf(" [FAIL] bad header: expected %d, got %lld\n", ZXC_ERROR_BAD_HEADER, (long long)r); fclose(f_in); fclose(f_out); return 0; } fclose(f_in); fclose(f_out); } printf(" [PASS] zxc_stream_decompress bad header -> ZXC_ERROR_BAD_HEADER\n"); // 4. 
Stream decompress with corrupted footer (stored size mismatch) { // First, create a valid compressed stream const size_t src_sz = 4096; uint8_t* src = malloc(src_sz); gen_lz_data(src, src_sz); FILE* f_comp_in = tmpfile(); FILE* f_comp_out = tmpfile(); fwrite(src, 1, src_sz, f_comp_in); fseek(f_comp_in, 0, SEEK_SET); zxc_compress_opts_t _sco52 = {.n_threads = 1, .level = 3, .checksum_enabled = 1}; int64_t comp_sz = zxc_stream_compress(f_comp_in, f_comp_out, &_sco52); fclose(f_comp_in); if (comp_sz <= 0) { printf(" [SKIP] stream compress failed\n"); fclose(f_comp_out); free(src); return 0; } // Read the compressed data, corrupt the footer source size, rewrite fseek(f_comp_out, 0, SEEK_END); const long comp_file_sz = ftell(f_comp_out); uint8_t* comp_data = malloc(comp_file_sz); fseek(f_comp_out, 0, SEEK_SET); if (fread(comp_data, 1, comp_file_sz, f_comp_out) != (size_t)comp_file_sz) { printf(" [FAIL] fread failed\n"); fclose(f_comp_out); free(comp_data); free(src); return 0; } fclose(f_comp_out); // Corrupt the stored source size in footer (last 12 bytes: [src_size(8)] + [hash(4)]) const size_t footer_off = comp_file_sz - ZXC_FILE_FOOTER_SIZE; comp_data[footer_off] ^= 0x01; // Flip a bit in stored source size FILE* f_corrupt = tmpfile(); FILE* f_dec_out = tmpfile(); fwrite(comp_data, 1, comp_file_sz, f_corrupt); fseek(f_corrupt, 0, SEEK_SET); free(comp_data); zxc_decompress_opts_t _sdo53 = {.n_threads = 1, .checksum_enabled = 1}; int64_t r = zxc_stream_decompress(f_corrupt, f_dec_out, &_sdo53); fclose(f_corrupt); fclose(f_dec_out); free(src); if (r >= 0) { printf(" [FAIL] corrupt footer size: expected < 0, got %lld\n", (long long)r); return 0; } } printf(" [PASS] zxc_stream_decompress corrupt footer -> negative\n"); // 5. 
Stream decompress with corrupted global checksum { const size_t src_sz = 4096; uint8_t* src = malloc(src_sz); gen_lz_data(src, src_sz); FILE* f_comp_in = tmpfile(); FILE* f_comp_out = tmpfile(); fwrite(src, 1, src_sz, f_comp_in); fseek(f_comp_in, 0, SEEK_SET); zxc_compress_opts_t _sco54 = {.n_threads = 1, .level = 3, .checksum_enabled = 1}; int64_t comp_sz = zxc_stream_compress(f_comp_in, f_comp_out, &_sco54); fclose(f_comp_in); if (comp_sz <= 0) { printf(" [SKIP] stream compress failed\n"); fclose(f_comp_out); free(src); return 0; } fseek(f_comp_out, 0, SEEK_END); const long comp_file_sz = ftell(f_comp_out); uint8_t* comp_data = malloc(comp_file_sz); fseek(f_comp_out, 0, SEEK_SET); if (fread(comp_data, 1, comp_file_sz, f_comp_out) != (size_t)comp_file_sz) { printf(" [FAIL] fread failed\n"); fclose(f_comp_out); free(comp_data); free(src); return 0; } fclose(f_comp_out); // Corrupt the global checksum (last 4 bytes) comp_data[comp_file_sz - 1] ^= 0xFF; FILE* f_corrupt = tmpfile(); FILE* f_dec_out = tmpfile(); fwrite(comp_data, 1, comp_file_sz, f_corrupt); fseek(f_corrupt, 0, SEEK_SET); free(comp_data); zxc_decompress_opts_t _sdo55 = {.n_threads = 1, .checksum_enabled = 1}; int64_t r = zxc_stream_decompress(f_corrupt, f_dec_out, &_sdo55); fclose(f_corrupt); fclose(f_dec_out); free(src); if (r >= 0) { printf(" [FAIL] corrupt global checksum: expected < 0, got %lld\n", (long long)r); return 0; } } printf(" [PASS] zxc_stream_decompress corrupt checksum -> negative\n"); // 6. 
Stream decompress truncated file (missing EOF + footer) { const size_t src_sz = 4096; uint8_t* src = malloc(src_sz); gen_lz_data(src, src_sz); FILE* f_comp_in = tmpfile(); FILE* f_comp_out = tmpfile(); fwrite(src, 1, src_sz, f_comp_in); fseek(f_comp_in, 0, SEEK_SET); zxc_compress_opts_t _sco56 = {.n_threads = 1, .level = 3, .checksum_enabled = 0}; int64_t comp_sz = zxc_stream_compress(f_comp_in, f_comp_out, &_sco56); fclose(f_comp_in); free(src); if (comp_sz <= 0) { printf(" [SKIP] stream compress failed\n"); fclose(f_comp_out); return 0; } fseek(f_comp_out, 0, SEEK_END); const long comp_file_sz = ftell(f_comp_out); // Truncate: remove the EOF block header + footer const long trunc_sz = comp_file_sz - (ZXC_BLOCK_HEADER_SIZE + ZXC_FILE_FOOTER_SIZE); uint8_t* comp_data = malloc(trunc_sz); fseek(f_comp_out, 0, SEEK_SET); if (fread(comp_data, 1, trunc_sz, f_comp_out) != (size_t)trunc_sz) { printf(" [FAIL] fread failed\n"); fclose(f_comp_out); free(comp_data); return 0; } fclose(f_comp_out); FILE* f_corrupt = tmpfile(); FILE* f_dec_out = tmpfile(); fwrite(comp_data, 1, trunc_sz, f_corrupt); fseek(f_corrupt, 0, SEEK_SET); free(comp_data); zxc_decompress_opts_t _sdo57 = {.n_threads = 1, .checksum_enabled = 0}; int64_t r = zxc_stream_decompress(f_corrupt, f_dec_out, &_sdo57); fclose(f_corrupt); fclose(f_dec_out); // Should fail: missing EOF/footer means io_error or bad read if (r >= 0) { printf(" [FAIL] truncated stream: expected < 0, got %lld\n", (long long)r); return 0; } } printf(" [PASS] zxc_stream_decompress truncated -> negative\n"); printf("PASS\n\n"); return 1; } // Tests the buffer API scratch buffer (work_buf) used to safely absorb // zxc_copy32 wild-copy overshoot during decompression. int test_buffer_api_scratch_buf() { printf("=== TEST: Unit - Buffer API Scratch Buffer (work_buf) ===\n"); // 1. 
Small data roundtrip (177 bytes) { const size_t sz = 177; uint8_t src[177]; gen_lz_data(src, sz); const size_t comp_cap = zxc_compress_bound(sz); uint8_t* comp = malloc(comp_cap); zxc_compress_opts_t _co58 = {.level = 3, .checksum_enabled = 0}; const int64_t comp_sz = zxc_compress(src, sz, comp, comp_cap, &_co58); if (comp_sz <= 0) { printf(" [FAIL] compress 177B\n"); free(comp); return 0; } uint8_t dec[177]; zxc_decompress_opts_t _do59 = {.checksum_enabled = 0}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dec, sz, &_do59); if (dec_sz != (int64_t)sz || memcmp(src, dec, sz) != 0) { printf(" [FAIL] roundtrip 177B\n"); free(comp); return 0; } free(comp); printf(" [PASS] small data roundtrip (177 bytes)\n"); } // 2. Exact-fit destination (dst_capacity == decompressed size, no slack) { const size_t sz = 1024; uint8_t* src = malloc(sz); gen_lz_data(src, sz); const size_t comp_cap = zxc_compress_bound(sz); uint8_t* comp = malloc(comp_cap); zxc_compress_opts_t _co60 = {.level = 1, .checksum_enabled = 1}; const int64_t comp_sz = zxc_compress(src, sz, comp, comp_cap, &_co60); if (comp_sz <= 0) { printf(" [FAIL] compress 1KB\n"); free(src); free(comp); return 0; } uint8_t* dec = malloc(sz); // exactly sz, no extra room zxc_decompress_opts_t _do61 = {.checksum_enabled = 1}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dec, sz, &_do61); if (dec_sz != (int64_t)sz || memcmp(src, dec, sz) != 0) { printf(" [FAIL] exact-fit 1KB\n"); free(src); free(comp); free(dec); return 0; } free(src); free(comp); free(dec); printf(" [PASS] exact-fit destination (1KB)\n"); } // 3. 
Tiny data (1 byte) { const uint8_t src = 0x42; const size_t comp_cap = zxc_compress_bound(1); uint8_t* comp = malloc(comp_cap); zxc_compress_opts_t _co62 = {.level = 1, .checksum_enabled = 0}; const int64_t comp_sz = zxc_compress(&src, 1, comp, comp_cap, &_co62); if (comp_sz <= 0) { printf(" [FAIL] compress 1B\n"); free(comp); return 0; } uint8_t dec = 0; zxc_decompress_opts_t _do63 = {.checksum_enabled = 0}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, &dec, 1, &_do63); if (dec_sz != 1 || dec != 0x42) { printf(" [FAIL] roundtrip 1B\n"); free(comp); return 0; } free(comp); printf(" [PASS] tiny data roundtrip (1 byte)\n"); } // 4. Malformed input must not crash (safe error return) { uint8_t garbage[64]; for (int i = 0; i < 64; i++) garbage[i] = (uint8_t)(i * 37); uint8_t out[256]; zxc_decompress_opts_t _do64 = {.checksum_enabled = 0}; const int64_t r = zxc_decompress(garbage, sizeof(garbage), out, sizeof(out), &_do64); if (r >= 0) { printf(" [FAIL] malformed input should return < 0\n"); return 0; } printf(" [PASS] malformed input -> error %lld (no crash)\n", (long long)r); } // 5. 
Destination too small { const size_t sz = 512; uint8_t* src = malloc(sz); gen_lz_data(src, sz); const size_t comp_cap = zxc_compress_bound(sz); uint8_t* comp = malloc(comp_cap); zxc_compress_opts_t _co65 = {.level = 1, .checksum_enabled = 0}; const int64_t comp_sz = zxc_compress(src, sz, comp, comp_cap, &_co65); if (comp_sz <= 0) { printf(" [FAIL] compress 512B\n"); free(src); free(comp); return 0; } uint8_t tiny_dst[8]; zxc_decompress_opts_t _do66 = {.checksum_enabled = 0}; const int64_t r = zxc_decompress(comp, comp_sz, tiny_dst, sizeof(tiny_dst), &_do66); if (r >= 0) { printf(" [FAIL] dst too small should return < 0\n"); free(src); free(comp); return 0; } free(src); free(comp); printf(" [PASS] zxc_decompress dst too small -> negative\n"); } printf("PASS\n\n"); return 1; } // Tests that the two decompression paths in zxc_decompress() produce // identical results: // - Fast path: rem_cap >= runtime_chunk_size + ZXC_PAD_SIZE // -> decompress directly into dst (enough padding for wild copies). // - Safe path: rem_cap < runtime_chunk_size + ZXC_PAD_SIZE // -> decompress into bounce buffer (work_buf), then memcpy exact result. // int test_decompress_fast_vs_safe_path() { printf("=== TEST: Unit - Decompress Fast Path vs Safe Path ===\n"); // Use a multi-block input: ZXC_BLOCK_SIZE_DEFAULT + extra so we get at least 2 blocks. // Block size = 256KB (ZXC_BLOCK_SIZE_DEFAULT). Second block is small. 
const size_t src_sz = ZXC_BLOCK_SIZE_DEFAULT + 4096; // 256KB + 4KB -> 2 blocks uint8_t* src = malloc(src_sz); if (!src) return 0; gen_lz_data(src, src_sz); const size_t comp_cap = zxc_compress_bound(src_sz); uint8_t* comp = malloc(comp_cap); zxc_compress_opts_t _co67 = {.level = 3, .checksum_enabled = 1}; const int64_t comp_sz = zxc_compress(src, src_sz, comp, comp_cap, &_co67); if (comp_sz <= 0) { printf(" [FAIL] compression failed\n"); free(src); free(comp); return 0; } // ----- Sub-test 1: Fast path ----- // Provide a very large dst buffer so all chunks decompress directly into // dst (rem_cap >= runtime_chunk_size + ZXC_PAD_SIZE at every iteration). { const size_t big_cap = src_sz + ZXC_BLOCK_SIZE_DEFAULT; // way more than enough uint8_t* dst = malloc(big_cap); zxc_decompress_opts_t _do68 = {.checksum_enabled = 1}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dst, big_cap, &_do68); if (dec_sz != (int64_t)src_sz) { printf(" [FAIL] fast path size: expected %zu, got %lld\n", src_sz, (long long)dec_sz); free(dst); free(src); free(comp); return 0; } if (memcmp(src, dst, src_sz) != 0) { printf(" [FAIL] fast path content mismatch\n"); free(dst); free(src); free(comp); return 0; } free(dst); printf(" [PASS] fast path (oversized dst)\n"); } // ----- Sub-test 2: Safe path (exact-fit) ----- // Provide dst_capacity == src_sz exactly. After the first 256KB block is // written, rem_cap for the second block (4KB) is exactly 4KB which is // < runtime_chunk_size (256KB) + ZXC_PAD_SIZE (32). This forces the // safe path (bounce buffer) for the second block. 
{ uint8_t* dst = malloc(src_sz); // no slack at all zxc_decompress_opts_t _do69 = {.checksum_enabled = 1}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dst, src_sz, &_do69); if (dec_sz != (int64_t)src_sz) { printf(" [FAIL] safe path size: expected %zu, got %lld\n", src_sz, (long long)dec_sz); free(dst); free(src); free(comp); return 0; } if (memcmp(src, dst, src_sz) != 0) { printf(" [FAIL] safe path content mismatch\n"); free(dst); free(src); free(comp); return 0; } free(dst); printf(" [PASS] safe path (exact-fit dst)\n"); } // ----- Sub-test 3: Boundary ----- // dst_capacity = src_sz + ZXC_PAD_SIZE - 1 (just below the fast path // threshold for the LAST chunk). The last chunk should still fall into // the safe path here. { const size_t tight_cap = src_sz + ZXC_PAD_SIZE - 1; uint8_t* dst = malloc(tight_cap); zxc_decompress_opts_t _do70 = {.checksum_enabled = 1}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dst, tight_cap, &_do70); if (dec_sz != (int64_t)src_sz) { printf(" [FAIL] boundary size: expected %zu, got %lld\n", src_sz, (long long)dec_sz); free(dst); free(src); free(comp); return 0; } if (memcmp(src, dst, src_sz) != 0) { printf(" [FAIL] boundary content mismatch\n"); free(dst); free(src); free(comp); return 0; } free(dst); printf(" [PASS] boundary (dst = src_sz + PAD - 1)\n"); } // ----- Sub-test 4: Safe path with dst too small ----- // The safe path detects that the decompressed chunk doesn't fit and // returns ZXC_ERROR_DST_TOO_SMALL (covers the res > rem_cap guard). 
{ const size_t tiny_cap = ZXC_BLOCK_SIZE_DEFAULT / 2; // Enough for half a block uint8_t* dst = malloc(tiny_cap); zxc_decompress_opts_t _do71 = {.checksum_enabled = 0}; const int64_t dec_sz = zxc_decompress(comp, comp_sz, dst, tiny_cap, &_do71); if (dec_sz >= 0) { printf(" [FAIL] safe path dst-too-small should fail, got %lld\n", (long long)dec_sz); free(dst); free(src); free(comp); return 0; } free(dst); printf(" [PASS] safe path dst too small -> negative\n"); } free(src); free(comp); printf("PASS\n\n"); return 1; } int test_opaque_context_api() { printf("=== TEST: Opaque Context API (zxc_create_cctx / zxc_create_dctx) ===\n"); /* 1. NULL context -> ZXC_ERROR_NULL_INPUT */ { uint8_t d[64]; zxc_compress_opts_t co = {.level = 3, .checksum_enabled = 0}; if (zxc_compress_cctx(NULL, d, sizeof(d), d, sizeof(d), &co) != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] compress_cctx NULL ctx\n"); return 0; } zxc_decompress_opts_t do_ = {.checksum_enabled = 0}; if (zxc_decompress_dctx(NULL, d, sizeof(d), d, sizeof(d), &do_) != ZXC_ERROR_NULL_INPUT) { printf(" [FAIL] decompress_dctx NULL ctx\n"); return 0; } printf(" [PASS] NULL context -> ZXC_ERROR_NULL_INPUT\n"); } /* 2. Create with eager init, multi-call reuse, free */ zxc_compress_opts_t create_opts = {.level = 3, .checksum_enabled = 0}; zxc_cctx* cctx = zxc_create_cctx(&create_opts); zxc_dctx* dctx = zxc_create_dctx(); if (!cctx || !dctx) { printf(" [FAIL] create returned NULL\n"); zxc_free_cctx(cctx); zxc_free_dctx(dctx); return 0; } const size_t src_sz = 8192; uint8_t* src = malloc(src_sz); const size_t comp_cap = zxc_compress_bound(src_sz); uint8_t* comp = malloc(comp_cap); uint8_t* dec = malloc(src_sz); /* 3. 
Three calls with the SAME cctx: level 1, 3, 5 */ for (int lvl = 1; lvl <= 5; lvl += 2) { gen_lz_data(src, src_sz); zxc_compress_opts_t co = {.level = lvl, .checksum_enabled = (lvl == 3)}; const int64_t csz = zxc_compress_cctx(cctx, src, src_sz, comp, comp_cap, &co); if (csz <= 0) { printf(" [FAIL] compress_cctx level %d returned %lld\n", lvl, (long long)csz); goto fail; } zxc_decompress_opts_t do_ = {.checksum_enabled = (lvl == 3)}; const int64_t dsz = zxc_decompress_dctx(dctx, comp, (size_t)csz, dec, src_sz, &do_); if (dsz != (int64_t)src_sz || memcmp(src, dec, src_sz) != 0) { printf(" [FAIL] roundtrip level %d (dsz=%lld)\n", lvl, (long long)dsz); goto fail; } } printf(" [PASS] Multi-call reuse (level 1, 3, 5)\n"); /* 4. Free is safe to call multiple times / on NULL */ zxc_free_cctx(cctx); cctx = NULL; zxc_free_cctx(NULL); /* no-op */ zxc_free_dctx(dctx); dctx = NULL; zxc_free_dctx(NULL); printf(" [PASS] Free + double-free + NULL safety\n"); free(src); free(comp); free(dec); printf("PASS\n\n"); return 1; fail: zxc_free_cctx(cctx); zxc_free_dctx(dctx); free(src); free(comp); free(dec); return 0; } int main() { srand(42); // Fixed seed for reproducibility int total_failures = 0; // Standard size for blocks const size_t BUF_SIZE = 256 * 1024; uint8_t* buffer = malloc(BUF_SIZE); if (!buffer) { printf("Memory allocation failed!\n"); return 1; } gen_random_data(buffer, BUF_SIZE); if (!test_round_trip("RAW Block (Random Data)", buffer, BUF_SIZE, 3, 0)) total_failures++; gen_lz_data(buffer, BUF_SIZE); if (!test_round_trip("GHI Block (Text Pattern)", buffer, BUF_SIZE, 2, 0)) total_failures++; gen_lz_data(buffer, BUF_SIZE); if (!test_round_trip("GLO Block (Text Pattern)", buffer, BUF_SIZE, 4, 0)) total_failures++; gen_num_data(buffer, BUF_SIZE); if (!test_round_trip("NUM Block (Integer Sequence)", buffer, BUF_SIZE, 3, 0)) total_failures++; gen_num_data_zero(buffer, BUF_SIZE); if (!test_round_trip("NUM Block (Zero Deltas)", buffer, BUF_SIZE, 3, 0)) total_failures++; 
gen_num_data_small(buffer, BUF_SIZE); if (!test_round_trip("NUM Block (Small Deltas)", buffer, BUF_SIZE, 3, 0)) total_failures++; gen_num_data_large(buffer, BUF_SIZE); if (!test_round_trip("NUM Block (Large Deltas)", buffer, BUF_SIZE, 3, 0)) total_failures++; gen_random_data(buffer, 50); if (!test_round_trip("Small Input (50 bytes)", buffer, 50, 3, 0)) total_failures++; if (!test_round_trip("Empty Input (0 bytes)", buffer, 0, 3, 0)) total_failures++; // Edge Cases: 1-byte file gen_random_data(buffer, 1); if (!test_round_trip("1-byte Input", buffer, 1, 3, 0)) total_failures++; if (!test_round_trip("1-byte Input (with checksum)", buffer, 1, 3, 1)) total_failures++; // Large File Case: Cross block boundaries const size_t LARGE_BUF_SIZE = 15 * 1024 * 1024; // 15 MB uint8_t* large_buffer = malloc(LARGE_BUF_SIZE); if (large_buffer) { gen_lz_data(large_buffer, LARGE_BUF_SIZE); // Good mix of repetitive data if (!test_round_trip("Large File (15MB Multi-Block)", large_buffer, LARGE_BUF_SIZE, 3, 1)) { total_failures++; } // Test NUM block specifically over a large size to stress boundaries gen_num_data(large_buffer, LARGE_BUF_SIZE); if (!test_round_trip("Large File NUM (15MB Multi-Block)", large_buffer, LARGE_BUF_SIZE, 3, 1)) { total_failures++; } free(large_buffer); } else { printf("Failed to allocate 15MB buffer for large file test.\n"); } printf("\n--- Test Coverage: Checksum ---\n"); gen_lz_data(buffer, BUF_SIZE); if (!test_round_trip("Checksum Disabled", buffer, BUF_SIZE, 3, 0)) total_failures++; if (!test_round_trip("Checksum Enabled", buffer, BUF_SIZE, 31, 1)) total_failures++; printf("\n--- Test Coverage: Compression Levels ---\n"); gen_lz_data(buffer, BUF_SIZE); if (!test_round_trip("Level 1", buffer, BUF_SIZE, 1, 1)) total_failures++; if (!test_round_trip("Level 2", buffer, BUF_SIZE, 2, 1)) total_failures++; if (!test_round_trip("Level 3", buffer, BUF_SIZE, 3, 1)) total_failures++; if (!test_round_trip("Level 4", buffer, BUF_SIZE, 4, 1)) total_failures++; if 
(!test_round_trip("Level 5", buffer, BUF_SIZE, 5, 1)) total_failures++; printf("\n--- Test Coverage: Binary Data Preservation ---\n"); gen_binary_data(buffer, BUF_SIZE); if (!test_round_trip("Binary Data (0x00, 0x0A, 0x0D, 0xFF)", buffer, BUF_SIZE, 3, 0)) total_failures++; if (!test_round_trip("Binary Data with Checksum", buffer, BUF_SIZE, 3, 1)) total_failures++; // Test with small binary data to ensure even small payloads are preserved gen_binary_data(buffer, 128); if (!test_round_trip("Small Binary Data (128 bytes)", buffer, 128, 3, 0)) total_failures++; printf("\n--- Test Coverage: Repetitive Pattern Encoding ---\n"); // Test 8-bit offset mode (enc_off=1): patterns with all offsets <= 255 gen_small_offset_data(buffer, BUF_SIZE); if (!test_round_trip("8-bit Offsets (Small Pattern)", buffer, BUF_SIZE, 3, 1)) total_failures++; if (!test_round_trip("8-bit Offsets (Level 5)", buffer, BUF_SIZE, 5, 1)) total_failures++; // Test 16-bit offset mode (enc_off=0): patterns with offsets > 255 gen_large_offset_data(buffer, BUF_SIZE); if (!test_round_trip("16-bit Offsets (Large Distance)", buffer, BUF_SIZE, 3, 1)) total_failures++; if (!test_round_trip("16-bit Offsets (Level 5)", buffer, BUF_SIZE, 5, 1)) total_failures++; // Edge case: Mixed buffer that should trigger 16-bit mode // (even one large offset forces 16-bit mode) gen_small_offset_data(buffer, BUF_SIZE / 2); gen_large_offset_data(buffer + BUF_SIZE / 2, BUF_SIZE / 2); if (!test_round_trip("Mixed Offsets (Hybrid)", buffer, BUF_SIZE, 3, 1)) total_failures++; free(buffer); // --- UNIT TESTS (ROBUSTNESS/API) --- if (!test_buffer_api()) total_failures++; if (!test_multithread_roundtrip()) total_failures++; if (!test_null_output_decompression()) total_failures++; if (!test_max_compressed_size_logic()) total_failures++; if (!test_invalid_arguments()) total_failures++; if (!test_truncated_input()) total_failures++; if (!test_io_failures()) total_failures++; if (!test_thread_params()) total_failures++; if 
(!test_bit_reader()) total_failures++; if (!test_bitpack()) total_failures++; if (!test_eof_block_structure()) total_failures++; if (!test_header_checksum()) total_failures++; if (!test_global_checksum_order()) total_failures++; if (!test_get_decompressed_size()) total_failures++; if (!test_error_name()) total_failures++; if (!test_legacy_header()) total_failures++; if (!test_buffer_error_codes()) total_failures++; if (!test_stream_get_decompressed_size_errors()) total_failures++; if (!test_stream_engine_errors()) total_failures++; if (!test_buffer_api_scratch_buf()) total_failures++; if (!test_decompress_fast_vs_safe_path()) total_failures++; if (!test_opaque_context_api()) total_failures++; if (total_failures > 0) { printf("FAILED: %d tests failed.\n", total_failures); return 1; } printf("ALL TESTS PASSED SUCCESSFULLY.\n"); return 0; }zxc-0.9.1/tests/test_cli.sh000077500000000000000000000666651515574030100156600ustar00rootroot00000000000000#!/bin/bash # ZXC - High-performance lossless compression # # Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. # SPDX-License-Identifier: BSD-3-Clause # # Test script for ZXC CLI # Usage: ./tests/test_cli.sh [path_to_zxc_binary] set -e # Default binary path ZXC_BIN=${1:-"../build/zxc"} # Test Directory (to isolate test files) TEST_DIR="./test_tmp" mkdir -p "$TEST_DIR" # Test Files TEST_FILE="$TEST_DIR/testdata" TEST_FILE_XC="${TEST_FILE}.xc" TEST_FILE_DEC="${TEST_FILE}.dec" PIPE_XC="$TEST_DIR/test_pipe.xc" PIPE_DEC="$TEST_DIR/test_pipe.dec" # Variables for checking file existence TEST_FILE_XC_BASH="$TEST_FILE_XC" TEST_FILE_DEC_BASH="${TEST_FILE}.dec" # Arguments passed to ZXC (relative to test_tmp) TEST_FILE_ARG="$TEST_FILE" TEST_FILE_XC_ARG="$TEST_FILE_XC" RED='\033[0;31m' GREEN='\033[0;32m' NC='\033[0m' # No Color log_pass() { echo -e "${GREEN}[PASS]${NC} $1" } log_fail() { echo -e "${RED}[FAIL]${NC} $1" exit 1 } # cleanup on exit cleanup() { echo "Cleaning up..." rm -rf "$TEST_DIR" } trap cleanup EXIT # 0. 
Check binary if [ ! -f "$ZXC_BIN" ]; then log_fail "Binary not found at $ZXC_BIN. Please build the project first." fi echo "Using binary: $ZXC_BIN" # 1. Generate Test File (Lorem Ipsum) echo "Generating test file..." cat > "$TEST_FILE" <> "${TEST_FILE}.tmp" done mv "${TEST_FILE}.tmp" "$TEST_FILE" FILE_SIZE=$(wc -c < "$TEST_FILE" | tr -d ' ') echo "Test file generated: $TEST_FILE ($FILE_SIZE bytes)" # Helper: Wait for file to be ready and readable wait_for_file() { local file="$1" local retries=10 local count=0 # On Windows, file locking can cause race conditions immediately after creation. while [ $count -lt $retries ]; do if [ -f "$file" ]; then # Try to read a byte to ensure it's not exclusively locked if head -c 1 "$file" >/dev/null 2>&1; then return 0 fi fi sleep 1 count=$((count + 1)) done echo "Timeout waiting for file '$file' to be readable." ls -l "$file" 2>/dev/null || echo "File not found in ls." return 1 } # 2. Basic Round-Trip echo "Testing Basic Round-Trip..." if ! "$ZXC_BIN" -z -k "$TEST_FILE_ARG"; then log_fail "Compression command failed (exit code $?)" fi # Wait for output if ! wait_for_file "$TEST_FILE_XC_BASH"; then log_fail "Compression succeeded but output file '$TEST_FILE_XC_BASH' is not accessible." fi # Decompress to stdout echo "Attempting decompression of: $TEST_FILE_XC_ARG" if ! "$ZXC_BIN" -d -c "$TEST_FILE_XC_ARG" > "$TEST_FILE_DEC_BASH"; then echo "Decompress to stdout failed. Retrying once..." sleep 1 if ! "$ZXC_BIN" -d -c "$TEST_FILE_XC_ARG" > "$TEST_FILE_DEC_BASH"; then log_fail "Decompression to stdout failed." fi fi # Decompress to file rm -f "$TEST_FILE" sleep 1 if ! "$ZXC_BIN" -d -k "$TEST_FILE_XC_ARG"; then echo "Decompress to file failed. Retrying once..." sleep 1 if ! "$ZXC_BIN" -d -k "$TEST_FILE_XC_ARG"; then log_fail "Decompression to file failed." fi fi if ! wait_for_file "$TEST_FILE"; then log_fail "Decompression failed to recreate original file." 
fi mv "$TEST_FILE" "$TEST_FILE_DEC_BASH" # Re-generate source for valid comparison cat > "$TEST_FILE" <> "${TEST_FILE}.tmp" done mv "${TEST_FILE}.tmp" "$TEST_FILE" if cmp -s "$TEST_FILE" "$TEST_FILE_DEC_BASH"; then log_pass "Basic Round-Trip" else log_fail "Basic Round-Trip content mismatch" fi # 3. Piping echo "Testing Piping..." rm -f "$PIPE_XC" "$PIPE_DEC" cat "$TEST_FILE" | "$ZXC_BIN" > "$PIPE_XC" if [ ! -s "$PIPE_XC" ]; then log_fail "Piping compression failed (empty output)" fi cat "$PIPE_XC" | "$ZXC_BIN" -d > "$PIPE_DEC" if [ ! -s "$PIPE_DEC" ]; then log_fail "Piping decompression failed (empty output)" fi if cmp -s "$TEST_FILE" "$PIPE_DEC"; then log_pass "Piping" else log_fail "Piping content mismatch" fi # 4. Flags echo "Testing Flags..." # Level "$ZXC_BIN" -1 -k -f "$TEST_FILE_ARG" if [ ! -f "$TEST_FILE_XC_BASH" ]; then log_fail "Level 1 flag failed"; fi log_pass "Flag -1" # Force Overwrite (-f) touch "$TEST_DIR/out.xc" touch "${TEST_FILE_XC_BASH}" set +e "$ZXC_BIN" -z -k "$TEST_FILE_ARG" > /dev/null 2>&1 RET=$? set -e if [ $RET -eq 0 ]; then log_fail "Should have failed to overwrite without -f" else log_pass "Overwrite protection verified" fi "$ZXC_BIN" -z -k -f "$TEST_FILE_ARG" if [ $? -eq 0 ]; then log_pass "Force overwrite (-f)" else log_fail "Force overwrite failed" fi # 5. Benchmark echo "Testing Benchmark..." "$ZXC_BIN" -b1 "$TEST_FILE_ARG" > /dev/null if [ $? -eq 0 ]; then log_pass "Benchmark mode" else log_fail "Benchmark mode failed" fi # 6. Error Handling echo "Testing Error Handling..." set +e "$ZXC_BIN" "nonexistent_file" > /dev/null 2>&1 RET=$? set -e if [ $RET -ne 0 ]; then log_pass "Missing file error handled" else log_fail "Missing file should return error" fi # 7. Version echo "Testing Version..." OUT_VER=$("$ZXC_BIN" -V) if [[ "$OUT_VER" == *"zxc"* ]]; then log_pass "Version flag" else log_fail "Version flag failed" fi # 8. Checksum echo "Testing Checksum..." "$ZXC_BIN" -C -k -f "$TEST_FILE_ARG" if [ ! 
-f "$TEST_FILE_XC_BASH" ]; then log_fail "Checksum compression failed"; fi rm -f "$TEST_FILE" "$ZXC_BIN" -d "$TEST_FILE_XC_ARG" if [ ! -f "$TEST_FILE" ]; then log_fail "Checksum decompression failed"; fi log_pass "Checksum enabled (-C)" "$ZXC_BIN" -N -k -f "$TEST_FILE_ARG" if [ ! -f "$TEST_FILE_XC_BASH" ]; then log_fail "No-Checksum compression failed"; fi rm -f "$TEST_FILE" "$ZXC_BIN" -d "$TEST_FILE_XC_ARG" if [ ! -f "$TEST_FILE" ]; then log_fail "No-Checksum decompression failed"; fi log_pass "Checksum disabled (-N)" # 9. Integrity Test (-t) echo "Testing Integrity Check (-t)..." "$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" # Valid file should pass and show "OK" OUT=$("$ZXC_BIN" -t "$TEST_FILE_XC_ARG") if [[ "$OUT" == *": OK"* ]]; then log_pass "Integrity check passed on valid file" else log_fail "Integrity check failed on valid file (expected OK output)" fi # Verbose test mode OUT=$("$ZXC_BIN" -t -v "$TEST_FILE_XC_ARG") if [[ "$OUT" == *": OK"* ]] && [[ "$OUT" == *"Checksum:"* ]]; then log_pass "Integrity check verbose mode" else log_fail "Integrity check verbose mode failed" fi # Corrupt file should fail and show "FAILED" # Corrupt a byte in the middle of the file (after header) printf '\xff' | dd of="$TEST_FILE_XC_ARG" bs=1 seek=100 count=1 conv=notrunc 2>/dev/null set +e OUT=$("$ZXC_BIN" -t "$TEST_FILE_XC_ARG" 2>&1) RET=$? set -e if [ $RET -ne 0 ] && [[ "$OUT" == *": FAILED"* ]]; then log_pass "Integrity check correctly failed on corrupt file" else log_fail "Integrity check PASSED on corrupt file (False Negative)" fi # 10. Global Checksum Integrity echo "Testing Global Checksum Integrity..." "$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" # Corrupt the last byte (part of Global Checksum) FILE_SZ=$(wc -c < "$TEST_FILE_XC_ARG" | tr -d ' ') LAST_BYTE_OFFSET=$((FILE_SZ - 1)) printf '\x00' | dd of="$TEST_FILE_XC_ARG" bs=1 seek=$LAST_BYTE_OFFSET count=1 conv=notrunc 2>/dev/null set +e OUT=$("$ZXC_BIN" -t "$TEST_FILE_XC_ARG" 2>&1) RET=$? 
set -e if [ $RET -ne 0 ] && [[ "$OUT" == *": FAILED"* ]]; then log_pass "Integrity check correctly failed on corrupt Global Checksum" else log_fail "Integrity check PASSED on corrupt Global Checksum (False Negative)" fi # Ensure no output file is created if [ -f "${TEST_FILE}.xc.xc" ] || [ -f "${TEST_FILE}.xc.dec" ]; then log_fail "Integrity check created output file unexpectedly" fi # 11. List Command (-l) echo "Testing List Command (-l)..." "$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" # Normal list mode OUT=$("$ZXC_BIN" -l "$TEST_FILE_XC_ARG") if [[ "$OUT" == *"Compressed"* ]] && [[ "$OUT" == *"Uncompressed"* ]] && [[ "$OUT" == *"Ratio"* ]]; then log_pass "List command basic output" else log_fail "List command basic output failed" fi # Verbose list mode OUT=$("$ZXC_BIN" -l -v "$TEST_FILE_XC_ARG") if [[ "$OUT" == *"Block Format:"* ]] && [[ "$OUT" == *"Block Units:"* ]] && [[ "$OUT" == *"Checksum Method:"* ]]; then log_pass "List command verbose output" else log_fail "List command verbose output failed" fi # List with invalid file should fail set +e "$ZXC_BIN" -l "nonexistent_file" > /dev/null 2>&1 RET=$? set -e if [ $RET -ne 0 ]; then log_pass "List command error handling" else log_fail "List command should fail on nonexistent file" fi # 12. Compression Levels (All) echo "Testing All Compression Levels..." for LEVEL in 1 2 3 4 5; do "$ZXC_BIN" -$LEVEL -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_lvl${LEVEL}.xc" if [ ! -f "$TEST_DIR/test_lvl${LEVEL}.xc" ]; then log_fail "Compression level $LEVEL failed" fi # Decompress and verify "$ZXC_BIN" -d -c "$TEST_DIR/test_lvl${LEVEL}.xc" > "$TEST_DIR/test_lvl${LEVEL}.dec" if ! cmp -s "$TEST_FILE" "$TEST_DIR/test_lvl${LEVEL}.dec"; then log_fail "Level $LEVEL decompression mismatch" fi SIZE=$(wc -c < "$TEST_DIR/test_lvl${LEVEL}.xc" | tr -d ' ') log_pass "Level $LEVEL (Size: $SIZE bytes)" done # 13. Data Type Tests echo "Testing Different Data Types..." # 13.1 Highly Repetitive Text (Best Compression) echo " Testing repetitive data..." 
yes "Lorem ipsum dolor sit amet" | head -n 1000 > "$TEST_DIR/test_repetitive.txt" "$ZXC_BIN" -3 -k "$TEST_DIR/test_repetitive.txt" if [ ! -f "$TEST_DIR/test_repetitive.txt.xc" ]; then log_fail "Repetitive data compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_repetitive.txt.xc" > "$TEST_DIR/test_repetitive.dec" if cmp -s "$TEST_DIR/test_repetitive.txt" "$TEST_DIR/test_repetitive.dec"; then SIZE_ORIG=$(wc -c < "$TEST_DIR/test_repetitive.txt" | tr -d ' ') SIZE_COMP=$(wc -c < "$TEST_DIR/test_repetitive.txt.xc" | tr -d ' ') RATIO=$((SIZE_ORIG / SIZE_COMP)) log_pass "Repetitive text (Ratio: ${RATIO}:1)" else log_fail "Repetitive data round-trip failed" fi # 13.2 Binary Zeros (Highly Compressible) echo " Testing binary zeros..." dd if=/dev/zero bs=1024 count=100 of="$TEST_DIR/test_zeros.bin" 2>/dev/null "$ZXC_BIN" -3 -k "$TEST_DIR/test_zeros.bin" if [ ! -f "$TEST_DIR/test_zeros.bin.xc" ]; then log_fail "Binary zeros compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_zeros.bin.xc" > "$TEST_DIR/test_zeros.dec" if cmp -s "$TEST_DIR/test_zeros.bin" "$TEST_DIR/test_zeros.dec"; then SIZE_ORIG=$(wc -c < "$TEST_DIR/test_zeros.bin" | tr -d ' ') SIZE_COMP=$(wc -c < "$TEST_DIR/test_zeros.bin.xc" | tr -d ' ') RATIO=$((SIZE_ORIG / SIZE_COMP)) log_pass "Binary zeros (Ratio: ${RATIO}:1)" else log_fail "Binary zeros round-trip failed" fi # 13.3 Random Data (Incompressible - Should use RAW blocks) echo " Testing random data (incompressible)..." dd if=/dev/urandom bs=1024 count=100 of="$TEST_DIR/test_random.bin" 2>/dev/null "$ZXC_BIN" -3 -k "$TEST_DIR/test_random.bin" if [ ! 
-f "$TEST_DIR/test_random.bin.xc" ]; then log_fail "Random data compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_random.bin.xc" > "$TEST_DIR/test_random.dec" if cmp -s "$TEST_DIR/test_random.bin" "$TEST_DIR/test_random.dec"; then SIZE_ORIG=$(wc -c < "$TEST_DIR/test_random.bin" | tr -d ' ') SIZE_COMP=$(wc -c < "$TEST_DIR/test_random.bin.xc" | tr -d ' ') # Random data should expand slightly (RAW blocks + headers) if [ $SIZE_COMP -le $((SIZE_ORIG + 200)) ]; then log_pass "Random data (RAW blocks, minimal expansion)" else log_fail "Random data expanded too much" fi else log_fail "Random data round-trip failed" fi # 14. Large File Test (Performance Validation) echo "Testing Large Files..." dd if=/dev/zero bs=1M count=10 of="$TEST_DIR/test_large.bin" 2>/dev/null if ! "$ZXC_BIN" -3 -k "$TEST_DIR/test_large.bin"; then log_fail "Large file compression failed" fi if ! "$ZXC_BIN" -d -c "$TEST_DIR/test_large.bin.xc" > "$TEST_DIR/test_large.dec"; then log_fail "Large file decompression failed" fi if cmp -s "$TEST_DIR/test_large.bin" "$TEST_DIR/test_large.dec"; then log_pass "Large file (10 MB) round-trip" else log_fail "Large file content mismatch" fi # 15. One-Pass Pipe Round-Trip (Critical for Streaming) echo "Testing One-Pass Pipe Round-Trip..." cat "$TEST_FILE" | "$ZXC_BIN" -c | "$ZXC_BIN" -dc > "$TEST_DIR/test_pipe_onepass.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_pipe_onepass.dec"; then log_pass "One-pass pipe round-trip" else log_fail "One-pass pipe round-trip content mismatch" fi # 16. Empty File Edge Case echo "Testing Empty File..." touch "$TEST_DIR/test_empty.txt" "$ZXC_BIN" -3 -k "$TEST_DIR/test_empty.txt" if [ ! -f "$TEST_DIR/test_empty.txt.xc" ]; then log_fail "Empty file compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_empty.txt.xc" > "$TEST_DIR/test_empty.dec" if [ ! -s "$TEST_DIR/test_empty.dec" ]; then log_pass "Empty file round-trip" else log_fail "Empty file produced non-empty output" fi # 17. 
Stdin Detection (No -c flag needed for compression) echo "Testing Stdin Auto-Detection..." cat "$TEST_FILE" | "$ZXC_BIN" > "$TEST_DIR/test_stdin_auto.xc" 2>/dev/null if [ -s "$TEST_DIR/test_stdin_auto.xc" ]; then "$ZXC_BIN" -d -c "$TEST_DIR/test_stdin_auto.xc" > "$TEST_DIR/test_stdin_auto.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_stdin_auto.dec"; then log_pass "Stdin auto-detection (compression)" else log_fail "Stdin auto-detection content mismatch" fi else log_fail "Stdin auto-detection failed (empty output)" fi # 18. Keep Source File (-k) echo "Testing Keep Source (-k)..." cp "$TEST_FILE" "$TEST_DIR/test_keep.txt" "$ZXC_BIN" -k "$TEST_DIR/test_keep.txt" if [ -f "$TEST_DIR/test_keep.txt" ] && [ -f "$TEST_DIR/test_keep.txt.xc" ]; then log_pass "Keep source file (-k)" else log_fail "Keep source file failed (source was deleted)" fi # 19. Multi-Threading Tests (-T) echo "Testing Multi-Threading..." # 19.1 Single Thread (baseline) echo " Testing single thread (-T1)..." "$ZXC_BIN" -3 -T1 -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_T1.xc" if [ ! -f "$TEST_DIR/test_T1.xc" ]; then log_fail "Single thread compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_T1.xc" > "$TEST_DIR/test_T1.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_T1.dec"; then SIZE_T1=$(wc -c < "$TEST_DIR/test_T1.xc" | tr -d ' ') log_pass "Single thread (-T1, Size: $SIZE_T1)" else log_fail "Single thread round-trip failed" fi # 19.2 Multi-Thread (2 threads) echo " Testing 2 threads (-T2)..." "$ZXC_BIN" -3 -T2 -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_T2.xc" if [ ! -f "$TEST_DIR/test_T2.xc" ]; then log_fail "2-thread compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_T2.xc" > "$TEST_DIR/test_T2.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_T2.dec"; then SIZE_T2=$(wc -c < "$TEST_DIR/test_T2.xc" | tr -d ' ') log_pass "2 threads (-T2, Size: $SIZE_T2)" else log_fail "2-thread round-trip failed" fi # 19.3 Multi-Thread (all threads) echo " Testing all threads (-T0)..." 
"$ZXC_BIN" -3 -T0 -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_T0.xc" if [ ! -f "$TEST_DIR/test_T0.xc" ]; then log_fail "all-thread compression failed" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_T0.xc" > "$TEST_DIR/test_T0.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_T0.dec"; then SIZE_T0=$(wc -c < "$TEST_DIR/test_T0.xc" | tr -d ' ') log_pass "all threads (-T0, Size: $SIZE_T0)" else log_fail "all-thread round-trip failed" fi # 19.4 Verify determinism: All outputs should decompress to same content echo " Verifying thread-count independence..." if cmp -s "$TEST_DIR/test_T1.dec" "$TEST_DIR/test_T2.dec" && cmp -s "$TEST_DIR/test_T2.dec" "$TEST_DIR/test_T0.dec"; then log_pass "Deterministic output (all thread counts produce valid results)" else log_fail "Thread outputs differ (non-deterministic)" fi # 19.5 Cross-compatibility: File compressed with -T2 should decompress without -T flag echo " Testing cross-compatibility..." "$ZXC_BIN" -d -c "$TEST_DIR/test_T2.xc" > "$TEST_DIR/test_T2_compat.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_T2_compat.dec"; then log_pass "Cross-compatible decompression" else log_fail "Multi-threaded file not compatible with standard decompression" fi # 19.6 Large file with threads (performance validation) echo " Testing large file with threads..." dd if=/dev/zero bs=1M count=5 of="$TEST_DIR/test_large_mt.bin" 2>/dev/null "$ZXC_BIN" -3 -T4 -c -k "$TEST_DIR/test_large_mt.bin" > "$TEST_DIR/test_large_mt.xc" "$ZXC_BIN" -d -c "$TEST_DIR/test_large_mt.xc" > "$TEST_DIR/test_large_mt.dec" if cmp -s "$TEST_DIR/test_large_mt.bin" "$TEST_DIR/test_large_mt.dec"; then log_pass "Large file multi-threading" else log_fail "Large file multi-threading round-trip failed" fi # 20. JSON Output Tests echo "Testing JSON Output (-j, --json)..." # 20.1 List mode with JSON (single file) echo " Testing list mode with JSON (single file)..." 
"$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" JSON_OUT=$("$ZXC_BIN" -l -j "$TEST_FILE_XC_ARG") if [[ "$JSON_OUT" == *'"filename"'* ]] && \ [[ "$JSON_OUT" == *'"compressed_size_bytes"'* ]] && \ [[ "$JSON_OUT" == *'"uncompressed_size_bytes"'* ]] && \ [[ "$JSON_OUT" == *'"compression_ratio"'* ]] && \ [[ "$JSON_OUT" == *'"format_version"'* ]] && \ [[ "$JSON_OUT" == *'"checksum_method"'* ]]; then log_pass "List mode JSON output (single file)" else log_fail "List mode JSON output missing expected fields" fi # 20.2 List mode with JSON (multiple files) echo " Testing list mode with JSON (multiple files)..." cp "$TEST_FILE_XC_ARG" "$TEST_DIR/test2.xc" JSON_OUT=$("$ZXC_BIN" -l -j "$TEST_FILE_XC_ARG" "$TEST_DIR/test2.xc") if [[ "$JSON_OUT" == "["* ]] && \ [[ "$JSON_OUT" == *"]" ]] && \ [[ "$JSON_OUT" == *'"filename"'* ]] && \ [[ "$JSON_OUT" == *","* ]]; then log_pass "List mode JSON output (multiple files - array)" else log_fail "List mode JSON output should produce array for multiple files" fi # 20.3 Benchmark mode with JSON echo " Testing benchmark mode with JSON..." JSON_OUT=$("$ZXC_BIN" -b1 -j "$TEST_FILE_ARG" 2>/dev/null) if [[ "$JSON_OUT" == *'"input_file"'* ]] && \ [[ "$JSON_OUT" == *'"input_size_bytes"'* ]] && \ [[ "$JSON_OUT" == *'"compressed_size_bytes"'* ]] && \ [[ "$JSON_OUT" == *'"compression_ratio"'* ]] && \ [[ "$JSON_OUT" == *'"duration_seconds"'* ]] && \ [[ "$JSON_OUT" == *'"compress_iterations"'* ]] && \ [[ "$JSON_OUT" == *'"decompress_iterations"'* ]] && \ [[ "$JSON_OUT" == *'"threads"'* ]] && \ [[ "$JSON_OUT" == *'"level"'* ]] && \ [[ "$JSON_OUT" == *'"checksum_enabled"'* ]] && \ [[ "$JSON_OUT" == *'"compress_speed_mbps"'* ]] && \ [[ "$JSON_OUT" == *'"decompress_speed_mbps"'* ]]; then log_pass "Benchmark mode JSON output" else log_fail "Benchmark mode JSON output missing expected fields" fi # 20.4 Integrity check with JSON (valid file) echo " Testing integrity check with JSON (valid file)..." 
"$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" JSON_OUT=$("$ZXC_BIN" -t -j "$TEST_FILE_XC_ARG") if [[ "$JSON_OUT" == *'"filename"'* ]] && \ [[ "$JSON_OUT" == *'"status": "ok"'* ]] && \ [[ "$JSON_OUT" == *'"checksum_verified"'* ]]; then log_pass "Integrity check JSON output (valid file)" else log_fail "Integrity check JSON output missing expected fields" fi # 20.5 Integrity check with JSON (corrupt file) echo " Testing integrity check with JSON (corrupt file)..." "$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" # Corrupt a byte printf '\xff' | dd of="$TEST_FILE_XC_ARG" bs=1 seek=100 count=1 conv=notrunc 2>/dev/null set +e JSON_OUT=$("$ZXC_BIN" -t -j "$TEST_FILE_XC_ARG" 2>&1) RET=$? set -e if [ $RET -ne 0 ] && \ [[ "$JSON_OUT" == *'"filename"'* ]] && \ [[ "$JSON_OUT" == *'"status": "failed"'* ]] && \ [[ "$JSON_OUT" == *'"error"'* ]]; then log_pass "Integrity check JSON output (corrupt file)" else log_fail "Integrity check JSON output for corrupt file incorrect" fi # 20.6 Verify JSON is parseable (if jq is available) echo " Checking JSON validity (if jq available)..." if command -v jq &> /dev/null; then "$ZXC_BIN" -z -k -f -C "$TEST_FILE_ARG" JSON_OUT=$("$ZXC_BIN" -l -j "$TEST_FILE_XC_ARG") if echo "$JSON_OUT" | jq . > /dev/null 2>&1; then log_pass "JSON output is valid (verified with jq)" else log_fail "JSON output is not valid JSON" fi else echo " [SKIP] jq not available, skipping JSON validation" fi # 21. Multiple Mode (-m) Tests echo "Testing Multiple Mode (-m)..." # 21.1 Compress multiple files echo " Testing compress multiple files..." cp "$TEST_FILE" "$TEST_DIR/multi1.txt" cp "$TEST_FILE" "$TEST_DIR/multi2.txt" "$ZXC_BIN" -m -3 "$TEST_DIR/multi1.txt" "$TEST_DIR/multi2.txt" if [ -f "$TEST_DIR/multi1.txt.xc" ] && [ -f "$TEST_DIR/multi2.txt.xc" ]; then log_pass "Compress multiple files (-m)" else log_fail "Compress multiple files failed" fi # 21.2 Decompress multiple files echo " Testing decompress multiple files..." 
rm -f "$TEST_DIR/multi1.txt" "$TEST_DIR/multi2.txt" "$ZXC_BIN" -d -m "$TEST_DIR/multi1.txt.xc" "$TEST_DIR/multi2.txt.xc" if cmp -s "$TEST_FILE" "$TEST_DIR/multi1.txt" && cmp -s "$TEST_FILE" "$TEST_DIR/multi2.txt"; then log_pass "Decompress multiple files (-d -m)" else log_fail "Decompress multiple files failed (content mismatch or missing files)" fi # 21.3 Error on multiple and stdout echo " Testing stdout restriction with multiple mode..." set +e "$ZXC_BIN" -m -c "$TEST_DIR/multi1.txt" "$TEST_DIR/multi2.txt" > /dev/null 2>&1 RET=$? set -e if [ $RET -ne 0 ]; then log_pass "Stdout rejected with multiple mode" else log_fail "Stdout should be rejected with multiple mode" fi # 22. Recursive Mode (-r) Tests echo "Testing Recursive Mode (-r)..." # Create a nested directory structure mkdir -p "$TEST_DIR/rec_test/subdir1" mkdir -p "$TEST_DIR/rec_test/subdir2" cp "$TEST_FILE" "$TEST_DIR/rec_test/fileA.txt" cp "$TEST_FILE" "$TEST_DIR/rec_test/subdir1/fileB.txt" cp "$TEST_FILE" "$TEST_DIR/rec_test/subdir2/fileC.txt" # 22.1 Compress recursively echo " Testing compress recursive directory..." "$ZXC_BIN" -r -3 "$TEST_DIR/rec_test" if [ -f "$TEST_DIR/rec_test/fileA.txt.xc" ] && \ [ -f "$TEST_DIR/rec_test/subdir1/fileB.txt.xc" ] && \ [ -f "$TEST_DIR/rec_test/subdir2/fileC.txt.xc" ]; then log_pass "Compress recursive directory (-r)" else log_fail "Compress recursive directory failed" fi # 22.2 Decompress recursively echo " Testing decompress recursive directory..." rm -f "$TEST_DIR/rec_test/fileA.txt" "$TEST_DIR/rec_test/subdir1/fileB.txt" "$TEST_DIR/rec_test/subdir2/fileC.txt" "$ZXC_BIN" -d -r "$TEST_DIR/rec_test" if cmp -s "$TEST_FILE" "$TEST_DIR/rec_test/fileA.txt" && \ cmp -s "$TEST_FILE" "$TEST_DIR/rec_test/subdir1/fileB.txt" && \ cmp -s "$TEST_FILE" "$TEST_DIR/rec_test/subdir2/fileC.txt"; then log_pass "Decompress recursive directory (-d -r)" else log_fail "Decompress recursive directory failed (content mismatch or missing files)" fi # 23. 
Block Size Tests (-B) echo "Testing Block Size (-B)..." # 23.1 Round-trip with different block sizes for BS in 4K 64K 512K 1M; do echo " Testing block size -B $BS..." "$ZXC_BIN" -3 -B "$BS" -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_bs_${BS}.xc" if [ ! -s "$TEST_DIR/test_bs_${BS}.xc" ]; then log_fail "Block size $BS compression produced empty output" fi "$ZXC_BIN" -d -c "$TEST_DIR/test_bs_${BS}.xc" > "$TEST_DIR/test_bs_${BS}.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_bs_${BS}.dec"; then SIZE_BS=$(wc -c < "$TEST_DIR/test_bs_${BS}.xc" | tr -d ' ') log_pass "Block size -B $BS (Size: $SIZE_BS)" else log_fail "Block size $BS round-trip failed" fi done # 23.2 Test suffix formats (KB, M, MB) echo " Testing block size suffix formats..." "$ZXC_BIN" -3 -B 128KB -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_bs_128KB.xc" "$ZXC_BIN" -d -c "$TEST_DIR/test_bs_128KB.xc" > "$TEST_DIR/test_bs_128KB.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_bs_128KB.dec"; then log_pass "Block size suffix KB" else log_fail "Block size suffix KB round-trip failed" fi "$ZXC_BIN" -3 -B 2MB -c -k "$TEST_FILE_ARG" > "$TEST_DIR/test_bs_2MB.xc" "$ZXC_BIN" -d -c "$TEST_DIR/test_bs_2MB.xc" > "$TEST_DIR/test_bs_2MB.dec" if cmp -s "$TEST_FILE" "$TEST_DIR/test_bs_2MB.dec"; then log_pass "Block size suffix MB" else log_fail "Block size suffix MB round-trip failed" fi # 23.3 Error: invalid block size (not power of 2) echo " Testing invalid block size..." set +e "$ZXC_BIN" -3 -B 100K "$TEST_FILE_ARG" > /dev/null 2>&1 RET=$? set -e if [ $RET -ne 0 ]; then log_pass "Invalid block size rejected (100K)" else log_fail "Invalid block size should be rejected (100K is not power of 2)" fi # 23.4 Error: block size too small set +e "$ZXC_BIN" -3 -B 2K "$TEST_FILE_ARG" > /dev/null 2>&1 RET=$? set -e if [ $RET -ne 0 ]; then log_pass "Block size too small rejected (2K)" else log_fail "Block size 2K should be rejected (min is 4K)" fi # 23.5 Error: block size too large set +e "$ZXC_BIN" -3 -B 4M "$TEST_FILE_ARG" > /dev/null 2>&1 RET=$? 
set -e if [ $RET -ne 0 ]; then log_pass "Block size too large rejected (4M)" else log_fail "Block size 4M should be rejected (max is 2M)" fi echo "All tests passed!" exit 0 zxc-0.9.1/wrappers/000077500000000000000000000000001515574030100141715ustar00rootroot00000000000000zxc-0.9.1/wrappers/nodejs/000077500000000000000000000000001515574030100154535ustar00rootroot00000000000000zxc-0.9.1/wrappers/nodejs/.gitignore000066400000000000000000000000711515574030100174410ustar00rootroot00000000000000node_modules/ build/ prebuilds/ *.node package-lock.json zxc-0.9.1/wrappers/nodejs/.nvmrc000066400000000000000000000000021515574030100165710ustar00rootroot0000000000000022zxc-0.9.1/wrappers/nodejs/CMakeLists.txt000066400000000000000000000027741515574030100202250ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.14) project(zxc_nodejs C CXX) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) # ---- Core ZXC library ---- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../ ${CMAKE_CURRENT_BINARY_DIR}/zxc_core_build) # ---- Node.js addon ---- include_directories(${CMAKE_JS_INC}) file(GLOB SOURCE_FILES "src/zxc_addon.cc") add_library(${PROJECT_NAME} SHARED ${SOURCE_FILES} ${CMAKE_JS_SRC}) set_target_properties(${PROJECT_NAME} PROPERTIES PREFIX "" SUFFIX ".node" ) target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../include ${CMAKE_JS_INC} ) # Generate node.lib on Windows (cmake-js provides .def but not .lib) if(MSVC AND CMAKE_JS_NODELIB_DEF AND CMAKE_JS_NODELIB_TARGET) execute_process(COMMAND ${CMAKE_AR} /def:${CMAKE_JS_NODELIB_DEF} /out:${CMAKE_JS_NODELIB_TARGET} ${CMAKE_STATIC_LINKER_FLAGS}) endif() target_link_libraries(${PROJECT_NAME} PRIVATE zxc_lib ${CMAKE_JS_LIB} ) # node-addon-api (header-only, no exceptions by default) execute_process( COMMAND node -p "require('node-addon-api').include" WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE NAPI_INCLUDE_DIR OUTPUT_STRIP_TRAILING_WHITESPACE ) # Strip surrounding quotes if 
present string(REPLACE "\"" "" NAPI_INCLUDE_DIR ${NAPI_INCLUDE_DIR}) target_include_directories(${PROJECT_NAME} PRIVATE ${NAPI_INCLUDE_DIR}) target_compile_definitions(${PROJECT_NAME} PRIVATE NAPI_VERSION=8) zxc-0.9.1/wrappers/nodejs/README.md000066400000000000000000000050031515574030100167300ustar00rootroot00000000000000# ZXC Node.js Bindings High-performance Node.js bindings for the **ZXC** asymmetric compressor, optimized for **fast decompression**. Designed for *Write Once, Read Many* workloads like ML datasets, game assets, and caches. ## Features - **Blazing fast decompression** - ZXC is specifically optimized for read-heavy workloads. - **Native N-API addon** - compiled C code via cmake-js for maximum performance. - **Buffer support** - works directly with Node.js `Buffer` objects. - **TypeScript declarations** - full type definitions included. ## Installation (from source) ```bash git clone https://github.com/hellobertrand/zxc.git cd zxc/wrappers/nodejs npm install ``` ### Prerequisites - Node.js >= 16.0.0 - CMake >= 3.14 - A C17/C++17 compiler (GCC, Clang, or MSVC) ## Usage ```javascript const zxc = require('zxc-compress'); // Compress const data = Buffer.from('Hello, World!'.repeat(1_000)); const compressed = zxc.compress(data, { level: zxc.LEVEL_DEFAULT }); // Decompress (auto-detects size) const decompressed = zxc.decompress(compressed); console.log(`Original: ${data.length} bytes`); console.log(`Compressed: ${compressed.length} bytes`); console.log(`Ratio: ${(compressed.length / data.length * 100).toFixed(1)}%`); ``` ## API ### `compress(data, options?)` Compress a Buffer. | Parameter | Type | Default | Description | |-----------|------|---------|-------------| | `data` | `Buffer` | - | Input data | | `options.level` | `number` | `LEVEL_DEFAULT` | Compression level (1–5) | | `options.checksum` | `boolean` | `false` | Enable checksum | Returns: `Buffer` - compressed data. ### `decompress(data, options?)` Decompress a ZXC compressed Buffer. 
| Parameter | Type | Default | Description | |-----------|------|---------|-------------| | `data` | `Buffer` | - | Compressed data | | `options.size` | `number` | auto | Expected decompressed size | | `options.checksum` | `boolean` | `false` | Verify checksum | Returns: `Buffer` - decompressed data. ### `compressBound(inputSize)` Returns the maximum compressed size for a given input size. ### `getDecompressedSize(data)` Returns the original size from a ZXC compressed buffer (reads footer only). ### Constants | Constant | Value | Description | |----------|-------|-------------| | `LEVEL_FASTEST` | 1 | Fastest compression | | `LEVEL_FAST` | 2 | Fast compression | | `LEVEL_DEFAULT` | 3 | Recommended balance | | `LEVEL_BALANCED` | 4 | Good ratio, good speed | | `LEVEL_COMPACT` | 5 | Highest density | ## Testing ```bash npm test ``` ## License BSD-3-Clause zxc-0.9.1/wrappers/nodejs/lib/000077500000000000000000000000001515574030100162215ustar00rootroot00000000000000zxc-0.9.1/wrappers/nodejs/lib/index.d.ts000066400000000000000000000055311515574030100201260ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ /** Fastest compression, best for real-time applications. */ export const LEVEL_FASTEST: number; /** Fast compression, good for real-time applications. */ export const LEVEL_FAST: number; /** Recommended: ratio > LZ4, decode speed > LZ4. */ export const LEVEL_DEFAULT: number; /** Good ratio, good decode speed. */ export const LEVEL_BALANCED: number; /** High density. Best for storage/firmware/assets. */ export const LEVEL_COMPACT: number; /** Memory allocation failure. */ export const ERROR_MEMORY: number; /** Destination buffer too small. */ export const ERROR_DST_TOO_SMALL: number; /** Source buffer too small or truncated input. */ export const ERROR_SRC_TOO_SMALL: number; /** Invalid magic word in file header. 
*/ export const ERROR_BAD_MAGIC: number; /** Unsupported file format version. */ export const ERROR_BAD_VERSION: number; /** Corrupted or invalid header (CRC mismatch). */ export const ERROR_BAD_HEADER: number; /** Block or global checksum verification failed. */ export const ERROR_BAD_CHECKSUM: number; /** Corrupted compressed data. */ export const ERROR_CORRUPT_DATA: number; /** Invalid match offset during decompression. */ export const ERROR_BAD_OFFSET: number; /** Buffer overflow detected during processing. */ export const ERROR_OVERFLOW: number; /** Read/write/seek failure on file. */ export const ERROR_IO: number; /** Required input pointer is NULL. */ export const ERROR_NULL_INPUT: number; /** Unknown or unexpected block type. */ export const ERROR_BAD_BLOCK_TYPE: number; /** Invalid block size. */ export const ERROR_BAD_BLOCK_SIZE: number; export interface CompressOptions { /** Compression level (1-5). Defaults to LEVEL_DEFAULT. */ level?: number; /** Enable checksum verification. Defaults to false. */ checksum?: boolean; } export interface DecompressOptions { /** Expected decompressed size. If omitted, read from header. */ size?: number; /** Enable checksum verification. Defaults to false. */ checksum?: boolean; } /** * Returns the maximum compressed size for a given input size. * Useful for pre-allocating output buffers. */ export function compressBound(inputSize: number): number; /** * Compress a Buffer using the ZXC algorithm. */ export function compress(data: Buffer, options?: CompressOptions): Buffer; /** * Returns the original decompressed size from a ZXC compressed buffer. * Reads the footer without performing decompression. */ export function getDecompressedSize(data: Buffer): number; /** * Decompress a ZXC compressed Buffer. */ export function decompress(data: Buffer, options?: DecompressOptions): Buffer; /** * Returns a human-readable name for a given error code. 
*/ export function errorName(code: number): string; zxc-0.9.1/wrappers/nodejs/lib/index.js000066400000000000000000000113341515574030100176700ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ 'use strict'; const native = require('../build/Release/zxc_nodejs.node'); // Re-export compression level constants const LEVEL_FASTEST = native.LEVEL_FASTEST; const LEVEL_FAST = native.LEVEL_FAST; const LEVEL_DEFAULT = native.LEVEL_DEFAULT; const LEVEL_BALANCED = native.LEVEL_BALANCED; const LEVEL_COMPACT = native.LEVEL_COMPACT; // Re-export error constants const ERROR_MEMORY = native.ERROR_MEMORY; const ERROR_DST_TOO_SMALL = native.ERROR_DST_TOO_SMALL; const ERROR_SRC_TOO_SMALL = native.ERROR_SRC_TOO_SMALL; const ERROR_BAD_MAGIC = native.ERROR_BAD_MAGIC; const ERROR_BAD_VERSION = native.ERROR_BAD_VERSION; const ERROR_BAD_HEADER = native.ERROR_BAD_HEADER; const ERROR_BAD_CHECKSUM = native.ERROR_BAD_CHECKSUM; const ERROR_CORRUPT_DATA = native.ERROR_CORRUPT_DATA; const ERROR_BAD_OFFSET = native.ERROR_BAD_OFFSET; const ERROR_OVERFLOW = native.ERROR_OVERFLOW; const ERROR_IO = native.ERROR_IO; const ERROR_NULL_INPUT = native.ERROR_NULL_INPUT; const ERROR_BAD_BLOCK_TYPE = native.ERROR_BAD_BLOCK_TYPE; const ERROR_BAD_BLOCK_SIZE = native.ERROR_BAD_BLOCK_SIZE; /** * Returns a human-readable name for a given error code. * * @param {number} code - ZXC error code. * @returns {string} Error name (e.g., "ZXC_ERROR_DST_TOO_SMALL"). */ function errorName(code) { if (typeof code !== 'number') { throw new TypeError('code must be a number'); } return native.errorName(code); } /** * Returns the maximum compressed size for a given input size. * Useful for pre-allocating output buffers. * * @param {number} inputSize - Size of the input data in bytes. * @returns {number} Maximum required buffer size in bytes. 
*/ function compressBound(inputSize) { if (typeof inputSize !== 'number' || inputSize < 0) { throw new TypeError('inputSize must be a non-negative number'); } return native.compressBound(inputSize); } /** * Compress a Buffer using the ZXC algorithm. * * @param {Buffer} data - Buffer to compress. * @param {object} [options] - Compression options. * @param {number} [options.level=LEVEL_DEFAULT] - Compression level (1-5). * @param {boolean} [options.checksum=false] - Enable checksum verification. * @returns {Buffer} Compressed data. */ function compress(data, options = {}) { if (!Buffer.isBuffer(data)) { throw new TypeError('data must be a Buffer'); } const level = options.level !== undefined ? options.level : LEVEL_DEFAULT; const checksum = options.checksum !== undefined ? options.checksum : false; if (data.length === 0 && !checksum) { return Buffer.alloc(0); } return native.compress(data, level, checksum); } /** * Returns the original decompressed size from a ZXC compressed buffer. * Reads the footer without performing decompression. * * @param {Buffer} data - Compressed data buffer. * @returns {number} Original uncompressed size in bytes, or 0 if invalid. */ function getDecompressedSize(data) { if (!Buffer.isBuffer(data)) { throw new TypeError('data must be a Buffer'); } return native.getDecompressedSize(data); } /** * Decompress a ZXC compressed Buffer. * * @param {Buffer} data - Compressed data. * @param {object} [options] - Decompression options. * @param {number} [options.size] - Expected decompressed size. If omitted, read from header. * @param {boolean} [options.checksum=false] - Enable checksum verification. * @returns {Buffer} Decompressed data. */ function decompress(data, options = {}) { if (!Buffer.isBuffer(data)) { throw new TypeError('data must be a Buffer'); } const checksum = options.checksum !== undefined ? 
options.checksum : false; if (data.length === 0 && !checksum) { return Buffer.alloc(0); } let size = options.size; if (size === undefined) { size = getDecompressedSize(data); if (size === 0) { throw new Error('Invalid ZXC header or data too short to determine size'); } } return native.decompress(data, size, checksum); } module.exports = { // Functions compress, decompress, compressBound, getDecompressedSize, // Constants LEVEL_FASTEST, LEVEL_FAST, LEVEL_DEFAULT, LEVEL_BALANCED, LEVEL_COMPACT, // Error handling errorName, ERROR_MEMORY, ERROR_DST_TOO_SMALL, ERROR_SRC_TOO_SMALL, ERROR_BAD_MAGIC, ERROR_BAD_VERSION, ERROR_BAD_HEADER, ERROR_BAD_CHECKSUM, ERROR_CORRUPT_DATA, ERROR_BAD_OFFSET, ERROR_OVERFLOW, ERROR_IO, ERROR_NULL_INPUT, ERROR_BAD_BLOCK_TYPE, ERROR_BAD_BLOCK_SIZE, }; zxc-0.9.1/wrappers/nodejs/package.json000066400000000000000000000017511515574030100177450ustar00rootroot00000000000000{ "name": "zxc-compress", "version": "0.9.1", "description": "ZXC: High-performance lossless asymmetric compression for Node.js", "main": "lib/index.js", "types": "lib/index.d.ts", "files": [ "lib/", "src/", "CMakeLists.txt" ], "scripts": { "install": "cmake-js compile", "build": "cmake-js compile", "rebuild": "cmake-js rebuild", "test": "vitest run" }, "keywords": [ "compression", "decompression", "lossless", "zxc", "performance", "lz77" ], "author": "Bertrand Lebonnois", "license": "BSD-3-Clause", "repository": { "type": "git", "url": "https://github.com/hellobertrand/zxc.git", "directory": "wrappers/nodejs" }, "homepage": "https://github.com/hellobertrand/zxc", "engines": { "node": ">=16.0.0" }, "dependencies": { "node-addon-api": "^8.5.0", "cmake-js": "^8.0.0" }, "devDependencies": { "vitest": "^4.0.0" }, "binary": { "napi_versions": [ 8 ] } }zxc-0.9.1/wrappers/nodejs/src/000077500000000000000000000000001515574030100162425ustar00rootroot00000000000000zxc-0.9.1/wrappers/nodejs/src/zxc_addon.cc000066400000000000000000000201441515574030100205230ustar00rootroot00000000000000/* * 
ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ #include extern "C" { #include "zxc.h" } // ============================================================================= // compressBound(inputSize: number): number // ============================================================================= static Napi::Value CompressBound(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (info.Length() < 1 || !info[0].IsNumber()) { Napi::TypeError::New(env, "Expected a number (inputSize)").ThrowAsJavaScriptException(); return env.Undefined(); } uint64_t input_size = info[0].As().Int64Value(); uint64_t bound = zxc_compress_bound(static_cast(input_size)); return Napi::Number::New(env, static_cast(bound)); } // ============================================================================= // compress(buffer: Buffer, level?: number, checksum?: boolean): Buffer // ============================================================================= static Napi::Value Compress(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (info.Length() < 1 || !info[0].IsBuffer()) { Napi::TypeError::New(env, "Expected a Buffer as first argument") .ThrowAsJavaScriptException(); return env.Undefined(); } Napi::Buffer src_buf = info[0].As>(); const void* src = src_buf.Data(); size_t src_size = src_buf.Length(); int level = ZXC_LEVEL_DEFAULT; if (info.Length() >= 2 && info[1].IsNumber()) { level = info[1].As().Int32Value(); } int checksum = 0; if (info.Length() >= 3 && info[2].IsBoolean()) { checksum = info[2].As().Value() ? 
1 : 0; } // Handle empty input if (src_size == 0 && !checksum) { return Napi::Buffer::New(env, 0); } uint64_t bound = zxc_compress_bound(src_size); Napi::Buffer dst_buf = Napi::Buffer::New(env, static_cast(bound)); zxc_compress_opts_t opts = {0}; opts.level = level; opts.checksum_enabled = checksum; int64_t nwritten = zxc_compress(src, src_size, dst_buf.Data(), static_cast(bound), &opts); if (nwritten < 0) { int err_code = static_cast(nwritten); Napi::Error err = Napi::Error::New(env, zxc_error_name(err_code)); err.Set("code", Napi::Number::New(env, err_code)); err.ThrowAsJavaScriptException(); return env.Undefined(); } if (nwritten == 0) { Napi::Error::New(env, "Input is too small to be compressed").ThrowAsJavaScriptException(); return env.Undefined(); } // Return a slice of the buffer with the actual size return Napi::Buffer::Copy(env, dst_buf.Data(), static_cast(nwritten)); } // ============================================================================= // decompress(buffer: Buffer, decompressSize: number, checksum?: boolean): Buffer // ============================================================================= static Napi::Value Decompress(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (info.Length() < 2 || !info[0].IsBuffer() || !info[1].IsNumber()) { Napi::TypeError::New(env, "Expected (Buffer, number) as arguments") .ThrowAsJavaScriptException(); return env.Undefined(); } Napi::Buffer src_buf = info[0].As>(); const void* src = src_buf.Data(); size_t src_size = src_buf.Length(); size_t decompress_size = static_cast(info[1].As().Int64Value()); int checksum = 0; if (info.Length() >= 3 && info[2].IsBoolean()) { checksum = info[2].As().Value() ? 
1 : 0; } Napi::Buffer dst_buf = Napi::Buffer::New(env, decompress_size); zxc_decompress_opts_t dopts = {0}; dopts.checksum_enabled = checksum; int64_t nwritten = zxc_decompress(src, src_size, dst_buf.Data(), decompress_size, &dopts); if (nwritten < 0) { int err_code = static_cast(nwritten); Napi::Error err = Napi::Error::New(env, zxc_error_name(err_code)); err.Set("code", Napi::Number::New(env, err_code)); err.ThrowAsJavaScriptException(); return env.Undefined(); } return dst_buf; } // ============================================================================= // getDecompressedSize(buffer: Buffer): number // ============================================================================= static Napi::Value GetDecompressedSize(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (info.Length() < 1 || !info[0].IsBuffer()) { Napi::TypeError::New(env, "Expected a Buffer as first argument") .ThrowAsJavaScriptException(); return env.Undefined(); } Napi::Buffer src_buf = info[0].As>(); uint64_t size = zxc_get_decompressed_size(src_buf.Data(), src_buf.Length()); return Napi::Number::New(env, static_cast(size)); } // ============================================================================= // errorName(code: number): string // ============================================================================= static Napi::Value ErrorName(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (info.Length() < 1 || !info[0].IsNumber()) { Napi::TypeError::New(env, "Expected a number (error code)").ThrowAsJavaScriptException(); return env.Undefined(); } int code = info[0].As().Int32Value(); const char* name = zxc_error_name(code); return Napi::String::New(env, name); } // ============================================================================= // Module initialization // ============================================================================= static Napi::Object Init(Napi::Env env, Napi::Object exports) { // Functions exports.Set("compressBound", 
Napi::Function::New(env, CompressBound, "compressBound")); exports.Set("compress", Napi::Function::New(env, Compress, "compress")); exports.Set("decompress", Napi::Function::New(env, Decompress, "decompress")); exports.Set("getDecompressedSize", Napi::Function::New(env, GetDecompressedSize, "getDecompressedSize")); exports.Set("errorName", Napi::Function::New(env, ErrorName, "errorName")); // Compression level constants exports.Set("LEVEL_FASTEST", Napi::Number::New(env, ZXC_LEVEL_FASTEST)); exports.Set("LEVEL_FAST", Napi::Number::New(env, ZXC_LEVEL_FAST)); exports.Set("LEVEL_DEFAULT", Napi::Number::New(env, ZXC_LEVEL_DEFAULT)); exports.Set("LEVEL_BALANCED", Napi::Number::New(env, ZXC_LEVEL_BALANCED)); exports.Set("LEVEL_COMPACT", Napi::Number::New(env, ZXC_LEVEL_COMPACT)); // Error constants exports.Set("ERROR_MEMORY", Napi::Number::New(env, ZXC_ERROR_MEMORY)); exports.Set("ERROR_DST_TOO_SMALL", Napi::Number::New(env, ZXC_ERROR_DST_TOO_SMALL)); exports.Set("ERROR_SRC_TOO_SMALL", Napi::Number::New(env, ZXC_ERROR_SRC_TOO_SMALL)); exports.Set("ERROR_BAD_MAGIC", Napi::Number::New(env, ZXC_ERROR_BAD_MAGIC)); exports.Set("ERROR_BAD_VERSION", Napi::Number::New(env, ZXC_ERROR_BAD_VERSION)); exports.Set("ERROR_BAD_HEADER", Napi::Number::New(env, ZXC_ERROR_BAD_HEADER)); exports.Set("ERROR_BAD_CHECKSUM", Napi::Number::New(env, ZXC_ERROR_BAD_CHECKSUM)); exports.Set("ERROR_CORRUPT_DATA", Napi::Number::New(env, ZXC_ERROR_CORRUPT_DATA)); exports.Set("ERROR_BAD_OFFSET", Napi::Number::New(env, ZXC_ERROR_BAD_OFFSET)); exports.Set("ERROR_OVERFLOW", Napi::Number::New(env, ZXC_ERROR_OVERFLOW)); exports.Set("ERROR_IO", Napi::Number::New(env, ZXC_ERROR_IO)); exports.Set("ERROR_NULL_INPUT", Napi::Number::New(env, ZXC_ERROR_NULL_INPUT)); exports.Set("ERROR_BAD_BLOCK_TYPE", Napi::Number::New(env, ZXC_ERROR_BAD_BLOCK_TYPE)); exports.Set("ERROR_BAD_BLOCK_SIZE", Napi::Number::New(env, ZXC_ERROR_BAD_BLOCK_SIZE)); return exports; } NODE_API_MODULE(zxc_nodejs, Init) 
zxc-0.9.1/wrappers/nodejs/test/000077500000000000000000000000001515574030100164325ustar00rootroot00000000000000zxc-0.9.1/wrappers/nodejs/test/zxc.test.js000066400000000000000000000144111515574030100205530ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ 'use strict'; const zxc = require('../lib/index'); // ============================================================================= // Roundtrip tests // ============================================================================= describe('compress/decompress roundtrip', () => { const testCases = [ { name: 'normal data', data: Buffer.from('hello world'.repeat(10)) }, { name: 'single byte', data: Buffer.from('a') }, { name: 'empty buffer', data: Buffer.alloc(0) }, { name: 'large 10 MB', data: Buffer.alloc(10_000_000, 0x42) }, ]; for (const { name, data } of testCases) { test(`roundtrip: ${name}`, () => { for (let level = zxc.LEVEL_FASTEST; level <= zxc.LEVEL_COMPACT; level++) { const compressed = zxc.compress(data, { level }); const size = zxc.getDecompressedSize(compressed); const decompressed = zxc.decompress(compressed, { size }); expect(decompressed.length).toBe(data.length); expect(Buffer.compare(decompressed, data)).toBe(0); } }); } test('roundtrip with auto-size detection', () => { const data = Buffer.from('auto size detection test'.repeat(100)); const compressed = zxc.compress(data); const decompressed = zxc.decompress(compressed); expect(decompressed.length).toBe(data.length); expect(Buffer.compare(decompressed, data)).toBe(0); }); }); // ============================================================================= // compressBound // ============================================================================= describe('compressBound', () => { test('returns a value >= input size', () => { const bound = zxc.compressBound(1024); expect(bound).toBeGreaterThanOrEqual(1024); 
}); test('returns 0-based bound for size 0', () => { const bound = zxc.compressBound(0); expect(bound).toBeGreaterThanOrEqual(0); }); }); // ============================================================================= // Corruption detection // ============================================================================= describe('corruption detection', () => { test('detects corrupted data with checksum', () => { const data = Buffer.from('hello world'.repeat(10)); const compressed = zxc.compress(data, { checksum: true }); // Corrupt last byte const corrupted = Buffer.from(compressed); corrupted[corrupted.length - 1] ^= 0x01; expect(() => { zxc.decompress(corrupted, { size: data.length, checksum: true }); }).toThrow(); }); test('throws on truncated compressed input', () => { expect(() => { zxc.decompress(Buffer.from([0x01, 0x02, 0x03]), { size: 100 }); }).toThrow(); }); }); // ============================================================================= // Invalid input types // ============================================================================= describe('invalid input handling', () => { test('compress rejects non-Buffer', () => { expect(() => zxc.compress('string')).toThrow(TypeError); expect(() => zxc.compress(123)).toThrow(TypeError); expect(() => zxc.compress(null)).toThrow(TypeError); }); test('decompress rejects non-Buffer', () => { expect(() => zxc.decompress('string')).toThrow(TypeError); expect(() => zxc.decompress(123)).toThrow(TypeError); }); test('getDecompressedSize rejects non-Buffer', () => { expect(() => zxc.getDecompressedSize('string')).toThrow(TypeError); }); test('compressBound rejects non-number', () => { expect(() => zxc.compressBound('abc')).toThrow(TypeError); }); }); // ============================================================================= // Constants // ============================================================================= describe('constants', () => { test('compression levels are defined', () => { 
expect(zxc.LEVEL_FASTEST).toBe(1); expect(zxc.LEVEL_FAST).toBe(2); expect(zxc.LEVEL_DEFAULT).toBe(3); expect(zxc.LEVEL_BALANCED).toBe(4); expect(zxc.LEVEL_COMPACT).toBe(5); }); }); // ============================================================================= // Error Handling // ============================================================================= describe('error handling', () => { test('error constants are defined', () => { expect(zxc.ERROR_MEMORY).toBe(-1); expect(zxc.ERROR_DST_TOO_SMALL).toBe(-2); expect(zxc.ERROR_SRC_TOO_SMALL).toBe(-3); expect(zxc.ERROR_BAD_MAGIC).toBe(-4); expect(zxc.ERROR_BAD_VERSION).toBe(-5); expect(zxc.ERROR_BAD_HEADER).toBe(-6); expect(zxc.ERROR_BAD_CHECKSUM).toBe(-7); expect(zxc.ERROR_CORRUPT_DATA).toBe(-8); expect(zxc.ERROR_BAD_OFFSET).toBe(-9); expect(zxc.ERROR_OVERFLOW).toBe(-10); expect(zxc.ERROR_IO).toBe(-11); expect(zxc.ERROR_NULL_INPUT).toBe(-12); expect(zxc.ERROR_BAD_BLOCK_TYPE).toBe(-13); expect(zxc.ERROR_BAD_BLOCK_SIZE).toBe(-14); }); test('errorName works', () => { expect(zxc.errorName(zxc.ERROR_DST_TOO_SMALL)).toBe('ZXC_ERROR_DST_TOO_SMALL'); expect(zxc.errorName(-999)).toBe('ZXC_UNKNOWN_ERROR'); }); test('thrown errors have code property', () => { try { zxc.decompress(Buffer.from([0x01, 0x02, 0x03]), { size: 100 }); throw new Error('Should have thrown'); } catch (err) { expect(err.code).toBe(zxc.ERROR_NULL_INPUT); expect(err.message).toBe('ZXC_ERROR_NULL_INPUT'); } }); test('detects bad magic with correct code', () => { try { // Provide enough bytes (16 for header) but fake magic const fakeHeader = Buffer.alloc(32); fakeHeader.write('FAKE', 0, 'utf8'); zxc.decompress(fakeHeader, { size: 100 }); throw new Error('Should have thrown'); } catch (err) { expect(err.code).toBe(zxc.ERROR_BAD_HEADER); } }); }); zxc-0.9.1/wrappers/nodejs/vitest.config.mjs000066400000000000000000000001631515574030100207500ustar00rootroot00000000000000import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { 
globals: true, }, }); zxc-0.9.1/wrappers/python/000077500000000000000000000000001515574030100155125ustar00rootroot00000000000000zxc-0.9.1/wrappers/python/.gitignore000066400000000000000000000014011515574030100174760ustar00rootroot00000000000000# Editor temporary/working/backup files # ######################################### .#* [#]*# *~ *$ *.bak *.diff .idea/ *.iml *.ipr *.iws *.org .project pmip *.rej .settings/ .*.sw[nop] .sw[nop] *.tmp *.vim .vscode tags cscope.out # gnu global GPATH GRTAGS GSYMS GTAGS .cache .mypy_cache/ # Compiled source # ################### *.a *.com *.class *.dll *.exe *.o *.o.d *.py[ocd] *.so *.mod # Compiled source # ################### build build-* _build _version.py # Egg metadata *.egg-info venv/ # OS generated files # ###################### .DS_Store* .VolumeIcon.icns .fseventsd Icon? .gdb_history ehthumbs.db Thumbs.db .directory # pytest generated files # ########################## /.pytest_cache # benchmarking # ########################## heaptrack* *.out zxc-0.9.1/wrappers/python/CMakeLists.txt000066400000000000000000000010751515574030100202550ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.20) project(zxc_python C) find_package(Python COMPONENTS Development.Module REQUIRED) # ---- Core ---- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../ ${CMAKE_CURRENT_BINARY_DIR}/zxc_core_build) # ---- Python extension ---- Python_add_library(_zxc MODULE src/zxc/_zxc.c) # Without "lib" prefix set_target_properties(_zxc PROPERTIES PREFIX "") target_include_directories(_zxc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../include ) target_link_libraries(_zxc PRIVATE zxc_lib) install(TARGETS _zxc DESTINATION zxc)zxc-0.9.1/wrappers/python/README.md000066400000000000000000000014051515574030100167710ustar00rootroot00000000000000# ZXC Python Bindings High-performance Python bindings for the **ZXC** asymmetric compressor, optimized for **fast decompression**. 
Designed for *Write Once, Read Many* workloads like ML datasets, game assets, and caches. ## Features - **Blazing fast decompression** — ZXC is specifically optimized for read-heavy workloads. - **Buffer protocol support** — works with `bytes`, `bytearray`, `memoryview`, and even NumPy arrays. - **Releases the GIL** during compression/decompression — true parallelism with Python threads. - **Stream helpers** — compress/decompress file-like objects. ## Installation (from source) ```bash git clone https://github.com/hellobertrand/zxc.git cd zxc/wrappers/python python -m venv .venv source .venv/bin/activate pip install .zxc-0.9.1/wrappers/python/pyproject.toml000066400000000000000000000011341515574030100204250ustar00rootroot00000000000000[build-system] requires = ["scikit-build-core>=0.9", "setuptools_scm[toml]>=7.0"] build-backend = "scikit_build_core.build" [project] name = "zxc-compress" dynamic = ["version"] description = "ZXC: Package for high-performance, lossless, asymmetric compressions" readme = "README.md" license = "BSD-3-Clause" [project.urls] Homepage = "https://github.com/hellobertrand/zxc" [tool.scikit-build] metadata.version.provider = "scikit_build_core.metadata.setuptools_scm" wheel.packages = ["src/zxc"] [tool.setuptools_scm] root = "../../" version_scheme = "only-version" local_scheme = "no-local-version"zxc-0.9.1/wrappers/python/src/000077500000000000000000000000001515574030100163015ustar00rootroot00000000000000zxc-0.9.1/wrappers/python/src/zxc/000077500000000000000000000000001515574030100171055ustar00rootroot00000000000000zxc-0.9.1/wrappers/python/src/zxc/__init__.py000066400000000000000000000131651515574030100212240ustar00rootroot00000000000000""" ZXC - High-performance lossless compression Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
SPDX-License-Identifier: BSD-3-Clause """ from ._zxc import ( pyzxc_compress, pyzxc_decompress, pyzxc_stream_compress, pyzxc_stream_decompress, pyzxc_get_decompressed_size, LEVEL_FASTEST, LEVEL_FAST, LEVEL_DEFAULT, LEVEL_BALANCED, LEVEL_COMPACT, ERROR_MEMORY, ERROR_DST_TOO_SMALL, ERROR_SRC_TOO_SMALL, ERROR_BAD_MAGIC, ERROR_BAD_VERSION, ERROR_BAD_HEADER, ERROR_BAD_CHECKSUM, ERROR_CORRUPT_DATA, ERROR_BAD_OFFSET, ERROR_OVERFLOW, ERROR_IO, ERROR_NULL_INPUT, ERROR_BAD_BLOCK_TYPE, ERROR_BAD_BLOCK_SIZE, ) try: from ._version import __version__ except ImportError: __version__ = "0.0.0-dev" __all__ = [ # Functions "compress", "decompress", "stream_compress", "stream_decompress", "pyzxc_get_decompressed_size", # Constants "LEVEL_FASTEST", "LEVEL_FAST", "LEVEL_DEFAULT", "LEVEL_BALANCED", "LEVEL_COMPACT", # Error Constants "ERROR_MEMORY", "ERROR_DST_TOO_SMALL", "ERROR_SRC_TOO_SMALL", "ERROR_BAD_MAGIC", "ERROR_BAD_VERSION", "ERROR_BAD_HEADER", "ERROR_BAD_CHECKSUM", "ERROR_CORRUPT_DATA", "ERROR_BAD_OFFSET", "ERROR_OVERFLOW", "ERROR_IO", "ERROR_NULL_INPUT", "ERROR_BAD_BLOCK_TYPE", "ERROR_BAD_BLOCK_SIZE", ] def compress(data, level = LEVEL_DEFAULT, checksum = False) -> bytes: """Compress a bytes object. Args: data: Bytes-like object to compress. level: Compression level. Use constants like LEVEL_FASTEST, LEVEL_DEFAULT, etc. checksum: If True, append a checksum for integrity verification. Returns: Compressed bytes. Note: This function operates entirely in-memory. For streaming files, use `stream_compress`. """ if len(data) == 0 and not checksum: return data return pyzxc_compress(data, level, checksum) def get_decompressed_size(data: bytes) -> int: """Get the original decompressed size of a ZXC compressed buffer. Args: data (bytes): Compressed bytes buffer. Returns: int: Original uncompressed size in bytes, or 0 if the buffer is invalid or too small. Note: This function does not decompress the data, it only reads the footer for size info. 
""" return pyzxc_get_decompressed_size(data) def decompress(data, decompress_size=None, checksum=False) -> bytes: """Decompress a bytes object. Args: data: Compressed bytes. decompress_size: Expected size. If None, read from header (slower/safer). checksum: If True, verify the checksum appended during compression. Returns: Decompressed bytes. """ if len(data) == 0 and not checksum: return data if decompress_size is None: decompress_size = get_decompressed_size(data) if decompress_size == 0: raise ValueError( "Invalid ZXC header or data too short to determine size" ) return pyzxc_decompress(data, decompress_size, checksum) def stream_compress(src, dst, n_threads=0, level=LEVEL_DEFAULT, checksum=False) -> int: """Compress data from src to dst (file-like objects). Args: src: Readable file-like object with `fileno()` support (e.g., open file). dst: Writable file-like object with `fileno()` support. n_threads: Number of threads to use for compression. 0 uses default. level: Compression level. Use constants like LEVEL_FASTEST, LEVEL_DEFAULT, etc. checksum: If True, append a checksum for integrity verification. Returns: Number of bytes written to `dst`. Note: In-memory streams like `io.BytesIO` are not supported. Use the in-memory `compress`/`decompress` functions for buffers. """ if not hasattr(src, "fileno") or not hasattr(dst, "fileno"): raise ValueError("src and dst must be open file-like objects") if not src.readable(): raise ValueError("Source file must be readable") if not dst.writable(): raise ValueError("Destination file must be writable") # CRITICAL: Flush Python buffers before passing FDs to C # to prevent data reordering/corruption. if hasattr(src, "flush"): src.flush() if hasattr(dst, "flush"): dst.flush() return pyzxc_stream_compress(src, dst, n_threads, level, checksum) def stream_decompress(src, dst, n_threads=0, checksum=False) -> int: """Decompress data from src to dst (file-like objects). Args: src: Readable file-like object with `fileno()` support. 
dst: Writable file-like object with `fileno()` support. n_threads: Number of threads to use for decompression. 0 uses default. checksum: If True, verify the checksum appended during compression. Returns: Number of bytes written to `dst`. Note: In-memory streams like `io.BytesIO` are not supported. Use the in-memory `compress`/`decompress` functions for buffers. """ if not hasattr(src, "fileno") or not hasattr(dst, "fileno"): raise ValueError("src and dst must be open file-like objects") if not src.readable(): raise ValueError("Source file must be readable") if not dst.writable(): raise ValueError("Destination file must be writable") # CRITICAL: Flush Python buffers before passing FDs to C # to prevent data reordering/corruption. if hasattr(src, "flush"): src.flush() if hasattr(dst, "flush"): dst.flush() return pyzxc_stream_decompress(src, dst, n_threads, checksum)zxc-0.9.1/wrappers/python/src/zxc/__init__.pyi000066400000000000000000000023741515574030100213750ustar00rootroot00000000000000""" ZXC - High-performance lossless compression Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. SPDX-License-Identifier: BSD-3-Clause """ from typing import Protocol, Optional # ---------- constants ---------- LEVEL_FASTEST: int LEVEL_FAST: int LEVEL_DEFAULT: int LEVEL_BALANCED: int LEVEL_COMPACT: int # ---------- types ---------- class FileLike(Protocol): """File-like object with a file descriptor. Note: This excludes in-memory streams like io.BytesIO. Use real file objects or objects wrapping OS file descriptors. """ def fileno(self) -> int: ... def readable(self) -> bool: ... def writable(self) -> bool: ... # ---------- functions ---------- def compress( data: bytes, level: int = LEVEL_DEFAULT, checksum: bool = False ) -> bytes: ... def decompress( data: bytes, decompress_size: Optional[int] = None, checksum: bool = False ) -> bytes: ... 
def stream_compress( src: FileLike, dst: FileLike, n_threads: int = 0, level: int = LEVEL_DEFAULT, checksum: bool = False ) -> int: ... def stream_decompress( src: FileLike, dst: FileLike, n_threads: int = 0, checksum: bool = False ) -> int: ... def get_decompressed_size(data: bytes) -> int: ...zxc-0.9.1/wrappers/python/src/zxc/_zxc.c000066400000000000000000000305021515574030100202140ustar00rootroot00000000000000/* * Copyright (c) 2025-2026, Bertrand Lebonnois * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #define PY_SSIZE_T_CLEAN #include #include "zxc.h" #define Py_Return_Errno(err) \ do { \ PyErr_SetFromErrno(err); \ return NULL; \ } while (0) #define Py_Return_Err(err, str) \ do { \ PyErr_SetString(err, str); \ return NULL; \ } while (0) // ============================================================================= // Platform // ============================================================================= static inline int zxc_dup(int fd) { #ifdef _WIN32 return _dup(fd); #else return dup(fd); #endif } static inline FILE* zxc_fdopen(int fd, const char* mode) { #ifdef _WIN32 return _fdopen(fd, mode); #else return fdopen(fd, mode); #endif } static inline int zxc_close(int fd) { #ifdef _WIN32 return _close(fd); #else return close(fd); #endif } // ============================================================================= // Wrapper functions // ============================================================================= static PyObject* pyzxc_compress(PyObject* self, PyObject* args, PyObject* kwargs); static PyObject* pyzxc_decompress(PyObject* self, PyObject* args, PyObject* kwargs); static PyObject* pyzxc_stream_compress(PyObject* self, PyObject* args, PyObject* kwargs); static PyObject* pyzxc_stream_decompress(PyObject* self, PyObject* args, PyObject* kwargs); static PyObject* pyzxc_get_decompressed_size(PyObject* self, PyObject* args, PyObject* 
kwargs); // ============================================================================= // Initialize python module // ============================================================================= PyDoc_STRVAR( zxc_doc, "ZXC: High-performance, lossless asymmetric compression.\n" "\n" "Functions:\n" " compress(data: bytes, level: int = LEVEL_DEFAULT, checksum: bool = False) -> bytes\n" " Compress a bytes object.\n" "\n" " decompress(data: bytes, decompress_size: int, checksum: bool = False) -> bytes\n" " Decompress a bytes object to its original size.\n" "\n" " stream_compress(src: file-like, dst: file-like, n_threads: int = 0, level: int = " "LEVEL_DEFAULT, checksum: bool = False) -> None\n" " Compress data from a readable file-like object to a writable file-like object.\n" "\n" " stream_decompress(src: file-like, dst: file-like, n_threads: int = 0, checksum: bool = " "False) -> None\n" " Decompress data from a readable file-like object to a writable file-like object.\n" "\n" "Notes:\n" " - File-like objects must support fileno(), readable(), and writable().\n" " - Stream functions release the GIL for multi-threaded compression/decompression.\n"); static PyMethodDef zxc_methods[] = { {"pyzxc_compress", (PyCFunction)pyzxc_compress, METH_VARARGS | METH_KEYWORDS, NULL}, {"pyzxc_decompress", (PyCFunction)pyzxc_decompress, METH_VARARGS | METH_KEYWORDS, NULL}, {"pyzxc_stream_compress", (PyCFunction)pyzxc_stream_compress, METH_VARARGS | METH_KEYWORDS, NULL}, {"pyzxc_stream_decompress", (PyCFunction)pyzxc_stream_decompress, METH_VARARGS | METH_KEYWORDS, NULL}, {"pyzxc_get_decompressed_size", (PyCFunction)pyzxc_get_decompressed_size, METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} // sentinel }; static struct PyModuleDef zxc_module = {PyModuleDef_HEAD_INIT, "_zxc", zxc_doc, 0, zxc_methods}; PyMODINIT_FUNC PyInit__zxc(void) { PyObject* m = PyModule_Create(&zxc_module); if (!m) return NULL; PyModule_AddIntConstant(m, "LEVEL_FASTEST", ZXC_LEVEL_FASTEST); 
PyModule_AddIntConstant(m, "LEVEL_FAST", ZXC_LEVEL_FAST); PyModule_AddIntConstant(m, "LEVEL_DEFAULT", ZXC_LEVEL_DEFAULT); PyModule_AddIntConstant(m, "LEVEL_BALANCED", ZXC_LEVEL_BALANCED); PyModule_AddIntConstant(m, "LEVEL_COMPACT", ZXC_LEVEL_COMPACT); /* Error Enums */ PyModule_AddIntConstant(m, "ERROR_MEMORY", ZXC_ERROR_MEMORY); PyModule_AddIntConstant(m, "ERROR_DST_TOO_SMALL", ZXC_ERROR_DST_TOO_SMALL); PyModule_AddIntConstant(m, "ERROR_SRC_TOO_SMALL", ZXC_ERROR_SRC_TOO_SMALL); PyModule_AddIntConstant(m, "ERROR_BAD_MAGIC", ZXC_ERROR_BAD_MAGIC); PyModule_AddIntConstant(m, "ERROR_BAD_VERSION", ZXC_ERROR_BAD_VERSION); PyModule_AddIntConstant(m, "ERROR_BAD_HEADER", ZXC_ERROR_BAD_HEADER); PyModule_AddIntConstant(m, "ERROR_BAD_CHECKSUM", ZXC_ERROR_BAD_CHECKSUM); PyModule_AddIntConstant(m, "ERROR_CORRUPT_DATA", ZXC_ERROR_CORRUPT_DATA); PyModule_AddIntConstant(m, "ERROR_BAD_OFFSET", ZXC_ERROR_BAD_OFFSET); PyModule_AddIntConstant(m, "ERROR_OVERFLOW", ZXC_ERROR_OVERFLOW); PyModule_AddIntConstant(m, "ERROR_IO", ZXC_ERROR_IO); PyModule_AddIntConstant(m, "ERROR_NULL_INPUT", ZXC_ERROR_NULL_INPUT); PyModule_AddIntConstant(m, "ERROR_BAD_BLOCK_TYPE", ZXC_ERROR_BAD_BLOCK_TYPE); PyModule_AddIntConstant(m, "ERROR_BAD_BLOCK_SIZE", ZXC_ERROR_BAD_BLOCK_SIZE); return m; } // ============================================================================= // Functions definitions // ============================================================================= static PyObject* pyzxc_compress(PyObject* self, PyObject* args, PyObject* kwargs) { Py_buffer view; int level = ZXC_LEVEL_DEFAULT; int checksum = 0; static char* kwlist[] = {"data", "level", "checksum", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*|ip", kwlist, &view, &level, &checksum)) { return NULL; } if (view.itemsize != 1) { PyBuffer_Release(&view); PyErr_SetString(PyExc_TypeError, "expected a byte buffer (itemsize==1)"); return NULL; } size_t src_size = (size_t)view.len; uint64_t bound = zxc_compress_bound(src_size); 
PyObject* out = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)bound); if (!out) { PyBuffer_Release(&view); return NULL; } char* dst = PyBytes_AsString(out); // Return a pointer to the contents int64_t nwritten; // The number of bytes written to dst zxc_compress_opts_t copts = {0}; copts.level = level; copts.checksum_enabled = checksum; Py_BEGIN_ALLOW_THREADS nwritten = zxc_compress(view.buf, // Source buffer src_size, // Source size dst, // Destination buffer bound, // Destination capacity &copts // Options ); Py_END_ALLOW_THREADS PyBuffer_Release(&view); if (nwritten < 0) { Py_DECREF(out); Py_Return_Err(PyExc_RuntimeError, zxc_error_name((int)nwritten)); } if (nwritten == 0) { Py_DECREF(out); Py_Return_Err(PyExc_ValueError, "input is too small to be compressed"); } if (_PyBytes_Resize(&out, (Py_ssize_t)nwritten) < 0) // Realloc return NULL; return out; } static PyObject* pyzxc_get_decompressed_size(PyObject* self, PyObject* args, PyObject* kwargs) { Py_buffer view; if (!PyArg_ParseTuple(args, "y*", &view)) { return NULL; } uint64_t n = zxc_get_decompressed_size(view.buf, view.len); PyBuffer_Release(&view); return Py_BuildValue("K", n); } static PyObject* pyzxc_decompress(PyObject* self, PyObject* args, PyObject* kwargs) { Py_buffer view; int checksum = 0; Py_ssize_t decompress_size; static char* kwlist[] = {"data", "decompress_size", "checksum", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*n|p", kwlist, &view, &decompress_size, &checksum)) { return NULL; } if (view.itemsize != 1) { PyBuffer_Release(&view); PyErr_SetString(PyExc_TypeError, "expected a byte buffer (itemsize==1)"); return NULL; } size_t src_size = (size_t)view.len; PyObject* out = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)decompress_size); if (!out) { PyBuffer_Release(&view); return NULL; } char* dst = PyBytes_AsString(out); // Return a pointer to the contents int64_t nwritten; // The number of bytes written to dst zxc_decompress_opts_t dopts = {0}; dopts.checksum_enabled = checksum; 
Py_BEGIN_ALLOW_THREADS nwritten = zxc_decompress(view.buf, // Source buffer src_size, // Source size dst, // Destination buffer decompress_size, // Destination capacity &dopts // Options ); Py_END_ALLOW_THREADS PyBuffer_Release(&view); if (nwritten < 0) { Py_DECREF(out); Py_Return_Err(PyExc_RuntimeError, zxc_error_name((int)nwritten)); } return out; } static PyObject* pyzxc_stream_compress(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject *src, *dst; int nthreads = 0; int level = ZXC_LEVEL_DEFAULT; int checksum = 0; static char* kwlist[] = {"src", "dst", "n_threads", "level", "checksum", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|iip", kwlist, &src, &dst, &nthreads, &level, &checksum)) { return NULL; } int src_fd = PyObject_AsFileDescriptor(src); int dst_fd = PyObject_AsFileDescriptor(dst); if (src_fd == -1 || dst_fd == -1) Py_Return_Err(PyExc_RuntimeError, "couldn't get file descriptor"); int src_dup = zxc_dup(src_fd); if (src_dup == -1) { Py_Return_Errno(PyExc_OSError); } int dst_dup = zxc_dup(dst_fd); if (dst_dup == -1) { zxc_close(src_dup); Py_Return_Errno(PyExc_OSError); } FILE* fsrc = zxc_fdopen(src_dup, "rb"); if (!fsrc) { zxc_close(src_dup); zxc_close(dst_dup); Py_Return_Errno(PyExc_OSError); } FILE* fdst = zxc_fdopen(dst_dup, "wb"); if (!fdst) { fclose(fsrc); zxc_close(dst_dup); Py_Return_Errno(PyExc_OSError); } int64_t nwritten; zxc_compress_opts_t scopts = {0}; scopts.n_threads = nthreads; scopts.level = level; scopts.checksum_enabled = checksum; Py_BEGIN_ALLOW_THREADS nwritten = zxc_stream_compress(fsrc, fdst, &scopts); Py_END_ALLOW_THREADS fclose(fdst); fclose(fsrc); if (nwritten < 0) Py_Return_Err(PyExc_RuntimeError, zxc_error_name((int)nwritten)); return Py_BuildValue("L", nwritten); } static PyObject* pyzxc_stream_decompress(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject *src, *dst; int nthreads = 0; int checksum = 0; static char* kwlist[] = {"src", "dst", "n_threads", "checksum", NULL}; if 
(!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|ip", kwlist, &src, &dst, &nthreads, &checksum)) { return NULL; } int src_fd = PyObject_AsFileDescriptor(src); int dst_fd = PyObject_AsFileDescriptor(dst); if (src_fd == -1 || dst_fd == -1) Py_Return_Err(PyExc_RuntimeError, "couldn't get file descriptor"); int src_dup = zxc_dup(src_fd); if (src_dup == -1) { Py_Return_Errno(PyExc_OSError); } int dst_dup = zxc_dup(dst_fd); if (dst_dup == -1) { zxc_close(src_dup); Py_Return_Errno(PyExc_OSError); } FILE* fsrc = zxc_fdopen(src_dup, "rb"); if (!fsrc) { zxc_close(src_dup); zxc_close(dst_dup); Py_Return_Errno(PyExc_OSError); } FILE* fdst = zxc_fdopen(dst_dup, "wb"); if (!fdst) { fclose(fsrc); zxc_close(dst_dup); Py_Return_Errno(PyExc_OSError); } int64_t nwritten; zxc_decompress_opts_t sdopts = {0}; sdopts.n_threads = nthreads; sdopts.checksum_enabled = checksum; Py_BEGIN_ALLOW_THREADS nwritten = zxc_stream_decompress(fsrc, fdst, &sdopts); Py_END_ALLOW_THREADS fclose(fdst); fclose(fsrc); if (nwritten < 0) Py_Return_Err(PyExc_RuntimeError, zxc_error_name((int)nwritten)); return Py_BuildValue("L", nwritten); } zxc-0.9.1/wrappers/python/tests/000077500000000000000000000000001515574030100166545ustar00rootroot00000000000000zxc-0.9.1/wrappers/python/tests/test_buffer.py000066400000000000000000000030561515574030100215420ustar00rootroot00000000000000""" ZXC - High-performance lossless compression Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
SPDX-License-Identifier: BSD-3-Clause """ import pytest import zxc @pytest.mark.parametrize("data", [ None, "string", 123, 12.5, object(), ]) def test_compress_invalid_type(data): with pytest.raises(TypeError): zxc.compress(data) @pytest.mark.parametrize( "data,corrupt_func,exc", [ (b"hello world" * 10, lambda x: x[:-1] + b"\x01", RuntimeError), (b"a" * 10, lambda x: b"", RuntimeError), ], ids=["corrupted_data", "invalid_header"], ) def test_compress_corruption(data, corrupt_func, exc): compressed = zxc.compress(data, checksum=True) corrupted = corrupt_func(compressed) with pytest.raises(exc): zxc.decompress(corrupted, len(data), checksum=True) @pytest.mark.parametrize( "data", [ b"hello world" * 10, # default b"a", # single byte b"", b"a" * 10_000_000, # large data ], ids=[ "normal_data", "single_byte", "empty", "large_10mb", ], ) def test_compress_roundtrip(data): # test all compression levels levels_cnt = zxc.LEVEL_COMPACT for level in range(levels_cnt + 1): compressed = zxc.compress(data, level) out_size = zxc.get_decompressed_size(compressed) decompressed = zxc.decompress(compressed, out_size) assert len(data) == len(decompressed) assert data == decompressed zxc-0.9.1/wrappers/python/tests/test_stream.py000066400000000000000000000077301515574030100215670ustar00rootroot00000000000000""" ZXC - High-performance lossless compression Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. 
SPDX-License-Identifier: BSD-3-Clause """ import pytest import zxc import io @pytest.mark.parametrize( "src,dst,expected_error,match", [ ("not a file", io.BytesIO(), ValueError, "src and dst must be open file-like objects"), (io.BytesIO(b"data"), "not a file", ValueError, "src and dst must be open file-like objects"), (io.BytesIO(b"data"), io.BytesIO(), ValueError, "Source file must be readable"), (io.BytesIO(b"data"), io.BytesIO(), ValueError, "Destination file must be writable"), (None, None, None, "valid_file"), ], ids=[ "src_not_file", "dst_not_file", "src_not_readable", "dst_not_writable", "valid_file", ] ) def test_stream_invalid_src_dst(tmp_path, src, dst, expected_error, match): # Helper class to mock fileno behavior class MockFile(io.BytesIO): def fileno(self): return 1 class NotReadable(MockFile): def readable(self): return False class NotWritable(MockFile): def writable(self): return False if match == "Source file must be readable": src = NotReadable(b"data") dst = MockFile() elif match == "Destination file must be writable": src = MockFile(b"data") dst = NotWritable() elif match == "valid_file": src_path = tmp_path / "src.txt" dst_path = tmp_path / "dst.zxc" src_path.write_bytes(b"hello world") src = open(src_path, "rb") dst = open(dst_path, "wb") try: if not expected_error: _ = zxc.stream_compress(src, dst) else: with pytest.raises(expected_error, match=match): zxc.stream_compress(src, dst) finally: if hasattr(src, "close"): src.close() if hasattr(dst, "close"): dst.close() @pytest.mark.parametrize( "data,corrupt_func,exc", [ (b"hello world" * 10, lambda x: x[:-1] + b"\x01", RuntimeError), (b"a" * 10, lambda x: b"", RuntimeError), ], ids=["corrupted_data", "invalid_header"], ) def test_stream_compress_corruption(tmp_path, data, corrupt_func, exc): src_file_path = tmp_path / "src.bin" compressed_file_path = tmp_path / "compressed.zxc" decompressed_file_path = tmp_path / "decompressed.bin" src_file_path.write_bytes(data) with open(src_file_path, "rb") as 
src, open(compressed_file_path, "wb") as dst: zxc.stream_compress(src, dst, checksum=True) compressed_bytes = compressed_file_path.read_bytes() corrupted_bytes = corrupt_func(compressed_bytes) compressed_file_path.write_bytes(corrupted_bytes) with pytest.raises(exc): with open(compressed_file_path, "rb") as src, open(decompressed_file_path, "wb") as dst: zxc.stream_decompress(src, dst, checksum=True) @pytest.mark.parametrize("data", [ b"hello world" * 10, # normal b"a", # single byte b"", # empty b"a" * 10_000_000, # large ], ids=["normal", "single_byte", "empty", "large_10mb"]) def test_stream_roundtrip(tmp_path, data): src_file_path = tmp_path / "src.txt" compressed_file_path = tmp_path / "compressed.zxc" decompressed_file_path = tmp_path / "decompressed.txt" src_file_path.write_bytes(data) levels_cnt = zxc.LEVEL_COMPACT for level in range(levels_cnt + 1): with open(src_file_path, "rb") as src, \ open(compressed_file_path, "wb") as compressed: zxc.stream_compress(src, compressed, level=level) with open(compressed_file_path, "rb") as compressed, \ open(decompressed_file_path, "wb") as decompressed: zxc.stream_decompress(compressed, decompressed) decompressed_data = decompressed_file_path.read_bytes() assert decompressed_data == data assert len(decompressed_data) == len(data)zxc-0.9.1/wrappers/rust/000077500000000000000000000000001515574030100151665ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/.gitignore000066400000000000000000000004501515574030100171550ustar00rootroot00000000000000# Rust build artifacts target/ # Cargo.lock for library crates (keep it for binaries) # Since zxc-sys and zxc are libraries, we ignore Cargo.lock Cargo.lock # Debug/release build output debug/ release/ # IDE files .vscode/ .idea/ *.swp *.swo *~ # macOS .DS_Store # Backup files *.bak *.orig zxc-0.9.1/wrappers/rust/Cargo.toml000066400000000000000000000006001515574030100171120ustar00rootroot00000000000000[workspace] members = ["zxc-sys", "zxc"] resolver = "2" [workspace.package] 
version = "0.9.1" authors = ["Bertrand Lebonnois"] edition = "2024" rust-version = "1.85" license = "BSD-3-Clause" repository = "https://github.com/hellobertrand/zxc" keywords = [ "zxc", "compression", "lossless", "high-performance", "data-compression", ] categories = ["compression", "encoding"] zxc-0.9.1/wrappers/rust/zxc-sys/000077500000000000000000000000001515574030100166065ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc-sys/Cargo.toml000066400000000000000000000010001515574030100205250ustar00rootroot00000000000000[package] name = "zxc-compress-sys" description = "Low-level FFI bindings to the ZXC compression library" version.workspace = true authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true links = "zxc" build = "build.rs" [lib] name = "zxc_sys" [dependencies] libc = "0.2" [build-dependencies] cc = "1.2" [features] default = [] # Enable if you want to link against a system-installed ZXC library system = [] zxc-0.9.1/wrappers/rust/zxc-sys/build.rs000066400000000000000000000260111515574030100202530ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ //! Build script for zxc-sys //! //! This script compiles the ZXC C library with Function Multi-Versioning (FMV) //! to support runtime CPU feature detection and optimized code paths. //! //! On ARM64: Compiles `_default` and `_neon` variants //! 
On x86_64: Compiles `_default`, `_avx2`, and `_avx512` variants use std::env; use std::fs; use std::path::{Path, PathBuf}; /// Extract version constants from zxc_constants.h fn extract_version(include_dir: &Path) -> (u32, u32, u32) { let header_path = include_dir.join("zxc_constants.h"); let content = fs::read_to_string(&header_path) .expect("Failed to read zxc_constants.h"); let mut major = None; let mut minor = None; let mut patch = None; for line in content.lines() { let trimmed = line.trim(); // Parse lines like: #define ZXC_VERSION_MAJOR 0 if trimmed.starts_with("#define") { let parts: Vec<&str> = trimmed.split_whitespace().collect(); if parts.len() >= 3 { match parts[1] { "ZXC_VERSION_MAJOR" => major = parts[2].parse().ok(), "ZXC_VERSION_MINOR" => minor = parts[2].parse().ok(), "ZXC_VERSION_PATCH" => patch = parts[2].parse().ok(), _ => {} } } } } ( major.expect("ZXC_VERSION_MAJOR not found"), minor.expect("ZXC_VERSION_MINOR not found"), patch.expect("ZXC_VERSION_PATCH not found"), ) } /// Extract compression level constants from zxc_constants.h fn extract_compression_levels(include_dir: &Path) -> (i32, i32, i32, i32, i32) { let header_path = include_dir.join("zxc_constants.h"); let content = fs::read_to_string(&header_path) .expect("Failed to read zxc_constants.h"); let mut fastest = None; let mut fast = None; let mut default = None; let mut balanced = None; let mut compact = None; for line in content.lines() { let trimmed = line.trim(); // Parse lines like: ZXC_LEVEL_FASTEST = 1, if trimmed.starts_with("ZXC_LEVEL_") { let parts: Vec<&str> = trimmed.split('=').collect(); if parts.len() >= 2 { let name = parts[0].trim(); // Extract number, removing comma and comments let value_str = parts[1].split(&[',', '/'][..]).next().unwrap().trim(); let value: Option = value_str.parse().ok(); match name { "ZXC_LEVEL_FASTEST" => fastest = value, "ZXC_LEVEL_FAST" => fast = value, "ZXC_LEVEL_DEFAULT" => default = value, "ZXC_LEVEL_BALANCED" => balanced = value, 
"ZXC_LEVEL_COMPACT" => compact = value, _ => {} } } } } ( fastest.expect("ZXC_LEVEL_FASTEST not found"), fast.expect("ZXC_LEVEL_FAST not found"), default.expect("ZXC_LEVEL_DEFAULT not found"), balanced.expect("ZXC_LEVEL_BALANCED not found"), compact.expect("ZXC_LEVEL_COMPACT not found"), ) } fn main() { // Check if we should use system library instead of compiling from source if env::var("CARGO_FEATURE_SYSTEM").is_ok() { println!("cargo:rustc-link-lib=zxc"); return; } // Path to ZXC source files let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); // We use mirrored symlinks under zxc/ to preserve relative include paths // expected by the C source code (../../include/...). // During `cargo publish`, these symlinks are followed and real files are packaged. let src_lib = manifest_dir.join("zxc/src/lib"); let include_dir = manifest_dir.join("zxc/include"); // Fallback for local development if symlinks are broken or missing let (src_lib, include_dir) = if src_lib.exists() && include_dir.exists() { (src_lib, include_dir) } else { // Try direct path from workspace root let zxc_root = manifest_dir.join("../../..").canonicalize() .expect("Failed to find ZXC root directory"); (zxc_root.join("src/lib"), zxc_root.join("include")) }; // Verify paths exist assert!( src_lib.exists(), "ZXC source directory not found: {:?}", src_lib ); assert!( include_dir.exists(), "ZXC include directory not found: {:?}", include_dir ); // Extract version from header and make it available to lib.rs let (major, minor, patch) = extract_version(&include_dir); println!("cargo:rustc-env=ZXC_VERSION_MAJOR={}", major); println!("cargo:rustc-env=ZXC_VERSION_MINOR={}", minor); println!("cargo:rustc-env=ZXC_VERSION_PATCH={}", patch); // Extract compression levels from header and make them available to lib.rs let (fastest, fast, default, balanced, compact) = extract_compression_levels(&include_dir); println!("cargo:rustc-env=ZXC_LEVEL_FASTEST={}", fastest); 
println!("cargo:rustc-env=ZXC_LEVEL_FAST={}", fast); println!("cargo:rustc-env=ZXC_LEVEL_DEFAULT={}", default); println!("cargo:rustc-env=ZXC_LEVEL_BALANCED={}", balanced); println!("cargo:rustc-env=ZXC_LEVEL_COMPACT={}", compact); let target = env::var("TARGET").unwrap_or_default(); let is_arm64 = target.contains("aarch64") || target.contains("arm64"); let is_x86_64 = target.contains("x86_64") || target.contains("i686"); // ========================================================================= // Core library files (common to all architectures) // ========================================================================= let mut core_build = cc::Build::new(); core_build .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_common.c")) .file(src_lib.join("zxc_driver.c")) .file(src_lib.join("zxc_dispatch.c")) .opt_level(3) .warnings(false) .flag_if_supported("-pthread"); // ========================================================================= // Function Multi-Versioning: Compile variants with different suffixes // ========================================================================= // --- Default variant (baseline, always compiled) --- let mut default_compress = cc::Build::new(); default_compress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_compress.c")) .define("ZXC_FUNCTION_SUFFIX", "_default") .opt_level(3) .warnings(false); let mut default_decompress = cc::Build::new(); default_decompress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_decompress.c")) .define("ZXC_FUNCTION_SUFFIX", "_default") .opt_level(3) .warnings(false); // Add architecture-specific flags to core build BEFORE compiling if is_arm64 { core_build.flag_if_supported("-march=armv8-a+crc"); } else if is_x86_64 { core_build.flag_if_supported("-msse4.2"); core_build.flag_if_supported("-mpclmul"); } core_build.compile("zxc_core"); // 
Compile defaults default_compress.compile("zxc_compress_default"); default_decompress.compile("zxc_decompress_default"); // --- Architecture-specific variants --- if is_arm64 { // NEON variant for ARM64 let mut neon_compress = cc::Build::new(); neon_compress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_compress.c")) .define("ZXC_FUNCTION_SUFFIX", "_neon") .flag_if_supported("-march=armv8-a+simd") .opt_level(3) .warnings(false); let mut neon_decompress = cc::Build::new(); neon_decompress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_decompress.c")) .define("ZXC_FUNCTION_SUFFIX", "_neon") .flag_if_supported("-march=armv8-a+simd") .opt_level(3) .warnings(false); neon_compress.compile("zxc_compress_neon"); neon_decompress.compile("zxc_decompress_neon"); } else if is_x86_64 { // AVX2 variant let mut avx2_compress = cc::Build::new(); avx2_compress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_compress.c")) .define("ZXC_FUNCTION_SUFFIX", "_avx2") .flag_if_supported("-mavx2") .flag_if_supported("-mfma") .flag_if_supported("-mbmi2") .opt_level(3) .warnings(false); let mut avx2_decompress = cc::Build::new(); avx2_decompress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_decompress.c")) .define("ZXC_FUNCTION_SUFFIX", "_avx2") .flag_if_supported("-mavx2") .flag_if_supported("-mfma") .flag_if_supported("-mbmi2") .opt_level(3) .warnings(false); avx2_compress.compile("zxc_compress_avx2"); avx2_decompress.compile("zxc_decompress_avx2"); // AVX512 variant let mut avx512_compress = cc::Build::new(); avx512_compress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_compress.c")) .define("ZXC_FUNCTION_SUFFIX", "_avx512") .flag_if_supported("-mavx512f") .flag_if_supported("-mavx512bw") .flag_if_supported("-mbmi2") 
.opt_level(3) .warnings(false); let mut avx512_decompress = cc::Build::new(); avx512_decompress .include(&include_dir) .include(&src_lib) .include(src_lib.join("vendors")) .file(src_lib.join("zxc_decompress.c")) .define("ZXC_FUNCTION_SUFFIX", "_avx512") .flag_if_supported("-mavx512f") .flag_if_supported("-mavx512bw") .flag_if_supported("-mbmi2") .opt_level(3) .warnings(false); avx512_compress.compile("zxc_compress_avx512"); avx512_decompress.compile("zxc_decompress_avx512"); } // Threading support (not needed on Windows, which uses kernel32) if !target.contains("windows") { println!("cargo:rustc-link-lib=pthread"); } // Re-run build script if source files change println!("cargo:rerun-if-changed={}", src_lib.display()); println!("cargo:rerun-if-changed={}", include_dir.display()); } zxc-0.9.1/wrappers/rust/zxc-sys/src/000077500000000000000000000000001515574030100173755ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc-sys/src/lib.rs000066400000000000000000000344471515574030100205250ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ //! Low-level FFI bindings to the ZXC compression library. //! //! This crate provides raw, unsafe bindings to the ZXC C library. //! For a safe, idiomatic Rust API, use the `zxc` crate instead. //! //! # Example //! //! ```rust,ignore //! use zxc_sys::*; //! //! unsafe { //! let bound = zxc_compress_bound(1024); //! // ... allocate buffer and compress //! } //! 
``` #![allow(non_camel_case_types)] #![allow(non_upper_case_globals)] use std::ffi::c_int; use std::os::raw::c_void; // ============================================================================= // ZXC Version Constants // ============================================================================= // Version constants - automatically extracted from zxc_constants.h by build.rs const fn parse_version(s: &str) -> u32 { let bytes = s.as_bytes(); let mut result = 0u32; let mut i = 0; while i < bytes.len() { let digit = bytes[i]; if digit >= b'0' && digit <= b'9' { result = result * 10 + (digit - b'0') as u32; } i += 1; } result } pub const ZXC_VERSION_MAJOR: u32 = parse_version(env!("ZXC_VERSION_MAJOR")); pub const ZXC_VERSION_MINOR: u32 = parse_version(env!("ZXC_VERSION_MINOR")); pub const ZXC_VERSION_PATCH: u32 = parse_version(env!("ZXC_VERSION_PATCH")); // ============================================================================= // Compression Levels // ============================================================================= // Helper function to parse integer from string literal at compile time const fn parse_i32(s: &str) -> i32 { let bytes = s.as_bytes(); let mut result = 0i32; let mut i = 0; let mut sign = 1; if i < bytes.len() && bytes[i] == b'-' { sign = -1; i += 1; } while i < bytes.len() { let digit = bytes[i]; if digit >= b'0' && digit <= b'9' { result = result * 10 + (digit - b'0') as i32; } i += 1; } result * sign } // Compression level constants - automatically extracted from zxc_constants.h by build.rs /// Fastest compression, best for real-time applications pub const ZXC_LEVEL_FASTEST: i32 = parse_i32(env!("ZXC_LEVEL_FASTEST")); /// Fast compression, good for real-time applications pub const ZXC_LEVEL_FAST: i32 = parse_i32(env!("ZXC_LEVEL_FAST")); /// Recommended: ratio > LZ4, decode speed > LZ4 pub const ZXC_LEVEL_DEFAULT: i32 = parse_i32(env!("ZXC_LEVEL_DEFAULT")); /// Good ratio, good decode speed pub const ZXC_LEVEL_BALANCED: i32 = 
parse_i32(env!("ZXC_LEVEL_BALANCED")); /// High density. Best for storage/firmware/assets. pub const ZXC_LEVEL_COMPACT: i32 = parse_i32(env!("ZXC_LEVEL_COMPACT")); // ============================================================================= // Error Codes // ============================================================================= /// Success (no error) pub const ZXC_OK: i32 = 0; /// Memory allocation failure pub const ZXC_ERROR_MEMORY: i32 = -1; /// Destination buffer too small pub const ZXC_ERROR_DST_TOO_SMALL: i32 = -2; /// Source buffer too small or truncated input pub const ZXC_ERROR_SRC_TOO_SMALL: i32 = -3; /// Invalid magic word in file header pub const ZXC_ERROR_BAD_MAGIC: i32 = -4; /// Unsupported file format version pub const ZXC_ERROR_BAD_VERSION: i32 = -5; /// Corrupted or invalid header (CRC mismatch) pub const ZXC_ERROR_BAD_HEADER: i32 = -6; /// Block or global checksum verification failed pub const ZXC_ERROR_BAD_CHECKSUM: i32 = -7; /// Corrupted compressed data pub const ZXC_ERROR_CORRUPT_DATA: i32 = -8; /// Invalid match offset during decompression pub const ZXC_ERROR_BAD_OFFSET: i32 = -9; /// Buffer overflow detected during processing pub const ZXC_ERROR_OVERFLOW: i32 = -10; /// Read/write/seek failure on file pub const ZXC_ERROR_IO: i32 = -11; /// Required input pointer is NULL pub const ZXC_ERROR_NULL_INPUT: i32 = -12; /// Unknown or unexpected block type pub const ZXC_ERROR_BAD_BLOCK_TYPE: i32 = -13; /// Invalid block size pub const ZXC_ERROR_BAD_BLOCK_SIZE: i32 = -14; // ============================================================================= // Options Structs (mirroring C API) // ============================================================================= /// Compression options (mirrors `zxc_compress_opts_t` from C API). #[repr(C)] #[derive(Debug, Clone)] pub struct zxc_compress_opts_t { /// Worker thread count (0 = auto-detect CPU cores). pub n_threads: c_int, /// Compression level 1-5 (0 = default). 
pub level: c_int, /// Block size in bytes (0 = default 256 KB). Must be power of 2, 4 KB – 2 MB. pub block_size: usize, /// 1 to enable per-block and global checksums, 0 to disable. pub checksum_enabled: c_int, /// Progress callback (NULL to disable). pub progress_cb: *const c_void, /// User context pointer passed to progress_cb. pub user_data: *mut c_void, } impl Default for zxc_compress_opts_t { fn default() -> Self { Self { n_threads: 0, level: 0, block_size: 0, checksum_enabled: 0, progress_cb: std::ptr::null(), user_data: std::ptr::null_mut(), } } } /// Decompression options (mirrors `zxc_decompress_opts_t` from C API). #[repr(C)] #[derive(Debug, Clone)] pub struct zxc_decompress_opts_t { /// Worker thread count (0 = auto-detect CPU cores). pub n_threads: c_int, /// 1 to verify per-block and global checksums, 0 to skip. pub checksum_enabled: c_int, /// Progress callback (NULL to disable). pub progress_cb: *const c_void, /// User context pointer passed to progress_cb. pub user_data: *mut c_void, } impl Default for zxc_decompress_opts_t { fn default() -> Self { Self { n_threads: 0, checksum_enabled: 0, progress_cb: std::ptr::null(), user_data: std::ptr::null_mut(), } } } // ============================================================================= // Buffer-Based API // ============================================================================= unsafe extern "C" { /// Calculates the maximum compressed size for a given input. /// /// Useful for allocating output buffers before compression. pub fn zxc_compress_bound(input_size: usize) -> u64; /// Compresses a data buffer using the ZXC algorithm. /// /// # Safety /// /// - `src` must be a valid pointer to `src_size` bytes. /// - `dst` must be a valid pointer to at least `dst_capacity` bytes. /// - The caller must ensure no data races on the buffers. /// /// # Returns /// /// Number of bytes written to `dst` (>0 on success), or a negative error code. 
pub fn zxc_compress( src: *const c_void, src_size: usize, dst: *mut c_void, dst_capacity: usize, opts: *const zxc_compress_opts_t, ) -> i64; /// Decompresses a ZXC compressed buffer. /// /// # Safety /// /// - `src` must be a valid pointer to `src_size` bytes of compressed data. /// - `dst` must be a valid pointer to at least `dst_capacity` bytes. /// /// # Returns /// /// Number of decompressed bytes written to `dst` (>0 on success), or a negative error code. pub fn zxc_decompress( src: *const c_void, src_size: usize, dst: *mut c_void, dst_capacity: usize, opts: *const zxc_decompress_opts_t, ) -> i64; /// Returns the decompressed size stored in a ZXC compressed buffer. /// /// Reads the file footer without performing decompression. /// /// # Returns /// /// Original uncompressed size in bytes, or 0 if invalid. pub fn zxc_get_decompressed_size(src: *const c_void, src_size: usize) -> u64; /// Returns a human-readable name for the given error code. /// /// # Arguments /// /// * `code` - An error code from zxc_error_t (or any integer) /// /// # Returns /// /// A constant string such as "ZXC_OK" or "ZXC_ERROR_MEMORY". /// Returns "ZXC_UNKNOWN_ERROR" for unrecognized codes. pub fn zxc_error_name(code: c_int) -> *const std::os::raw::c_char; } // ============================================================================= // Streaming API (FILE-based) // ============================================================================= unsafe extern "C" { /// Compresses data from an input stream to an output stream. /// /// Uses a multi-threaded pipeline architecture for high throughput /// on large files. 
/// /// # Safety /// /// - `f_in` must be a valid FILE* opened in "rb" mode /// - `f_out` must be a valid FILE* opened in "wb" mode /// - File handles must remain valid for the duration of the call /// /// # Arguments /// /// * `f_in` - Input file stream /// * `f_out` - Output file stream /// * `n_threads` - Number of worker threads (0 = auto-detect CPU cores) /// * `level` - Compression level (1-5) /// * `checksum_enabled` - If non-zero, enables checksum verification /// /// # Returns /// /// Total compressed bytes written, or -1 on error. pub fn zxc_stream_compress( f_in: *mut libc::FILE, f_out: *mut libc::FILE, opts: *const zxc_compress_opts_t, ) -> i64; /// Decompresses data from an input stream to an output stream. /// /// Uses the same pipeline architecture as compression for maximum throughput. /// /// # Safety /// /// - `f_in` must be a valid FILE* opened in "rb" mode /// - `f_out` must be a valid FILE* opened in "wb" mode /// /// # Arguments /// /// * `f_in` - Input file stream (compressed data) /// * `f_out` - Output file stream (decompressed data) /// * `n_threads` - Number of worker threads (0 = auto-detect) /// * `checksum_enabled` - If non-zero, verifies checksums /// /// # Returns /// /// Total decompressed bytes written, or -1 on error. pub fn zxc_stream_decompress( f_in: *mut libc::FILE, f_out: *mut libc::FILE, opts: *const zxc_decompress_opts_t, ) -> i64; /// Returns the decompressed size stored in a ZXC compressed file. /// /// Reads the file footer to extract the original size without decompressing. /// The file position is restored after reading. /// /// # Safety /// /// - `f_in` must be a valid FILE* opened in "rb" mode /// /// # Returns /// /// Original uncompressed size in bytes, or -1 on error. 
pub fn zxc_stream_get_decompressed_size(f_in: *mut libc::FILE) -> i64; } // ============================================================================= // Tests // ============================================================================= #[cfg(test)] mod tests { use super::*; #[test] fn test_compress_bound() { unsafe { let bound = zxc_compress_bound(1024); // Should return a reasonable bound (input + overhead) assert!(bound > 1024); assert!(bound < 1024 * 2); // Should not be excessively large } } #[test] fn test_roundtrip() { // Use highly repetitive data that definitely compresses well let input: Vec = (0..4096) .map(|i| ((i % 16) as u8).wrapping_add(b'A')) .collect(); unsafe { // Allocate compression buffer let bound = zxc_compress_bound(input.len()) as usize; let mut compressed = vec![0u8; bound]; // Compress let copts = zxc_compress_opts_t { level: ZXC_LEVEL_DEFAULT, checksum_enabled: 1, ..Default::default() }; let compressed_size = zxc_compress( input.as_ptr() as *const c_void, input.len(), compressed.as_mut_ptr() as *mut c_void, compressed.len(), &copts, ); assert!(compressed_size > 0, "Compression failed"); // Highly repetitive data should compress significantly assert!((compressed_size as usize) < input.len() / 2, "Data should compress well"); // Get decompressed size let decompressed_size = zxc_get_decompressed_size( compressed.as_ptr() as *const c_void, compressed_size as usize, ); assert_eq!(decompressed_size as usize, input.len()); // Decompress let mut decompressed = vec![0u8; decompressed_size as usize]; let dopts = zxc_decompress_opts_t { checksum_enabled: 1, ..Default::default() }; let result_size = zxc_decompress( compressed.as_ptr() as *const c_void, compressed_size as usize, decompressed.as_mut_ptr() as *mut c_void, decompressed.len(), &dopts, ); assert_eq!(result_size, input.len() as i64); assert_eq!(&decompressed[..], &input[..]); } } #[test] fn test_all_levels() { let input = b"Test data for all compression levels - with some repetition 
\ to ensure compression works: BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"; for level in [ ZXC_LEVEL_FASTEST, ZXC_LEVEL_FAST, ZXC_LEVEL_DEFAULT, ZXC_LEVEL_BALANCED, ZXC_LEVEL_COMPACT, ] { unsafe { let bound = zxc_compress_bound(input.len()) as usize; let mut compressed = vec![0u8; bound]; let copts = zxc_compress_opts_t { level, checksum_enabled: 1, ..Default::default() }; let compressed_size = zxc_compress( input.as_ptr() as *const c_void, input.len(), compressed.as_mut_ptr() as *mut c_void, compressed.len(), &copts, ); assert!( compressed_size > 0, "Compression failed at level {}", level ); } } } } zxc-0.9.1/wrappers/rust/zxc-sys/zxc/000077500000000000000000000000001515574030100174125ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc-sys/zxc/include000077700000000000000000000000001515574030100233752../../../../includeustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc-sys/zxc/src/000077500000000000000000000000001515574030100202015ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc-sys/zxc/src/lib000077700000000000000000000000001515574030100234342../../../../../src/libustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc/000077500000000000000000000000001515574030100157725ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc/Cargo.toml000066400000000000000000000012121515574030100177160ustar00rootroot00000000000000[package] name = "zxc-compress" description = "Safe Rust bindings to the ZXC compression library" version.workspace = true authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true readme = "README.md" [lib] name = "zxc" [dependencies] zxc_sys = { path = "../zxc-sys", version = "0.9.1", package = "zxc-compress-sys" } thiserror = "2.0" libc = "0.2" [target.'cfg(windows)'.dependencies] windows-sys = { version = "0.61.2", features = [ "Win32_Foundation", "Win32_System_Threading", ] } [dev-dependencies] rand = "0.9" [features] default = [] 
zxc-0.9.1/wrappers/rust/zxc/README.md000066400000000000000000000055001515574030100172510ustar00rootroot00000000000000# zxc Safe Rust bindings to the **ZXC compression library** — a fast LZ77-based compressor optimized for high decompression speed. [![Crates.io](https://img.shields.io/crates/v/zxc.svg)](https://crates.io/crates/zxc) [![Documentation](https://docs.rs/zxc/badge.svg)](https://docs.rs/zxc) [![License](https://img.shields.io/badge/license-BSD--3--Clause-blue.svg)](LICENSE) ## Quick Start ```rust use zxc::{compress, decompress, Level}; fn main() -> Result<(), zxc::Error> { let data = b"Hello, ZXC! This is some data to compress."; // Compress (no checksum for max speed) let compressed = compress(data, Level::Default, None)?; println!("Compressed {} -> {} bytes", data.len(), compressed.len()); // Decompress let decompressed = decompress(&compressed)?; assert_eq!(&decompressed[..], &data[..]); Ok(()) } ``` ## Compression Levels | Level | Speed | Ratio | Use Case | |-------|-------|-------|----------| | `Level::Fastest` | ★★★★★ | ★★☆☆☆ | Real-time, gaming | | `Level::Fast` | ★★★★☆ | ★★★☆☆ | Network, streaming | | `Level::Default` | ★★★☆☆ | ★★★★☆ | General purpose | | `Level::Balanced` | ★★☆☆☆ | ★★★★☆ | Archives | | `Level::Compact` | ★☆☆☆☆ | ★★★★★ | Storage, firmware | ## Features - **Fast decompression**: Optimized for read-heavy workloads - **5 compression levels**: Trade off speed vs ratio - **Optional checksums**: Disabled by default for maximum performance, enable for data integrity - **File streaming**: Multi-threaded compression/decompression for large files - **Zero-allocation API**: `compress_to` and `decompress_to` for buffer reuse - **Pure Rust API**: Safe, idiomatic interface over the C library ## Advanced Usage ### Pre-allocated Buffers ```rust use zxc::{compress_to, decompress_to, compress_bound, CompressOptions, DecompressOptions}; let data = b"Hello, world!"; // Compression let mut output = vec![0u8; compress_bound(data.len())]; let size = 
compress_to(data, &mut output, &CompressOptions::default())?; output.truncate(size); // Decompression let mut decompressed = vec![0u8; data.len()]; decompress_to(&output, &mut decompressed, &DecompressOptions::default())?; ``` ### Disable Checksum ```rust use zxc::{compress_with_options, decompress_with_options, CompressOptions, DecompressOptions, Level}; let opts = CompressOptions::with_level(Level::Fastest).without_checksum(); let compressed = compress_with_options(data, &opts)?; let decompressed = decompress_with_options(&compressed, &DecompressOptions::skip_checksum())?; ``` ### Query Decompressed Size ```rust use zxc::decompressed_size; if let Some(size) = decompressed_size(&compressed) { let mut buffer = vec![0u8; size]; // ... } ``` ## License BSD-3-Clause — see [LICENSE](../../LICENSE) for details. zxc-0.9.1/wrappers/rust/zxc/examples/000077500000000000000000000000001515574030100176105ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc/examples/file_compression.rs000066400000000000000000000050071515574030100235200ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ //! Example demonstrating file-based streaming compression and decompression. //! //! 
Run with: `cargo run --example file_compression` use std::fs; use std::io::Write; fn main() -> Result<(), Box> { println!("ZXC File Streaming Example\n"); // Create a test file with compressible data let test_data: Vec = (0..1024 * 1024) // 1 MB .map(|i| ((i % 256) ^ ((i / 256) % 256)) as u8) .collect(); let input_path = "/tmp/zxc_test_input.bin"; let compressed_path = "/tmp/zxc_test_compressed.zxc"; let output_path = "/tmp/zxc_test_output.bin"; // Write test data to file { let mut file = fs::File::create(input_path)?; file.write_all(&test_data)?; } // Get original file size let original_size = fs::metadata(input_path)?.len(); println!("Original file size: {} bytes", original_size); // Compress the file with multi-threading println!("\nCompressing with auto-detected threads..."); let compressed_bytes = zxc::compress_file( input_path, compressed_path, zxc::Level::Default, None, // Auto-detect CPU cores None, // Maximum performance (no checksum) )?; println!(" Compressed bytes written: {}", compressed_bytes); // Get compressed file size let compressed_size = fs::metadata(compressed_path)?.len(); let ratio = 100.0 * compressed_size as f64 / original_size as f64; println!(" Compressed file size: {} bytes ({:.1}%)", compressed_size, ratio); // Query the decompressed size from the file let reported_size = zxc::file_decompressed_size(compressed_path)?; println!("\n Reported decompressed size: {} bytes", reported_size); // Decompress the file println!("\nDecompressing..."); let decompressed_bytes = zxc::decompress_file( compressed_path, output_path, Some(4), // Use 4 threads )?; println!(" Decompressed bytes written: {}", decompressed_bytes); // Verify the result let output_data = fs::read(output_path)?; if output_data == test_data { println!("\n✓ Data integrity verified! 
Files match."); } else { println!("\n✗ ERROR: Data mismatch!"); } // Cleanup fs::remove_file(input_path)?; fs::remove_file(compressed_path)?; fs::remove_file(output_path)?; println!("\nDone."); Ok(()) } zxc-0.9.1/wrappers/rust/zxc/examples/simple.rs000066400000000000000000000034321515574030100214510ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ //! Simple example demonstrating ZXC compression and decompression. //! //! Run with: `cargo run --example simple` use zxc::{compress, decompress, decompressed_size, version_string, Level}; fn main() -> Result<(), zxc::Error> { println!("ZXC Rust Wrapper v{}\n", version_string()); // Sample data with some repetition (compresses well) let original = b"Hello, ZXC! This is a demonstration of the Rust wrapper. \ Let's add some repetitive content to show compression: \ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA \ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"; println!("Original size: {} bytes", original.len()); // Test all compression levels for level in Level::all() { let compressed = compress(original, *level, None)?; let ratio = (compressed.len() as f64 / original.len() as f64) * 100.0; println!( " Level {:?}: {} bytes ({:.1}%)", level, compressed.len(), ratio ); } // Full roundtrip demonstration println!("\nRoundtrip test:"); let compressed = compress(original, Level::Default, None)?; // Query size before decompression let size = decompressed_size(&compressed).expect("valid compressed data"); println!(" Reported decompressed size: {} bytes", size); let decompressed = decompress(&compressed)?; println!(" Actual decompressed size: {} bytes", decompressed.len()); // Verify data integrity assert_eq!(&decompressed[..], &original[..]); println!(" ✓ Data integrity verified!"); Ok(()) } 
zxc-0.9.1/wrappers/rust/zxc/src/000077500000000000000000000000001515574030100165615ustar00rootroot00000000000000zxc-0.9.1/wrappers/rust/zxc/src/lib.rs000066400000000000000000001137331515574030100177050ustar00rootroot00000000000000/* * ZXC - High-performance lossless compression * * Copyright (c) 2025-2026 Bertrand Lebonnois and contributors. * SPDX-License-Identifier: BSD-3-Clause */ //! Safe Rust bindings to the ZXC compression library. //! //! ZXC is a fast compression library optimized for high decompression speed. //! This crate provides a safe, idiomatic Rust API. //! //! # Quick Start //! //! ```rust //! use zxc::{compress, decompress, Level}; //! //! // Compress some data //! let data = b"Hello, ZXC! This is some data to compress."; //! let compressed = compress(data, Level::Default, None).expect("compression failed"); //! //! // Decompress it back //! let decompressed = decompress(&compressed).expect("decompression failed"); //! assert_eq!(&decompressed[..], &data[..]); //! ``` //! //! # Compression Levels //! //! ZXC provides 5 compression levels trading off speed vs ratio: //! //! | Level | Speed | Ratio | Use Case | //! |-------|-------|-------|----------| //! | `Fastest` | ★★★★★ | ★★☆☆☆ | Real-time, gaming | //! | `Fast` | ★★★★☆ | ★★★☆☆ | Network, streaming | //! | `Default` | ★★★☆☆ | ★★★★☆ | General purpose | //! | `Balanced` | ★★☆☆☆ | ★★★★☆ | Archives | //! | `Compact` | ★☆☆☆☆ | ★★★★★ | Storage, firmware | //! //! # Features //! //! - **Checksum verification**: Optional, disabled by default for maximum performance //! 
- **Zero-copy decompression bound**: Query the output size before decompressing #![warn(missing_docs)] #![warn(rust_2018_idioms)] use std::ffi::c_void; pub use zxc_sys::{ ZXC_LEVEL_BALANCED, ZXC_LEVEL_COMPACT, ZXC_LEVEL_DEFAULT, ZXC_LEVEL_FAST, ZXC_LEVEL_FASTEST, ZXC_VERSION_MAJOR, ZXC_VERSION_MINOR, ZXC_VERSION_PATCH, // Error codes ZXC_OK, ZXC_ERROR_MEMORY, ZXC_ERROR_DST_TOO_SMALL, ZXC_ERROR_SRC_TOO_SMALL, ZXC_ERROR_BAD_MAGIC, ZXC_ERROR_BAD_VERSION, ZXC_ERROR_BAD_HEADER, ZXC_ERROR_BAD_CHECKSUM, ZXC_ERROR_CORRUPT_DATA, ZXC_ERROR_BAD_OFFSET, ZXC_ERROR_OVERFLOW, ZXC_ERROR_IO, ZXC_ERROR_NULL_INPUT, ZXC_ERROR_BAD_BLOCK_TYPE, ZXC_ERROR_BAD_BLOCK_SIZE, }; // ============================================================================= // Error Types // ============================================================================= /// Errors that can occur during ZXC operations. #[derive(Debug, Clone, thiserror::Error)] pub enum Error { /// Memory allocation failure #[error("memory allocation failed")] Memory, /// Destination buffer too small #[error("destination buffer too small")] DstTooSmall, /// Source buffer too small or truncated input #[error("source buffer too small or truncated")] SrcTooSmall, /// Invalid magic word in file header #[error("invalid magic word in header")] BadMagic, /// Unsupported file format version #[error("unsupported file format version")] BadVersion, /// Corrupted or invalid header (CRC mismatch) #[error("corrupted or invalid header")] BadHeader, /// Block or global checksum verification failed #[error("checksum verification failed")] BadChecksum, /// Corrupted compressed data #[error("corrupted compressed data")] CorruptData, /// Invalid match offset during decompression #[error("invalid match offset")] BadOffset, /// Buffer overflow detected during processing #[error("buffer overflow detected")] Overflow, /// Read/write/seek failure on file #[error("I/O error")] Io, /// Required input pointer is NULL #[error("null input pointer")] 
NullInput, /// Unknown or unexpected block type #[error("unknown block type")] BadBlockType, /// Invalid block size #[error("invalid block size")] BadBlockSize, /// The compressed data appears to be invalid or truncated #[error("invalid compressed data")] InvalidData, /// Unknown error code from C library #[error("unknown error (code: {0})")] Unknown(i32), } /// Convert a negative error code from the C library to a Rust Error. fn error_from_code(code: i64) -> Error { match code as i32 { ZXC_ERROR_MEMORY => Error::Memory, ZXC_ERROR_DST_TOO_SMALL => Error::DstTooSmall, ZXC_ERROR_SRC_TOO_SMALL => Error::SrcTooSmall, ZXC_ERROR_BAD_MAGIC => Error::BadMagic, ZXC_ERROR_BAD_VERSION => Error::BadVersion, ZXC_ERROR_BAD_HEADER => Error::BadHeader, ZXC_ERROR_BAD_CHECKSUM => Error::BadChecksum, ZXC_ERROR_CORRUPT_DATA => Error::CorruptData, ZXC_ERROR_BAD_OFFSET => Error::BadOffset, ZXC_ERROR_OVERFLOW => Error::Overflow, ZXC_ERROR_IO => Error::Io, ZXC_ERROR_NULL_INPUT => Error::NullInput, ZXC_ERROR_BAD_BLOCK_TYPE => Error::BadBlockType, ZXC_ERROR_BAD_BLOCK_SIZE => Error::BadBlockSize, _ => Error::Unknown(code as i32), } } /// Result type for ZXC operations. pub type Result = std::result::Result; // ============================================================================= // Compression Levels // ============================================================================= /// Compression level presets. /// /// Higher levels produce smaller output but compress more slowly. /// Decompression speed is similar across all levels. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] #[repr(i32)] pub enum Level { /// Fastest compression, best for real-time applications (level 1) Fastest = 1, /// Fast compression, good for real-time applications (level 2) Fast = 2, /// Recommended default: ratio > LZ4, decode speed > LZ4 (level 3) #[default] Default = 3, /// Good ratio, good decode speed (level 4) Balanced = 4, /// Highest density. 
Best for storage/firmware/assets (level 5) Compact = 5, } impl Level { /// Returns all available compression levels. pub fn all() -> &'static [Level] { &[ Level::Fastest, Level::Fast, Level::Default, Level::Balanced, Level::Compact, ] } } impl From for i32 { fn from(level: Level) -> i32 { level as i32 } } // ============================================================================= // Compression Options // ============================================================================= /// Options for compression operations. #[derive(Debug, Clone)] pub struct CompressOptions { /// Compression level (default: `Level::Default`) pub level: Level, /// Enable checksum for data integrity (default: `true`) pub checksum: bool, } impl Default for CompressOptions { fn default() -> Self { Self { level: Level::Default, checksum: true, } } } impl CompressOptions { /// Create options with the specified compression level. pub fn with_level(level: Level) -> Self { Self { level, ..Default::default() } } /// Disable checksum computation for faster compression. pub fn without_checksum(mut self) -> Self { self.checksum = false; self } } // ============================================================================= // Decompression Options // ============================================================================= /// Options for decompression operations. #[derive(Debug, Clone, Default)] pub struct DecompressOptions { /// Verify checksum during decompression (default: `true`) pub verify_checksum: bool, } impl DecompressOptions { /// Create options that skip checksum verification. pub fn skip_checksum() -> Self { Self { verify_checksum: false, } } } // ============================================================================= // Public API // ============================================================================= /// Returns the maximum compressed size for an input of the given size. /// /// Use this to allocate a buffer before calling [`compress_to`]. 
/// /// # Example /// /// ```rust /// let bound = zxc::compress_bound(1024); /// assert!(bound > 1024); // Accounts for headers and worst-case expansion /// ``` #[inline] pub fn compress_bound(input_size: usize) -> u64 { unsafe { zxc_sys::zxc_compress_bound(input_size) } } /// Compresses data with the specified level. /// /// This is a convenience function that allocates the output buffer automatically. /// For zero-allocation usage, see [`compress_to`]. /// /// # Arguments /// /// * `data` - The data to compress /// * `level` - Compression level /// * `checksum` - Optional checksum for data integrity (`None` = disabled for maximum performance) /// /// # Example /// /// ```rust /// use zxc::{compress, Level}; /// /// let data = b"Hello, world!"; /// /// // Maximum performance (no checksum) /// let compressed = compress(data, Level::Default, None)?; /// /// // With data integrity verification /// let compressed = compress(data, Level::Default, Some(true))?; /// # Ok::<(), zxc::Error>(()) /// ``` pub fn compress(data: &[u8], level: Level, checksum: Option) -> Result> { let opts = CompressOptions { level, checksum: checksum.unwrap_or(false), }; compress_with_options(data, &opts) } /// Compresses data with full options control. /// /// # Example /// /// ```rust /// use zxc::{compress_with_options, CompressOptions, Level}; /// /// let data = b"Hello, world!"; /// let opts = CompressOptions::with_level(Level::Compact).without_checksum(); /// let compressed = compress_with_options(data, &opts)?; /// # Ok::<(), zxc::Error>(()) /// ``` pub fn compress_with_options(data: &[u8], options: &CompressOptions) -> Result> { let bound = compress_bound(data.len()) as usize; let mut output = Vec::with_capacity(bound); let written = unsafe { impl_compress( data, output.as_mut_ptr(), output.capacity(), options )? }; unsafe { output.set_len(written); } Ok(output) } /// Helper to handle the raw compression call. 
/// /// # Safety /// /// `dst_ptr` must be valid for writes up to `dst_cap` bytes. #[inline(always)] unsafe fn impl_compress( data: &[u8], dst_ptr: *mut u8, dst_cap: usize, options: &CompressOptions, ) -> Result { let written = unsafe { let copts = zxc_sys::zxc_compress_opts_t { level: options.level as i32, checksum_enabled: options.checksum as i32, ..Default::default() }; zxc_sys::zxc_compress( data.as_ptr() as *const c_void, data.len(), dst_ptr as *mut c_void, dst_cap, &copts, ) }; if written < 0 { return Err(error_from_code(written)); } if written == 0 && !data.is_empty() { return Err(Error::InvalidData); } Ok(written as usize) } /// Compresses data into a pre-allocated buffer. /// /// Returns the number of bytes written to `output`. /// /// # Errors /// /// Returns [`Error::CompressionFailed`] if the output buffer is too small /// or an internal error occurs. /// /// # Example /// /// ```rust /// use zxc::{compress_to, compress_bound, CompressOptions}; /// /// let data = b"Hello, world!"; /// let mut output = vec![0u8; compress_bound(data.len()) as usize]; /// let size = compress_to(data, &mut output, &CompressOptions::default())?; /// output.truncate(size); /// # Ok::<(), zxc::Error>(()) /// ``` pub fn compress_to(data: &[u8], output: &mut [u8], options: &CompressOptions) -> Result { unsafe { impl_compress(data, output.as_mut_ptr(), output.len(), options) } } /// Returns the original uncompressed size from compressed data. /// /// This reads the footer without performing decompression. /// Returns `None` if the data is invalid or truncated. 
/// /// # Example /// /// ```rust /// use zxc::{compress, decompressed_size, Level}; /// /// let data = b"Hello, world!"; /// let compressed = compress(data, Level::Default, None)?; /// let size = decompressed_size(&compressed); /// assert_eq!(size, Some(data.len() as u64)); /// # Ok::<(), zxc::Error>(()) /// ``` pub fn decompressed_size(compressed: &[u8]) -> Option { let size = unsafe { zxc_sys::zxc_get_decompressed_size(compressed.as_ptr() as *const c_void, compressed.len()) }; if size == 0 && !compressed.is_empty() { None } else { Some(size) } } /// Decompresses ZXC-compressed data. /// /// This is a convenience function that queries the output size and allocates /// the buffer automatically. For zero-allocation usage, see [`decompress_to`]. /// /// # Example /// /// ```rust /// use zxc::{compress, decompress, Level}; /// /// let data = b"Hello, world!"; /// let compressed = compress(data, Level::Default, None)?; /// let decompressed = decompress(&compressed)?; /// assert_eq!(&decompressed[..], &data[..]); /// # Ok::<(), zxc::Error>(()) /// ``` pub fn decompress(compressed: &[u8]) -> Result> { decompress_with_options(compressed, &DecompressOptions::default()) } /// Decompresses data with full options control. pub fn decompress_with_options(compressed: &[u8], options: &DecompressOptions) -> Result> { let size = decompressed_size(compressed).ok_or(Error::InvalidData)? as usize; let mut output = Vec::with_capacity(size); let written = unsafe { impl_decompress( compressed, output.as_mut_ptr(), output.capacity(), options )? }; if written != size { return Err(Error::InvalidData); } unsafe { output.set_len(written); } Ok(output) } /// Helper to handle the raw decompression call. /// /// # Safety /// /// `dst_ptr` must be valid for writes up to `dst_cap` bytes. 
#[inline(always)] unsafe fn impl_decompress( compressed: &[u8], dst_ptr: *mut u8, dst_cap: usize, options: &DecompressOptions, ) -> Result { let written = unsafe { let dopts = zxc_sys::zxc_decompress_opts_t { checksum_enabled: if options.verify_checksum { 1 } else { 0 }, ..Default::default() }; zxc_sys::zxc_decompress( compressed.as_ptr() as *const c_void, compressed.len(), dst_ptr as *mut c_void, dst_cap, &dopts, ) }; if written < 0 { return Err(error_from_code(written)); } if written == 0 && !compressed.is_empty() { return Err(Error::InvalidData); } Ok(written as usize) } /// Decompresses data into a pre-allocated buffer. /// /// Returns the number of bytes written to `output`. /// /// # Errors /// /// Returns an error if decompression fails due to invalid data, corruption, /// or insufficient output buffer size. pub fn decompress_to( compressed: &[u8], output: &mut [u8], options: &DecompressOptions, ) -> Result { unsafe { impl_decompress(compressed, output.as_mut_ptr(), output.len(), options) } } /// Returns the library version as a tuple (major, minor, patch). pub fn version() -> (u32, u32, u32) { (ZXC_VERSION_MAJOR, ZXC_VERSION_MINOR, ZXC_VERSION_PATCH) } /// Returns the library version as a string. pub fn version_string() -> String { format!( "{}.{}.{}", ZXC_VERSION_MAJOR, ZXC_VERSION_MINOR, ZXC_VERSION_PATCH ) } // ============================================================================= // Streaming API (File-based) // ============================================================================= use std::path::Path; use std::fs::File; use std::io; #[cfg(unix)] use std::os::unix::io::AsRawFd; /// Options for streaming compression operations. 
#[derive(Debug, Clone)] pub struct StreamCompressOptions { /// Compression level (default: `Level::Default`) pub level: Level, /// Number of worker threads (default: `None` = auto-detect CPU cores) pub threads: Option, /// Enable checksum for data integrity (default: `true`) pub checksum: bool, } impl Default for StreamCompressOptions { fn default() -> Self { Self { level: Level::Default, threads: None, checksum: true, } } } impl StreamCompressOptions { /// Create options with the specified compression level. pub fn with_level(level: Level) -> Self { Self { level, ..Default::default() } } /// Set the number of worker threads. pub fn threads(mut self, n: usize) -> Self { self.threads = Some(n); self } /// Disable checksum computation. pub fn without_checksum(mut self) -> Self { self.checksum = false; self } } /// Options for streaming decompression operations. #[derive(Debug, Clone, Default)] pub struct StreamDecompressOptions { /// Number of worker threads (default: `None` = auto-detect CPU cores) pub threads: Option, /// Verify checksum during decompression (default: `true`) pub verify_checksum: bool, } impl StreamDecompressOptions { /// Set the number of worker threads. pub fn threads(mut self, n: usize) -> Self { self.threads = Some(n); self } /// Skip checksum verification. pub fn skip_checksum(mut self) -> Self { self.verify_checksum = false; self } } /// Extend Error enum for I/O errors #[derive(Debug, thiserror::Error)] pub enum StreamError { /// I/O error during file operations #[error("I/O error: {0}")] Io(#[from] io::Error), /// Error from buffer operations #[error("buffer error: {0}")] BufferError(#[from] Error), /// Streaming compression failed #[error("stream compression failed")] CompressionFailed, /// Streaming decompression failed #[error("stream decompression failed")] DecompressionFailed, /// Invalid compressed file #[error("invalid compressed file")] InvalidFile, } /// Result type for streaming operations. 
pub type StreamResult = std::result::Result; /// Convert a Rust File to a C FILE* for read operations. /// /// This function duplicates the file descriptor before passing it to fdopen, /// so the returned FILE* owns its own fd and must be closed with fclose(). #[cfg(unix)] unsafe fn file_to_c_file_read(file: &File) -> *mut libc::FILE { let fd = file.as_raw_fd(); // Duplicate the fd so C FILE* has its own ownership let dup_fd = unsafe { libc::dup(fd) }; if dup_fd < 0 { return std::ptr::null_mut(); } let file_ptr = unsafe { libc::fdopen(dup_fd, c"rb".as_ptr()) }; if file_ptr.is_null() { // fdopen failed, close the duplicated fd to avoid leak unsafe { libc::close(dup_fd); } } file_ptr } /// Convert a Rust File to a C FILE* for write operations. /// /// This function duplicates the file descriptor before passing it to fdopen, /// so the returned FILE* owns its own fd and must be closed with fclose(). #[cfg(unix)] unsafe fn file_to_c_file_write(file: &File) -> *mut libc::FILE { let fd = file.as_raw_fd(); // Duplicate the fd so C FILE* has its own ownership let dup_fd = unsafe { libc::dup(fd) }; if dup_fd < 0 { return std::ptr::null_mut(); } let file_ptr = unsafe { libc::fdopen(dup_fd, c"wb".as_ptr()) }; if file_ptr.is_null() { // fdopen failed, close the duplicated fd to avoid leak unsafe { libc::close(dup_fd); } } file_ptr } /// Convert a Rust File to a C FILE* for read operations (Windows). /// /// This function duplicates the file handle before passing it to the C runtime, /// so the returned FILE* owns its own handle and must be closed with fclose(). 
#[cfg(windows)] unsafe fn file_to_c_file_read(file: &File) -> *mut libc::FILE { use std::os::windows::io::AsRawHandle; let handle = file.as_raw_handle(); // Duplicate the handle so C FILE* has its own ownership let mut dup_handle: *mut std::ffi::c_void = std::ptr::null_mut(); let result = unsafe { windows_sys::Win32::Foundation::DuplicateHandle( windows_sys::Win32::System::Threading::GetCurrentProcess(), handle as *mut std::ffi::c_void, windows_sys::Win32::System::Threading::GetCurrentProcess(), &mut dup_handle, 0, 0, windows_sys::Win32::Foundation::DUPLICATE_SAME_ACCESS, ) }; if result == 0 { return std::ptr::null_mut(); } let fd = unsafe { libc::open_osfhandle(dup_handle as libc::intptr_t, libc::O_RDONLY) }; if fd < 0 { // open_osfhandle failed, close the duplicated handle to avoid leak unsafe { windows_sys::Win32::Foundation::CloseHandle(dup_handle); } return std::ptr::null_mut(); } let file_ptr = unsafe { libc::fdopen(fd, c"rb".as_ptr()) }; if file_ptr.is_null() { // fdopen failed, close the fd (which will close the handle) unsafe { libc::close(fd); } } file_ptr } /// Convert a Rust File to a C FILE* for write operations (Windows). /// /// This function duplicates the file handle before passing it to the C runtime, /// so the returned FILE* owns its own handle and must be closed with fclose(). 
#[cfg(windows)] unsafe fn file_to_c_file_write(file: &File) -> *mut libc::FILE { use std::os::windows::io::AsRawHandle; let handle = file.as_raw_handle(); // Duplicate the handle so C FILE* has its own ownership let mut dup_handle: *mut std::ffi::c_void = std::ptr::null_mut(); let result = unsafe { windows_sys::Win32::Foundation::DuplicateHandle( windows_sys::Win32::System::Threading::GetCurrentProcess(), handle as *mut std::ffi::c_void, windows_sys::Win32::System::Threading::GetCurrentProcess(), &mut dup_handle, 0, 0, windows_sys::Win32::Foundation::DUPLICATE_SAME_ACCESS, ) }; if result == 0 { return std::ptr::null_mut(); } let fd = unsafe { libc::open_osfhandle(dup_handle as libc::intptr_t, libc::O_WRONLY) }; if fd < 0 { // open_osfhandle failed, close the duplicated handle to avoid leak unsafe { windows_sys::Win32::Foundation::CloseHandle(dup_handle); } return std::ptr::null_mut(); } let file_ptr = unsafe { libc::fdopen(fd, c"wb".as_ptr()) }; if file_ptr.is_null() { // fdopen failed, close the fd (which will close the handle) unsafe { libc::close(fd); } } file_ptr } /// Compresses a file using multi-threaded streaming. 
/// /// This is the recommended method for compressing large files, as it: /// - Processes data in chunks without loading the entire file into memory /// - Uses multiple CPU cores for parallel compression /// - Provides better throughput for files larger than a few MB /// /// # Arguments /// /// * `input` - Path to the input file /// * `output` - Path to the output file /// * `level` - Compression level /// * `threads` - Number of threads (`None` = auto-detect CPU cores) /// * `checksum` - Optional checksum for data integrity (`None` = disabled for maximum performance) /// /// # Example /// /// ```rust,no_run /// use zxc::{compress_file, Level}; /// /// // Maximum performance (no checksum, auto threads) /// let bytes = compress_file("input.bin", "output.zxc", Level::Default, None, None)?; /// /// // With data integrity verification /// let bytes = compress_file("input.bin", "output.zxc", Level::Default, None, Some(true))?; /// /// // Custom configuration /// let bytes = compress_file("input.bin", "output.zxc", Level::Compact, Some(4), Some(true))?; /// # Ok::<(), zxc::StreamError>(()) /// ``` pub fn compress_file>( input: P, output: P, level: Level, threads: Option, checksum: Option, ) -> StreamResult { let f_in = File::open(input)?; let f_out = File::create(output)?; let n_threads = threads.unwrap_or(0) as i32; let checksum_enabled = if checksum.unwrap_or(false) { 1 } else { 0 }; unsafe { let c_in = file_to_c_file_read(&f_in); let c_out = file_to_c_file_write(&f_out); // Check for errors and cleanup on failure if c_in.is_null() { if !c_out.is_null() { libc::fclose(c_out); } return Err(StreamError::Io(io::Error::last_os_error())); } if c_out.is_null() { libc::fclose(c_in); return Err(StreamError::Io(io::Error::last_os_error())); } let result = zxc_sys::zxc_stream_compress( c_in, c_out, &zxc_sys::zxc_compress_opts_t { n_threads: n_threads, level: level as i32, checksum_enabled: checksum_enabled, ..Default::default() }, ); // Always close C FILE handles (they own 
duplicated fds) libc::fclose(c_in); libc::fclose(c_out); if result < 0 { Err(StreamError::BufferError(error_from_code(result))) } else { Ok(result as u64) } } } /// Decompresses a file using multi-threaded streaming. /// /// # Example /// /// ```rust,no_run /// use zxc::decompress_file; /// /// // Decompress with auto-detected thread count /// let bytes = decompress_file("compressed.zxc", "output.bin", None)?; /// println!("Decompressed {} bytes", bytes); /// # Ok::<(), zxc::StreamError>(()) /// ``` pub fn decompress_file>( input: P, output: P, threads: Option, ) -> StreamResult { let f_in = File::open(input)?; let f_out = File::create(output)?; let n_threads = threads.unwrap_or(0) as i32; let checksum_enabled = 1; // Default to verify unsafe { let c_in = file_to_c_file_read(&f_in); let c_out = file_to_c_file_write(&f_out); // Check for errors and cleanup on failure if c_in.is_null() { if !c_out.is_null() { libc::fclose(c_out); } return Err(StreamError::Io(io::Error::last_os_error())); } if c_out.is_null() { libc::fclose(c_in); return Err(StreamError::Io(io::Error::last_os_error())); } let result = zxc_sys::zxc_stream_decompress( c_in, c_out, &zxc_sys::zxc_decompress_opts_t { n_threads: n_threads, checksum_enabled: checksum_enabled, ..Default::default() }, ); // Always close C FILE handles (they own duplicated fds) libc::fclose(c_in); libc::fclose(c_out); if result < 0 { Err(StreamError::BufferError(error_from_code(result))) } else { Ok(result as u64) } } } /// Returns the decompressed size stored in a compressed file. /// /// This reads the file footer without performing decompression, /// useful for pre-allocating buffers or showing progress. 
/// /// # Example /// /// ```rust,no_run /// use zxc::file_decompressed_size; /// /// let size = file_decompressed_size("compressed.zxc")?; /// println!("Original size: {} bytes", size); /// # Ok::<(), zxc::StreamError>(()) /// ``` pub fn file_decompressed_size>(path: P) -> StreamResult { let f = File::open(path)?; unsafe { let c_file = file_to_c_file_read(&f); if c_file.is_null() { return Err(StreamError::Io(io::Error::last_os_error())); } let result = zxc_sys::zxc_stream_get_decompressed_size(c_file); if result < 0 { Err(StreamError::InvalidFile) } else { Ok(result as u64) } } } // ============================================================================= // Tests // ============================================================================= #[cfg(test)] mod tests { use super::*; #[test] fn test_roundtrip() { let data = b"Hello, ZXC! This is a test of the safe Rust wrapper."; let compressed = compress(data, Level::Default, None).unwrap(); let decompressed = decompress(&compressed).unwrap(); assert_eq!(&decompressed[..], &data[..]); } #[test] fn test_all_levels() { let data = b"Test data with repetition: CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"; for level in Level::all() { let compressed = compress(data, *level, None).unwrap(); let decompressed = decompress(&compressed).unwrap(); assert_eq!( &decompressed[..], &data[..], "Roundtrip failed at level {:?}", level ); } } #[test] fn test_empty() { let data: &[u8] = b""; let result = compress(data, Level::Default, None); assert!(result.is_err(), "Compressing empty data should return an error"); } #[test] fn test_checksum_options() { let data = b"Test with and without checksum"; // With checksum let opts_with = CompressOptions::with_level(Level::Default); let compressed = compress_with_options(data, &opts_with).unwrap(); let decompressed = decompress(&compressed).unwrap(); assert_eq!(&decompressed[..], &data[..]); // Without checksum let opts_without = CompressOptions::with_level(Level::Fast).without_checksum(); let 
compressed = compress_with_options(data, &opts_without).unwrap(); let decompressed = decompress_with_options(&compressed, &DecompressOptions::skip_checksum()).unwrap(); assert_eq!(&decompressed[..], &data[..]); } #[test] fn test_decompressed_size() { let data = b"Hello, world! Testing decompressed_size function."; let compressed = compress(data, Level::Default, None).unwrap(); let size = decompressed_size(&compressed); assert_eq!(size, Some(data.len() as u64)); } #[test] fn test_version() { let (major, minor, patch) = version(); let expected = format!("{}.{}.{}", major, minor, patch); assert_eq!(version_string(), expected); let cargo_version = env!("CARGO_PKG_VERSION"); assert_eq!(expected, cargo_version); } #[test] fn test_invalid_data() { let garbage = b"not valid zxc data"; let result = decompress(garbage); assert!(result.is_err()); } #[test] fn test_specific_error_codes() { // Test invalid magic - size detection should fail first and return InvalidData let invalid_magic = b"INVALID_DATA_NOT_ZXC"; let result = decompress(invalid_magic); assert!(result.is_err(), "Should fail on invalid data"); // Test truncated data - should produce specific error let data = b"Hello, world! 
Testing error codes with enough data to compress well."; let compressed = compress(data, Level::Default, None).unwrap(); let truncated = &compressed[..10]; // Too short to be valid match decompress(truncated) { Err(Error::InvalidData) | Err(Error::SrcTooSmall) | Err(Error::BadHeader) | Err(Error::BadMagic) => { // Any of these errors are acceptable for truncated data } other => panic!("Expected specific error for truncated data, got: {:?}", other), } } #[test] fn test_error_messages() { // Verify error messages are descriptive let errors = vec![ (Error::Memory, "memory allocation failed"), (Error::BadChecksum, "checksum verification failed"), (Error::CorruptData, "corrupted compressed data"), (Error::DstTooSmall, "destination buffer too small"), ]; for (error, expected_msg) in errors { let msg = error.to_string(); assert!( msg.contains(expected_msg), "Error message '{}' should contain '{}'", msg, expected_msg ); } } #[test] fn test_compress_to_buffer() { let data = b"Testing compress_to with pre-allocated buffer"; let mut output = vec![0u8; compress_bound(data.len()) as usize]; let size = compress_to(data, &mut output, &CompressOptions::default()).unwrap(); output.truncate(size); let decompressed = decompress(&output).unwrap(); assert_eq!(&decompressed[..], &data[..]); } #[test] fn test_large_data() { // 1 MB of random-ish but compressible data let data: Vec = (0..1024 * 1024) .map(|i| ((i % 256) ^ ((i / 256) % 256)) as u8) .collect(); let compressed = compress(&data, Level::Default, None).unwrap(); assert!(compressed.len() < data.len()); // Should compress let decompressed = decompress(&compressed).unwrap(); assert_eq!(decompressed, data); } } // ============================================================================= // Streaming API Tests // ============================================================================= #[cfg(test)] mod streaming_tests { use super::*; use std::fs; use std::io::Write; fn temp_path(name: &str) -> String { let dir_name = 
format!("zxc_test_{}", std::process::id()); // Try the system temp directory first let mut path = std::env::temp_dir(); path.push(&dir_name); // If we can't create the directory in the system temp, fall back // to a subdirectory relative to the current working directory. // This happens on some Windows CI runners where TEMP is missing or // points to a path the process cannot access. if fs::create_dir_all(&path).is_err() { path = std::env::current_dir().expect("cannot determine current directory"); path.push(&dir_name); fs::create_dir_all(&path).expect("failed to create temp directory in current dir"); } path.push(name); path.to_string_lossy().into_owned() } #[test] fn test_file_roundtrip() { let input_path = temp_path("roundtrip_input.bin"); let compressed_path = temp_path("roundtrip_compressed.zxc"); let output_path = temp_path("roundtrip_output.bin"); // Create test data let data: Vec = (0..64 * 1024) // 64 KB .map(|i| ((i % 256) ^ ((i / 256) % 256)) as u8) .collect(); // Write test file { let mut f = fs::File::create(&input_path).unwrap(); f.write_all(&data).unwrap(); } // Compress let compressed_size = compress_file(&input_path, &compressed_path, Level::Default, None, None).unwrap(); assert!(compressed_size > 0); // Decompress let decompressed_size = decompress_file(&compressed_path, &output_path, None).unwrap(); assert_eq!(decompressed_size, data.len() as u64); // Verify content let result = fs::read(&output_path).unwrap(); assert_eq!(result, data); // Cleanup let _ = fs::remove_file(&input_path); let _ = fs::remove_file(&compressed_path); let _ = fs::remove_file(&output_path); } #[test] fn test_file_decompressed_size_query() { let input_path = temp_path("size_input.bin"); let compressed_path = temp_path("size_compressed.zxc"); // Create test data let data: Vec = (0..128 * 1024) // 128 KB .map(|i| (i % 256) as u8) .collect(); // Write and compress { let mut f = fs::File::create(&input_path).unwrap(); f.write_all(&data).unwrap(); } compress_file(&input_path, 
&compressed_path, Level::Default, None, None).unwrap(); // Query size let reported_size = file_decompressed_size(&compressed_path).unwrap(); assert_eq!(reported_size, data.len() as u64); // Cleanup let _ = fs::remove_file(&input_path); let _ = fs::remove_file(&compressed_path); } #[test] fn test_file_all_levels() { let input_path = temp_path("levels_input.bin"); // Create test data let data: Vec = (0..32 * 1024) // 32 KB .map(|i| ((i % 256) ^ ((i / 256) % 256)) as u8) .collect(); { let mut f = fs::File::create(&input_path).unwrap(); f.write_all(&data).unwrap(); } for level in Level::all() { let compressed_path = temp_path(&format!("levels_{:?}.zxc", level)); let output_path = temp_path(&format!("levels_{:?}_out.bin", level)); // Compress with this level compress_file(&input_path, &compressed_path, *level, Some(2), None).unwrap(); // Decompress decompress_file(&compressed_path, &output_path, Some(2)).unwrap(); // Verify let result = fs::read(&output_path).unwrap(); assert_eq!(result, data, "Data mismatch at level {:?}", level); // Cleanup let _ = fs::remove_file(&compressed_path); let _ = fs::remove_file(&output_path); } let _ = fs::remove_file(&input_path); } #[test] fn test_file_multithreaded() { let input_path = temp_path("mt_input.bin"); let compressed_path = temp_path("mt_compressed.zxc"); let output_path = temp_path("mt_output.bin"); // Create larger test data (1 MB) let data: Vec = (0..1024 * 1024) .map(|i| ((i % 256) ^ ((i / 256) % 256)) as u8) .collect(); { let mut f = fs::File::create(&input_path).unwrap(); f.write_all(&data).unwrap(); } // Test with different thread counts for threads in [1, 2, 4] { // Compress compress_file(&input_path, &compressed_path, Level::Default, Some(threads), None).unwrap(); // Decompress let size = decompress_file(&compressed_path, &output_path, Some(threads)).unwrap(); assert_eq!(size, data.len() as u64); // Verify let result = fs::read(&output_path).unwrap(); assert_eq!(result, data, "Mismatch with {} threads", threads); } // 
Cleanup let _ = fs::remove_file(&input_path); let _ = fs::remove_file(&compressed_path); let _ = fs::remove_file(&output_path); } } zxc-0.9.1/zxc.pc.in000066400000000000000000000005141515574030100140630ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ Name: zxc Description: High-performance asymmetric lossless compression License: BSD 3-Clause Version: @PROJECT_VERSION@ Libs: -L${libdir} -lzxc Libs.private: -pthread Cflags: -I${includedir}zxc-0.9.1/zxcConfig.cmake.in000066400000000000000000000002431515574030100156660ustar00rootroot00000000000000@PACKAGE_INIT@ include(CMakeFindDependencyMacro) find_dependency(Threads) include("${CMAKE_CURRENT_LIST_DIR}/zxc-targets.cmake") check_required_components(zxc)