diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..dba6c47
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,14 @@
+# Dependabot: keep GitHub Actions up to date across workflows
+# https://docs.github.com/en/code-security/dependabot
+
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+    commit-message:
+      prefix: "deps(ci)"
+    labels:
+      - "dependencies"
+      - "github-actions"
diff --git a/.github/scripts/update_versions.dart b/.github/scripts/update_versions.dart
new file mode 100644
index 0000000..29beb56
--- /dev/null
+++ b/.github/scripts/update_versions.dart
@@ -0,0 +1,103 @@
+// ignore_for_file: avoid_print
+/// Updates the version and dependency versions across federated plugin pubspec files.
+/// Usage: dart run .github/scripts/update_versions.dart <version>
+/// Example: dart run .github/scripts/update_versions.dart 1.3.30
+
+import 'dart:io';
+
+void main(List<String> args) {
+  if (args.length != 1) {
+    print('Usage: dart run .github/scripts/update_versions.dart <version>');
+    exit(1);
+  }
+  final version = args.single.trim();
+  if (!RegExp(r'^\d+\.\d+\.\d+$').hasMatch(version)) {
+    print('Error: version must be semver (e.g. 1.0.0), got: $version');
+    exit(1);
+  }
+
+  final repoRoot = _findRepoRoot();
+  final packages = _packageConfig(repoRoot);
+
+  for (final entry in packages.entries) {
+    final path = '$repoRoot/${entry.key}/pubspec.yaml';
+    final file = File(path);
+    if (!file.existsSync()) {
+      print('Error: $path not found');
+      exit(1);
+    }
+    var content = file.readAsStringSync();
+    content = _updateVersionInContent(content, entry.value, version);
+    file.writeAsStringSync(content);
+    print('Updated $path');
+  }
+
+  print('Verifying...');
+  for (final entry in packages.entries) {
+    final path = '$repoRoot/${entry.key}/pubspec.yaml';
+    final content = File(path).readAsStringSync();
+    if (!content.contains('version: $version')) {
+      print('Error: $path does not contain version: $version after update');
+      exit(1);
+    }
+    for (final dep in entry.value.deps) {
+      if (!content.contains('$dep: ^$version') && !content.contains('$dep: $version')) {
+        print('Error: $path missing dependency $dep: ^$version');
+        exit(1);
+      }
+    }
+  }
+  print('All versions updated and verified.');
+}
+
+/// Walks upward from the current directory until it finds the directory that
+/// contains zstandard/pubspec.yaml (the repo root); falls back to the cwd.
+String _findRepoRoot() {
+  var dir = Directory.current.path;
+  while (dir != '/' && dir.isNotEmpty) {
+    if (File('$dir/zstandard/pubspec.yaml').existsSync()) return dir;
+    dir = Directory(dir).parent.path;
+  }
+  return Directory.current.path;
+}
+
+String _updateVersionInContent(String content, PackageSpec spec, String version) {
+  // The multiline `$` matches before the line's newline, so the replacement
+  // must not append its own `\n` (that would insert a blank line).
+  content = content.replaceFirst(
+    RegExp(r'^version:\s*[\d.]+\s*$', multiLine: true),
+    'version: $version',
+  );
+  for (final dep in spec.deps) {
+    content = content.replaceFirstMapped(
+      RegExp(r'(\s*' + dep + r':\s*)\^?[\d.]+'),
+      (m) => '${m[1]}^$version',
+    );
+  }
+  return content;
+}
+
+class PackageSpec {
+  const PackageSpec({required this.deps});
+  final List<String> deps;
+}
+
+Map<String, PackageSpec> _packageConfig(String root) {
+  return {
+    'zstandard_platform_interface': const PackageSpec(deps: []),
+    'zstandard_android': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_ios': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_macos': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_linux': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_windows': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_web': const PackageSpec(deps: ['zstandard_platform_interface']),
+    'zstandard_cli': const PackageSpec(deps: []),
+    'zstandard': const PackageSpec(deps: [
+      'zstandard_platform_interface',
+      'zstandard_android',
+      'zstandard_ios',
+      'zstandard_linux',
+      'zstandard_macos',
+      'zstandard_web',
+      'zstandard_windows',
+    ]),
+  };
+}
diff --git a/.github/scripts/verify-pubdev-package.sh b/.github/scripts/verify-pubdev-package.sh
new file mode 100644
index 0000000..e8bf2f6
--- /dev/null
+++ b/.github/scripts/verify-pubdev-package.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# Verify that a package version is available on pub.dev (with increasing backoff, max 10 min).
+# Usage: verify-pubdev-package.sh <package> <version>
+# Exit 0 when the version is available, 1 on timeout or invalid args.
+
+set -euo pipefail
+
+PACKAGE="${1:?Usage: $0 <package> <version>}"
+EXPECTED_VERSION="${2:?Usage: $0 <package> <version>}"
+MAX_WAIT_SEC=600
+INITIAL_SLEEP=30
+API_URL="https://pub.dev/api/packages/${PACKAGE}"
+
+elapsed=0
+sleep_sec=$INITIAL_SLEEP
+
+echo "Checking pub.dev for ${PACKAGE}@${EXPECTED_VERSION} (max ${MAX_WAIT_SEC}s)..."
+
+while [ $elapsed -lt $MAX_WAIT_SEC ]; do
+  if resp=$(curl -sS -f "$API_URL" 2>/dev/null); then
+    if echo "$resp" | grep -q "\"version\":\"${EXPECTED_VERSION}\""; then
+      echo "Package ${PACKAGE}@${EXPECTED_VERSION} is available on pub.dev."
+      exit 0
+    fi
+    # Also accept the version anywhere in the "versions" array
+    if echo "$resp" | grep -q "\"${EXPECTED_VERSION}\""; then
+      echo "Package ${PACKAGE} version ${EXPECTED_VERSION} is available on pub.dev."
+      exit 0
+    fi
+  fi
+  echo "  Not yet available (elapsed ${elapsed}s), waiting ${sleep_sec}s..."
+  sleep "$sleep_sec"
+  elapsed=$((elapsed + sleep_sec))
+  # Increasing backoff: 30, 45, 60, 75, ... capped at 120s
+  if [ $sleep_sec -lt 120 ]; then
+    sleep_sec=$((sleep_sec + 15))
+    [ $sleep_sec -gt 120 ] && sleep_sec=120
+  fi
+done
+
+echo "ERROR: ${PACKAGE}@${EXPECTED_VERSION} not available on pub.dev after ${MAX_WAIT_SEC}s."
+exit 1
diff --git a/.github/workflows/push_checks_android.yml b/.github/workflows/push_checks_android.yml
index 7a9d006..b3b3240 100644
--- a/.github/workflows/push_checks_android.yml
+++ b/.github/workflows/push_checks_android.yml
@@ -47,12 +47,42 @@ jobs:
           flutter-version: '3.35.6'
 
       - name: Get dependencies
-        working-directory: zstandard_android
+        working-directory: zstandard_android/example
         run: flutter pub get
 
-      - name: Test check
-        working-directory: zstandard_android
-        run: flutter test
+      - name: Create and start emulator
+        run: |
+          chmod +x scripts/manage_android_emulator.sh
+          ./scripts/manage_android_emulator.sh create || true
+          ./scripts/manage_android_emulator.sh start
+
+      - name: Run integration tests
+        run: |
+          DEVICE_ID=$(./scripts/manage_android_emulator.sh device-id)
+          cd zstandard_android/example && flutter test integration_test/ -d "$DEVICE_ID" --coverage
+
+      - name: Stop emulator
+        if: always()
+        run: ./scripts/manage_android_emulator.sh stop || true
+
+      - name: Install lcov
+        run: brew install lcov
+
+      - name: Check coverage threshold (95%)
+        working-directory: zstandard_android/example
+        run: |
+          if [ -f coverage/lcov.info ]; then
+            COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//')
+            echo "Coverage: ${COVERAGE}%"
+            if [ -n "$COVERAGE" ] && (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi
+          fi
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v4
+        with:
+          files: zstandard_android/example/coverage/lcov.info
+          flags: android
+          fail_ci_if_error: false
 
   check_android_publish_dry_run:
     name: Dry Run Publish Android
diff --git a/.github/workflows/push_checks_cli.yml b/.github/workflows/push_checks_cli.yml
index d5c52e3..85d7ecc 100644
--- a/.github/workflows/push_checks_cli.yml
+++ b/.github/workflows/push_checks_cli.yml
@@ -38,6 +38,7 @@ jobs:
   check_cli_test:
     name: Test CLI
     runs-on: [self-hosted, macOS]
+    timeout-minutes: 30
     steps:
       - uses: actions/checkout@v4
 
@@ -52,7 +53,43 @@ jobs:
 
       - name: Test check
         working-directory: zstandard_cli
-        run: dart test
+        run: dart test --coverage=coverage
+
+      - name: Format coverage to lcov
+        working-directory: zstandard_cli
+        run: dart run coverage:format_coverage --lcov -i coverage -o coverage/lcov.info --packages=.dart_tool/package_config.json
+
+      - name: Install lcov
+        run: brew install lcov
+
+      - name: Check coverage threshold (95%)
+        working-directory: zstandard_cli
+        run: |
+          COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//')
+          echo "Coverage: ${COVERAGE}%"
+          if [ -z "$COVERAGE" ]; then echo "Could not parse coverage"; exit 1; fi
+          if (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v4
+        with:
+          files: zstandard_cli/coverage/lcov.info
+          flags: cli
+          fail_ci_if_error: true
+
+      - name: Run benchmarks
+        working-directory: zstandard_cli
+        run: dart run benchmark/benchmark_suite.dart --output=benchmark_results.json || true
+
+      - name: Check performance regression (if baseline exists)
+        working-directory: zstandard_cli
+        run: |
+          if [ -f benchmark/baselines/baseline_macos_arm64.json ]; then
+            dart run ../scripts/check_performance_regression.dart \
+              --baseline=benchmark/baselines/baseline_macos_arm64.json \
+              --current=benchmark_results.json \
+              --threshold=0.10 || true
+          fi
 
   check_cli_publish_dry_run:
     name: Dry Run Publish CLI
diff --git
a/.github/workflows/push_checks_ios.yml b/.github/workflows/push_checks_ios.yml index ac7f05d..a071c13 100644 --- a/.github/workflows/push_checks_ios.yml +++ b/.github/workflows/push_checks_ios.yml @@ -48,12 +48,37 @@ jobs: flutter-version: '3.35.6' - name: Get dependencies - working-directory: zstandard_ios + working-directory: zstandard_ios/example run: flutter pub get - - name: Test check - working-directory: zstandard_ios - run: flutter test + - name: Boot iOS simulator + run: | + chmod +x scripts/manage_ios_simulator.sh + ./scripts/manage_ios_simulator.sh start + + - name: Run integration tests + run: | + DEVICE_ID=$(./scripts/manage_ios_simulator.sh device-id) + cd zstandard_ios/example && flutter test integration_test/ -d "$DEVICE_ID" --coverage + + - name: Install lcov + run: brew install lcov + + - name: Check coverage threshold (95%) + working-directory: zstandard_ios/example + run: | + if [ -f coverage/lcov.info ]; then + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -n "$COVERAGE" ] && (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi + fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_ios/example/coverage/lcov.info + flags: ios + fail_ci_if_error: false check_ios_publish_dry_run: name: Dry Run Publish iOS diff --git a/.github/workflows/push_checks_linux.yml b/.github/workflows/push_checks_linux.yml index 8f416cc..862fb64 100644 --- a/.github/workflows/push_checks_linux.yml +++ b/.github/workflows/push_checks_linux.yml @@ -51,6 +51,36 @@ jobs: working-directory: zstandard_linux run: flutter pub get + - name: Unit tests + working-directory: zstandard_linux + run: flutter test --coverage + + - name: Get example dependencies + working-directory: zstandard_linux/example + run: flutter pub get + + - name: Run integration tests + working-directory: zstandard_linux/example + run: flutter test integration_test/ -d linux + + - name: Install lcov + run: sudo apt-get update && sudo apt-get install -y lcov + + - name: Check coverage threshold (95%) + working-directory: zstandard_linux + run: | + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -z "$COVERAGE" ]; then echo "Could not parse coverage"; exit 1; fi + if (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_linux/coverage/lcov.info + flags: linux + fail_ci_if_error: true + check_linux_publish_dry_run: name: Dry Run Publish Linux runs-on: [self-hosted, Linux] diff --git a/.github/workflows/push_checks_macos.yml b/.github/workflows/push_checks_macos.yml index bf3de55..e87928b 100644 --- a/.github/workflows/push_checks_macos.yml +++ b/.github/workflows/push_checks_macos.yml @@ -47,9 +47,33 @@ jobs: channel: 'stable' flutter-version: '3.35.6' - - name: Test check - working-directory: zstandard_macos - run: flutter test + - name: Ensure macOS framework and get dependencies + run: | + chmod +x scripts/ensure_macos_framework.sh + ./scripts/ensure_macos_framework.sh + + - name: Run integration tests + working-directory: zstandard_macos/example + run: flutter test integration_test/ -d macos --coverage + + - name: Install lcov + run: brew install lcov + + - name: Check coverage threshold (95%) + 
working-directory: zstandard_macos/example + run: | + if [ -f coverage/lcov.info ]; then + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -n "$COVERAGE" ] && (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi + fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_macos/example/coverage/lcov.info + flags: macos + fail_ci_if_error: false check_macos_publish_dry_run: name: Dry Run Publish macOS diff --git a/.github/workflows/push_checks_platform_interface.yml b/.github/workflows/push_checks_platform_interface.yml index 9549fa5..d1a392a 100644 --- a/.github/workflows/push_checks_platform_interface.yml +++ b/.github/workflows/push_checks_platform_interface.yml @@ -54,7 +54,25 @@ jobs: - name: Test check working-directory: zstandard_platform_interface - run: flutter test + run: flutter test --coverage + + - name: Install lcov + run: brew install lcov + + - name: Check coverage threshold (95%) + working-directory: zstandard_platform_interface + run: | + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -z "$COVERAGE" ]; then echo "Could not parse coverage"; exit 1; fi + if (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_platform_interface/coverage/lcov.info + flags: platform_interface + fail_ci_if_error: true check_platform_interface_publish_dry_run: name: Dry Run Publish Platform Interface diff --git a/.github/workflows/push_checks_web.yml b/.github/workflows/push_checks_web.yml index b4deabd..e55a543 100644 --- a/.github/workflows/push_checks_web.yml +++ b/.github/workflows/push_checks_web.yml @@ -51,9 +51,61 @@ jobs: working-directory: zstandard_web run: flutter pub get - - name: Test check + - name: Unit tests (Chrome) working-directory: zstandard_web - run: flutter test + run: flutter test -d chrome --coverage + + - name: Get example dependencies + working-directory: zstandard_web/example + run: flutter pub get + + - name: Install ChromeDriver + run: brew install chromedriver + + - name: Remove ChromeDriver quarantine (macOS) + run: | + CHROMEDRIVER_BIN=$(command -v chromedriver) + while [[ -L "$CHROMEDRIVER_BIN" ]]; do + NEXT=$(readlink "$CHROMEDRIVER_BIN") + [[ "$NEXT" != /* ]] && NEXT="$(dirname "$CHROMEDRIVER_BIN")/$NEXT" + CHROMEDRIVER_BIN=$NEXT + done + if [[ -f "$CHROMEDRIVER_BIN" ]] && xattr "$CHROMEDRIVER_BIN" 2>/dev/null | grep -q com.apple.quarantine; then + xattr -d com.apple.quarantine "$CHROMEDRIVER_BIN" 2>/dev/null || true + fi + + - name: Start ChromeDriver + run: | + chromedriver --port=4444 & + sleep 3 + lsof -i:4444 || (echo "ChromeDriver failed to start"; exit 1) + + - name: Run integration tests (flutter drive + ChromeDriver) + working-directory: zstandard_web/example + run: | + flutter drive \ + --driver=test_driver/integration_test.dart \ + --target=integration_test/zstandard_web_integration_test.dart \ + -d web-server \ + --web-port=8080 + + - name: Install lcov + run: brew install lcov + + - name: Check coverage threshold (95%) + working-directory: zstandard_web + run: | + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -z "$COVERAGE" ]; then 
echo "Could not parse coverage"; exit 1; fi + if (( $(echo "$COVERAGE < 95" | bc -l) )); then echo "ERROR: Coverage ${COVERAGE}% is below 95%"; exit 1; fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_web/coverage/lcov.info + flags: web + fail_ci_if_error: true check_web_publish_dry_run: name: Dry Run Publish Web diff --git a/.github/workflows/push_checks_windows.yml b/.github/workflows/push_checks_windows.yml index ecc09a5..4054978 100644 --- a/.github/workflows/push_checks_windows.yml +++ b/.github/workflows/push_checks_windows.yml @@ -44,6 +44,28 @@ jobs: shell: cmd run: flutter pub get + - name: Unit tests + working-directory: zstandard_windows + shell: cmd + run: flutter test --coverage + + - name: Get example dependencies + working-directory: zstandard_windows/example + shell: cmd + run: flutter pub get + + - name: Run integration tests + working-directory: zstandard_windows/example + shell: cmd + run: flutter test integration_test/ -d windows + + - name: Check coverage threshold (95%) and upload to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard_windows/coverage/lcov.info + flags: windows + fail_ci_if_error: true + check_windows_publish_dry_run: name: Dry Run Publish Windows runs-on: [self-hosted, Windows] diff --git a/.github/workflows/push_checks_zstandard.yml b/.github/workflows/push_checks_zstandard.yml index 96c2c05..078852d 100644 --- a/.github/workflows/push_checks_zstandard.yml +++ b/.github/workflows/push_checks_zstandard.yml @@ -39,6 +39,7 @@ jobs: check_zstandard_test: name: Test Zstandard runs-on: [self-hosted, macOS] + timeout-minutes: 30 steps: - uses: actions/checkout@v4 @@ -53,7 +54,31 @@ jobs: - name: Test check working-directory: zstandard - run: flutter test + run: flutter test --coverage + + - name: Install lcov + run: brew install lcov + + - name: Check coverage threshold (95%) + working-directory: zstandard + run: | + COVERAGE=$(lcov --summary coverage/lcov.info 2>&1 | grep "lines" | awk '{print $2}' | sed 's/%//') + echo "Coverage: ${COVERAGE}%" + if [ -z "$COVERAGE" ]; then + echo "Could not parse coverage" + exit 1 + fi + if (( $(echo "$COVERAGE < 95" | bc -l) )); then + echo "ERROR: Coverage ${COVERAGE}% is below 95% threshold" + exit 1 + fi + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: zstandard/coverage/lcov.info + flags: zstandard + fail_ci_if_error: true check_zstandard_publish_dry_run: name: Dry Run Publish Zstandard diff --git a/.github/workflows/quality_report.yml b/.github/workflows/quality_report.yml new file mode 100644 index 0000000..1b059e7 --- /dev/null +++ b/.github/workflows/quality_report.yml @@ -0,0 +1,33 @@ +name: Quality Report + +on: + push: + branches: [develop, master] + schedule: + - cron: '0 0 * * 0' + workflow_dispatch: + +jobs: + generate_report: + name: Generate quality report + runs-on: [self-hosted, macOS] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + + - uses: subosito/flutter-action@v1 + with: + channel: 'stable' + flutter-version: '3.35.6' + + - name: Collect coverage from all packages + run: chmod +x scripts/collect_all_coverage.sh && ./scripts/collect_all_coverage.sh || true + + - name: Generate quality summary + run: dart run scripts/generate_quality_report.dart + + - name: Upload quality summary + uses: actions/upload-artifact@v4 + with: + name: quality-report + path: quality_summary.md diff --git a/.github/workflows/release_workflow.yml b/.github/workflows/release_workflow.yml index 
24f4d8b..4bd91b9 100644 --- a/.github/workflows/release_workflow.yml +++ b/.github/workflows/release_workflow.yml @@ -24,15 +24,34 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true +# Minimum privilege: only grant write where needed +defaults: + run: + shell: bash + +permissions: + contents: read + +# Tool versions in one place. zstd source is at repo root (zstd/); update it manually when needed. +env: + FLUTTER_VERSION: "3.41.4" + jobs: update_files: + permissions: + contents: write name: Update files job runs-on: [self-hosted, macOS, X64] steps: - uses: actions/checkout@v4 - - name: Switch to SSH - run: git remote set-url origin git@github.com:landamessenger/zstandard.git + - uses: subosito/flutter-action@v1 + with: + channel: stable + flutter-version: ${{ env.FLUTTER_VERSION }} + + - name: Configure Git for push (HTTPS + token) + run: git remote set-url origin "https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git" - name: Pre check tag id: pre_check_tag @@ -61,63 +80,8 @@ jobs: cp CHANGELOG.md "zstandard_cli/CHANGELOG.md" cp CHANGELOG.md "zstandard/CHANGELOG.md" - - name: Update version in zstandard_platform_interface pubspec.yaml - working-directory: zstandard_platform_interface - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_android pubspec.yaml - working-directory: zstandard_android - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_ios pubspec.yaml - working-directory: zstandard_ios - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_macos pubspec.yaml - working-directory: zstandard_macos - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_linux pubspec.yaml - working-directory: zstandard_linux - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_windows pubspec.yaml - working-directory: zstandard_windows - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_web pubspec.yaml - working-directory: zstandard_web - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard pubspec.yaml - working-directory: zstandard - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_platform_interface: .*/ 
zstandard_platform_interface: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_android: .*/ zstandard_android: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_ios: .*/ zstandard_ios: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_web: .*/ zstandard_web: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_macos: .*/ zstandard_macos: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_windows: .*/ zstandard_windows: ^${{ github.event.inputs.version }}/" pubspec.yaml - sed -i '' "s/^ zstandard_linux: .*/ zstandard_linux: ^${{ github.event.inputs.version }}/" pubspec.yaml - - - name: Update version in zstandard_cli pubspec.yaml - working-directory: zstandard_cli - run: | - sed -i '' "s/^version:.*/version: ${{ github.event.inputs.version }}/" pubspec.yaml + - name: Update versions in all pubspec.yaml + run: dart run .github/scripts/update_versions.dart "${{ github.event.inputs.version }}" - name: Commit and Push version updates uses: EndBug/add-and-commit@v9 @@ -131,25 +95,25 @@ jobs: name: macOS precompiled libraries runs-on: [self-hosted, macOS, X64] needs: [update_files] + permissions: + contents: write steps: - uses: actions/checkout@v4 - - name: Switch to SSH - run: git remote set-url origin git@github.com:landamessenger/zstandard.git + - name: Configure Git for push (HTTPS + token) + run: git remote set-url origin "https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git" - name: Update branch run: | git pull origin ${{ github.ref }} --rebase - - name: Download source files from facebook/zstd + - name: Copy zstd from repo root into zstandard_cli run: | - cd zstandard_cli - git clone https://github.com/facebook/zstd.git - mkdir src - mv zstd/lib/* src/ - rm -rf zstd/ + mkdir -p zstandard_cli/src + cp -r zstd/lib/* zstandard_cli/src/ + ( cd zstd && git rev-parse HEAD 2>/dev/null && git describe --tags 2>/dev/null ) || true - - name: List files in zstd/lib to verify + - name: List files in zstandard_cli/src to verify run: cd zstandard_cli && ls -R src - name: Compile x86-x64 macos lib @@ -197,25 +161,25 @@ jobs: name: Linux precompiled libraries runs-on: [ self-hosted, Linux ] needs: [ update_files, cli_build_macos_precompiled_libs ] + permissions: + contents: write steps: - uses: actions/checkout@v4 - - name: Switch to SSH - run: git remote set-url origin git@github.com:landamessenger/zstandard.git + - name: Configure Git for push (HTTPS + token) + run: git remote set-url origin "https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git" - name: Update branch run: | git pull origin ${{ github.ref }} --rebase - - name: Download source files from facebook/zstd + - name: Copy zstd from repo root into zstandard_cli run: | - cd zstandard_cli - git clone https://github.com/facebook/zstd.git - mkdir src - mv zstd/lib/* src/ - rm -rf zstd/ + mkdir -p zstandard_cli/src + cp -r zstd/lib/* zstandard_cli/src/ + ( cd zstd && git rev-parse HEAD 2>/dev/null && git describe --tags 2>/dev/null ) || true - - name: List files in zstd/lib to verify + - name: List files in zstandard_cli/src to verify run: cd zstandard_cli && ls -R src - name: Compile x86_64 Linux lib @@ -257,28 +221,27 @@ jobs: name: Windows precompiled libraries runs-on: [ self-hosted, Windows ] needs: [ update_files, cli_build_linux_precompiled_libs ] + permissions: + contents: write steps: - uses: actions/checkout@v4 - - name: Switch 
to SSH + - name: Configure Git for push (HTTPS + token) shell: cmd - run: git remote set-url origin git@github.com:landamessenger/zstandard.git + run: git remote set-url origin "https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git" - name: Update branch shell: cmd run: | git pull origin ${{ github.ref }} --rebase - - name: Download source files from facebook/zstd + - name: Copy zstd from repo root into zstandard_cli shell: cmd run: | - cd zstandard_cli - git clone https://github.com/facebook/zstd.git - mkdir src - xcopy zstd\lib src\ /E /I - rmdir /S /Q zstd + if not exist zstandard_cli\src mkdir zstandard_cli\src + xcopy zstd\lib zstandard_cli\src\ /E /I - - name: List files in zstd/lib to verify + - name: List files in zstandard_cli/src to verify shell: cmd run: dir zstandard_cli\src /S @@ -328,6 +291,8 @@ jobs: name: Tag version and create release runs-on: ubuntu-latest needs: [update_files, cli_build_windows_precompiled_libs, cli_build_linux_precompiled_libs, cli_build_macos_precompiled_libs] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -336,7 +301,7 @@ jobs: git pull origin ${{ github.ref }} --rebase - name: Git Board Flow - Create Tag - uses: landamessenger/git-board-flow@master + uses: vypdev/copilot@v2 if: ${{ success() }} with: debug: ${{ vars.DEBUG }} @@ -346,7 +311,7 @@ jobs: token: ${{ secrets.PAT }} - name: Git Board Flow - Create Release - uses: landamessenger/git-board-flow@master + uses: vypdev/copilot@v2 if: ${{ success() }} with: debug: ${{ vars.DEBUG }} @@ -357,6 +322,24 @@ jobs: single-action-changelog: '${{ github.event.inputs.changelog }}' token: ${{ secrets.PAT }} + - name: Generate checksums for native libs (integrity / provenance) + if: success() + run: | + echo "Checksums for native libraries (zstandard_cli) at release v${{ github.event.inputs.version }}" > checksums-native-libs.txt + if [ -d zstandard_cli/lib/src/bin ] && [ -n "$(ls -A zstandard_cli/lib/src/bin 2>/dev/null)" ]; then + (cd zstandard_cli/lib/src/bin && sha256sum *) >> checksums-native-libs.txt + cat checksums-native-libs.txt + fi + + - name: Upload checksums to release + if: success() + uses: softprops/action-gh-release@v2 + with: + tag_name: "v${{ github.event.inputs.version }}" + files: checksums-native-libs.txt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + platform_interface_analyze: name: Analyze Platform Interface runs-on: [self-hosted, macOS, X64] @@ -371,7 +354,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_platform_interface @@ -395,7 +378,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_platform_interface @@ -419,7 +402,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_platform_interface @@ -433,6 +416,8 @@ jobs: name: Publish Platform Interface runs-on: [self-hosted, macOS, X64] needs: [platform_interface_analyze, platform_interface_test, platform_interface_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -443,7 +428,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ 
env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_platform_interface @@ -453,6 +438,11 @@ jobs: working-directory: zstandard_platform_interface run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_platform_interface "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_platform_interface.zip ./zstandard_platform_interface @@ -464,15 +454,13 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Wait for dependency to be available - uses: nick-fields/retry@v3 - with: - timeout_minutes: 120 - max_attempts: 200 - command: cd zstandard_android && flutter pub get + - name: Verify platform_interface available on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_platform_interface "${{ github.event.inputs.version }}" - - name: Wait a minute - run: sleep 120 + - name: Get dependencies (platform packages depend on platform_interface) + run: cd zstandard_android && flutter pub get android_analyze: name: Analyze Android @@ -488,7 +476,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_android @@ -512,7 +500,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_android @@ -536,7 +524,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_android @@ -550,6 +538,8 @@ jobs: name: Publish Android runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, android_analyze, android_test, android_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -560,7 +550,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_android @@ -570,6 +560,11 @@ jobs: working-directory: zstandard_android run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_android "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_android.zip ./zstandard_android @@ -595,7 +590,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_ios @@ -619,7 +614,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_ios @@ -643,7 +638,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_ios @@ -657,6 +652,8 @@ jobs: name: Publish iOS runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, ios_analyze, ios_test, ios_publish_dry_run] + permissions: + contents: write steps: - uses: 
actions/checkout@v4 @@ -667,7 +664,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_ios @@ -677,6 +674,11 @@ jobs: working-directory: zstandard_ios run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_ios "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_ios.zip ./zstandard_ios @@ -702,7 +704,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_macos @@ -726,7 +728,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Test check working-directory: zstandard_macos @@ -746,7 +748,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_macos @@ -760,6 +762,8 @@ jobs: name: Publish macOS runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, macos_analyze, macos_test, macos_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -770,7 +774,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_macos @@ -780,6 +784,11 @@ jobs: working-directory: zstandard_macos run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_macos "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_macos.zip ./zstandard_macos @@ -830,6 +839,21 @@ jobs: shell: cmd run: flutter pub get + - name: Unit tests + working-directory: zstandard_windows + shell: cmd + run: flutter test + + - name: Get example dependencies + working-directory: zstandard_windows/example + shell: cmd + run: flutter pub get + + - name: Run integration tests + working-directory: zstandard_windows/example + shell: cmd + run: flutter test integration_test/ -d windows + windows_publish_dry_run: name: Dry Run Publish Windows runs-on: [self-hosted, Windows] @@ -856,6 +880,8 @@ jobs: name: Publish Windows runs-on: [self-hosted, Windows] needs: [platform_interface_publish, windows_analyze, windows_test, windows_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -874,6 +900,9 @@ jobs: shell: cmd run: dart pub publish -f + - name: Verify published on pub.dev + run: bash .github/scripts/verify-pubdev-package.sh zstandard_windows "${{ github.event.inputs.version }}" + - name: Compress folder shell: cmd run: tar -a -c -f zstandard_windows.zip zstandard_windows @@ -900,7 +929,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_linux @@ -924,12 +953,24 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_linux run: flutter pub 
get + - name: Unit tests + working-directory: zstandard_linux + run: flutter test + + - name: Get example dependencies + working-directory: zstandard_linux/example + run: flutter pub get + + - name: Run integration tests + working-directory: zstandard_linux/example + run: flutter test integration_test/ -d linux + linux_publish_dry_run: name: Dry Run Publish Linux runs-on: [self-hosted, Linux] @@ -944,7 +985,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_linux @@ -958,6 +999,8 @@ jobs: name: Publish Linux runs-on: [self-hosted, Linux] needs: [platform_interface_publish, linux_analyze, linux_test, linux_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -968,7 +1011,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_linux @@ -978,6 +1021,11 @@ jobs: working-directory: zstandard_linux run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_linux "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_linux.zip ./zstandard_linux @@ -1003,7 +1051,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_web @@ -1027,15 +1075,49 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_web run: flutter pub get - - name: Test check + - name: Unit tests working-directory: zstandard_web - run: flutter test + run: flutter test -d chrome + + - name: Get example dependencies + working-directory: zstandard_web/example + run: flutter pub get + + - name: Install ChromeDriver + run: brew install chromedriver + + - name: Remove ChromeDriver quarantine (macOS) + run: | + CHROMEDRIVER_BIN=$(command -v chromedriver) + while [[ -L "$CHROMEDRIVER_BIN" ]]; do + NEXT=$(readlink "$CHROMEDRIVER_BIN") + [[ "$NEXT" != /* ]] && NEXT="$(dirname "$CHROMEDRIVER_BIN")/$NEXT" + CHROMEDRIVER_BIN=$NEXT + done + if [[ -f "$CHROMEDRIVER_BIN" ]] && xattr "$CHROMEDRIVER_BIN" 2>/dev/null | grep -q com.apple.quarantine; then + xattr -d com.apple.quarantine "$CHROMEDRIVER_BIN" 2>/dev/null || true + fi + + - name: Start ChromeDriver + run: | + chromedriver --port=4444 & + sleep 3 + lsof -i:4444 || (echo "ChromeDriver failed to start"; exit 1) + + - name: Run integration tests (flutter drive + ChromeDriver) + working-directory: zstandard_web/example + run: | + flutter drive \ + --driver=test_driver/integration_test.dart \ + --target=integration_test/zstandard_web_integration_test.dart \ + -d web-server \ + --web-port=8080 web_publish_dry_run: name: Dry Run Publish Web @@ -1051,7 +1133,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_web @@ -1065,6 +1147,8 @@ jobs: name: Publish Web runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, web_analyze, web_test, web_publish_dry_run] + permissions: + contents: write steps: - 
uses: actions/checkout@v4 @@ -1075,7 +1159,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_web @@ -1085,6 +1169,11 @@ jobs: working-directory: zstandard_web run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_web "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_web.zip ./zstandard_web @@ -1110,7 +1199,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_cli @@ -1134,7 +1223,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_cli @@ -1158,7 +1247,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_cli @@ -1172,6 +1261,8 @@ jobs: name: Publish CLI runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, cli_analyze, cli_test, cli_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -1182,7 +1273,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard_cli @@ -1192,6 +1283,11 @@ jobs: working-directory: zstandard_cli run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard_cli "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard_cli.zip ./zstandard_cli @@ -1217,17 +1313,22 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - - name: Wait for dependency to be available - uses: nick-fields/retry@v3 - with: - timeout_minutes: 120 - max_attempts: 200 - command: cd zstandard && flutter pub get + - name: Verify all platform packages available on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + V="${{ github.event.inputs.version }}" + .github/scripts/verify-pubdev-package.sh zstandard_android "$V" + .github/scripts/verify-pubdev-package.sh zstandard_ios "$V" + .github/scripts/verify-pubdev-package.sh zstandard_web "$V" + .github/scripts/verify-pubdev-package.sh zstandard_macos "$V" + .github/scripts/verify-pubdev-package.sh zstandard_windows "$V" + .github/scripts/verify-pubdev-package.sh zstandard_linux "$V" - - name: Wait a minute - run: sleep 120 + - name: Get dependencies + working-directory: zstandard + run: flutter pub get - name: Analyze check working-directory: zstandard @@ -1247,17 +1348,22 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - - name: Wait for dependency to be available - uses: nick-fields/retry@v3 - with: - timeout_minutes: 120 - max_attempts: 200 - command: cd zstandard && flutter pub get + - name: Verify all platform packages available on pub.dev + run: | + chmod +x 
.github/scripts/verify-pubdev-package.sh + V="${{ github.event.inputs.version }}" + .github/scripts/verify-pubdev-package.sh zstandard_android "$V" + .github/scripts/verify-pubdev-package.sh zstandard_ios "$V" + .github/scripts/verify-pubdev-package.sh zstandard_web "$V" + .github/scripts/verify-pubdev-package.sh zstandard_macos "$V" + .github/scripts/verify-pubdev-package.sh zstandard_windows "$V" + .github/scripts/verify-pubdev-package.sh zstandard_linux "$V" - - name: Wait a minute - run: sleep 120 + - name: Get dependencies + working-directory: zstandard + run: flutter pub get - name: Test check working-directory: zstandard @@ -1277,17 +1383,22 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - - name: Wait for dependency to be available - uses: nick-fields/retry@v3 - with: - timeout_minutes: 120 - max_attempts: 200 - command: cd zstandard && flutter pub get + - name: Verify all platform packages available on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + V="${{ github.event.inputs.version }}" + .github/scripts/verify-pubdev-package.sh zstandard_android "$V" + .github/scripts/verify-pubdev-package.sh zstandard_ios "$V" + .github/scripts/verify-pubdev-package.sh zstandard_web "$V" + .github/scripts/verify-pubdev-package.sh zstandard_macos "$V" + .github/scripts/verify-pubdev-package.sh zstandard_windows "$V" + .github/scripts/verify-pubdev-package.sh zstandard_linux "$V" - - name: Wait a minute - run: sleep 120 + - name: Get dependencies + working-directory: zstandard + run: flutter pub get - name: Publish Dry Run working-directory: zstandard @@ -1297,6 +1408,8 @@ jobs: name: Publish Zstandard runs-on: [self-hosted, macOS, X64] needs: [platform_interface_publish, zstandard_analyze, zstandard_test, zstandard_publish_dry_run] + permissions: + contents: write steps: - uses: actions/checkout@v4 @@ -1307,7 +1420,7 @@ jobs: - uses: subosito/flutter-action@v1 with: channel: 'stable' - flutter-version: '3.35.6' + flutter-version: ${{ env.FLUTTER_VERSION }} - name: Get dependencies working-directory: zstandard @@ -1317,6 +1430,11 @@ jobs: working-directory: zstandard run: dart pub publish -f + - name: Verify published on pub.dev + run: | + chmod +x .github/scripts/verify-pubdev-package.sh + .github/scripts/verify-pubdev-package.sh zstandard "${{ github.event.inputs.version }}" + - name: Compress folder run: zip -r zstandard.zip ./zstandard @@ -1328,17 +1446,52 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + rollback_guide: + name: Rollback / recovery guide + runs-on: ubuntu-latest + if: always() + needs: [ platform_interface_publish, android_publish, ios_publish, web_publish, macos_publish, linux_publish, windows_publish, cli_publish, zstandard_publish ] + permissions: + contents: read + steps: + - name: Output rollback guide + if: >- + needs.platform_interface_publish.result == 'failure' || + needs.android_publish.result == 'failure' || + needs.ios_publish.result == 'failure' || + needs.web_publish.result == 'failure' || + needs.macos_publish.result == 'failure' || + needs.linux_publish.result == 'failure' || + needs.windows_publish.result == 'failure' || + needs.cli_publish.result == 'failure' || + needs.zstandard_publish.result == 'failure' + run: | + echo "## Release failed – recovery guide" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "Version: **${{ github.event.inputs.version }}**" >> "$GITHUB_STEP_SUMMARY" + echo "" >> 
"$GITHUB_STEP_SUMMARY" + echo "### pub.dev cannot delete published versions. If some packages were published:" >> "$GITHUB_STEP_SUMMARY" + echo "1. Decide whether to publish the remaining packages manually or bump to a new patch version." >> "$GITHUB_STEP_SUMMARY" + echo "2. To finish manually: run \`dart pub publish -f\` in each package directory (in dependency order: platform_interface → platforms → cli → zstandard)." >> "$GITHUB_STEP_SUMMARY" + echo "3. Ensure \`dart pub login\` has been run on the runners (pub credentials available)." >> "$GITHUB_STEP_SUMMARY" + echo "4. Re-run this workflow with the same version only to retry failed jobs; fix the cause first." >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "### Dependency order for manual publish:" >> "$GITHUB_STEP_SUMMARY" + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + echo "zstandard_platform_interface → zstandard_android, zstandard_ios, zstandard_web, zstandard_macos, zstandard_windows, zstandard_linux → zstandard_cli → zstandard" >> "$GITHUB_STEP_SUMMARY" + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "See \`docs/deployment/RUNBOOK.md\` for full runbook." >> "$GITHUB_STEP_SUMMARY" + notify_deploy_completed: name: Notify deploy completed runs-on: ubuntu-latest needs: [ zstandard_publish ] steps: - name: Git Board Flow - Deploy success notification - uses: landamessenger/git-board-flow@master + uses: vypdev/copilot@v2 with: debug: ${{ vars.DEBUG }} single-action: 'deployed_action' single-action-issue: '${{ github.event.inputs.issue }}' - openrouter-api-key: ${{ secrets.OPEN_ROUTER_API_KEY }} - openrouter-model: ${{ vars.OPEN_ROUTER_MODEL }} token: ${{ secrets.PAT }} diff --git a/.gitignore b/.gitignore index eeb7cd8..5c240d9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,11 @@ zstandard_cli/zstd/ +.zstd_upstream/ .idea/ */.idea/ */build/ */coverage/ */*.lock -.env \ No newline at end of file +.env +.android_emulator.pid +.android_emulator.log \ No newline at end of file diff --git a/README.md b/README.md index 70a0d93..10808ea 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ [![pub package](https://img.shields.io/pub/v/zstandard.svg)](https://pub.dev/packages/zstandard) -[![pub package](https://img.shields.io/pub/v/zstandard_cli.svg)](https://pub.dev/packages/zstandard_cli) +[![pub package](https://img.shields.io/pub/v/zstandard_cli.svg)](https://pub.dev/packages/zstandard_cli) +[![codecov](https://codecov.io/gh/landamessenger/zstandard/graph/badge.svg)](https://codecov.io/gh/landamessenger/zstandard) # Zstandard @@ -29,6 +30,19 @@ This repository contains a federated Flutter plugin and a CLI package for `zstan --- +## Documentation + +Full documentation is in the [**docs/**](docs/README.md) directory, including: + +- [Getting started](docs/guides/getting-started.md) +- [Architecture](docs/architecture/overview.md) +- [API reference](docs/api/main-api.md) +- [Platform guides](docs/platforms/) +- [Development and contributing](docs/development/CONTRIBUTING.md) +- [Troubleshooting](docs/troubleshooting/common-issues.md) + +--- + ## Basic Usage ### In-App (Flutter) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..9fa24e2 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Supported versions + +We release security fixes for the latest stable major version. Older major versions may receive fixes on a best-effort basis. + +## Reporting a vulnerability + +Please report security issues **privately**. 
Do not open a public issue.
+
+- **Email**: Prefer contacting the maintainers through the repository owner (e.g. via GitHub organization or the contact listed on [pub.dev](https://pub.dev/packages/zstandard)).
+- **What to include**: Description of the issue, steps to reproduce, affected versions, and impact if possible.
+- **Response**: We aim to acknowledge within a few days and will work with you on a fix and disclosure timeline.
+
+## Security practices in this repository
+
+- **Supply chain**: GitHub Actions versions are kept current by Dependabot (`.github/dependabot.yml`), and the `facebook/zstd` source is vendored at the repository root (`zstd/`) and updated manually, so release builds do not clone it from the network.
+- **Pub.dev**: Publishing uses credentials from `dart pub login` on each self-hosted runner (standard Dart config locations). Keep runner access and credentials under control; rotate or re-login when needed.
+- **Permissions**: CI jobs request minimal `permissions` (e.g. `contents: read` by default; `contents: write` only where needed).
+- **Self-hosted runners**: If you use self-hosted runners, keep them updated, isolated, and consider ephemeral runners where possible.
+
+For usage-related security (input validation, untrusted data, memory), see [docs/guides/security.md](docs/guides/security.md).
diff --git a/docs/GLOSSARY.md b/docs/GLOSSARY.md
new file mode 100644
index 0000000..8e7bf18
--- /dev/null
+++ b/docs/GLOSSARY.md
@@ -0,0 +1,43 @@
+# Glossary
+
+Definitions of terms and acronyms used in the Zstandard plugin and CLI documentation.
+
+**API** — Application Programming Interface. The public methods and types exposed by the plugin (e.g. `Zstandard().compress()`, `decompress()`).
+
+**ARM64** — 64-bit ARM architecture. Used by Apple Silicon (M1/M2/M3), many Android devices, and some Windows/Linux machines.
+
+**CLI** — Command-Line Interface. The `zstandard_cli` package provides both an in-code API and command-line tools (`dart run zstandard_cli:compress`, `decompress`).
+
+**Compression level** — Integer from 1 to 22 controlling the trade-off between speed and compression ratio. Level 1 is fastest; level 22 gives the smallest output.
+
+**Dart** — The programming language used for the plugin and CLI. See [dart.dev](https://dart.dev).
+
+**Decompress** — Convert Zstandard-compressed bytes back to the original uncompressed data.
+
+**FFI** — Foreign Function Interface. The mechanism in Dart that allows calling native (C/C++) code. The plugin uses FFI to call the zstd C library on Android, iOS, macOS, Windows, and Linux.
+
+**Flutter** — The UI toolkit and framework. The main **zstandard** package is a Flutter plugin; **zstandard_cli** is pure Dart (no Flutter).
+
+**Federated plugin** — A Flutter plugin that delegates to platform-specific implementations (e.g. zstandard_android, zstandard_ios) rather than implementing everything in one package.
+
+**Frame** — A Zstandard-compressed unit of data with a header and optional checksum. The API compresses and decompresses one frame at a time (or a single buffer that may contain a frame).
+
+**Isolate** — A Dart concurrency unit. The native implementations may run compression/decompression in a separate isolate so the UI thread is not blocked.
+
+**LCOV** — A format for code coverage data. `flutter test --coverage` and the coverage package produce LCOV (e.g. `lcov.info`) for tools like Codecov.
+
+**Native** — Code that runs on the host OS (C/C++, Kotlin, Swift, etc.) as opposed to Dart or JavaScript. The zstd library is native; the plugin wraps it via FFI.
+ +**Precompiled** — Built in advance (e.g. the CLI’s native libraries for macOS, Windows, Linux are precompiled and shipped with the package). + +**pub.dev** — The default package repository for Dart and Flutter. Packages are published there with `dart pub publish`. + +**Uint8List** — A Dart type for a list of unsigned 8-bit bytes. The plugin’s compress and decompress APIs use `Uint8List` for input and output. + +**WASM / WebAssembly** — A binary format for running code in browsers. The **zstandard_web** implementation compiles zstd to WASM and loads it via JavaScript. + +**x64 / x86_64** — 64-bit Intel/AMD architecture. Supported on macOS, Windows, Linux, and Android emulators. + +**zstd** — Zstandard. The compression algorithm and library developed by Meta (Facebook). The plugin and CLI wrap the official [facebook/zstd](https://github.com/facebook/zstd) C library. + +**Zstandard** — Same as zstd; the full name of the algorithm and format. diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..17cc97c --- /dev/null +++ b/docs/README.md @@ -0,0 +1,73 @@ +# Zstandard Documentation + +Welcome to the Zstandard Flutter plugin documentation. This directory contains comprehensive guides, API references, and development documentation for the zstandard compression ecosystem. + +## Documentation Index + +### Architecture + +- [Overview](architecture/overview.md) — High-level architecture and federated plugin design +- [Platform Interface](architecture/platform-interface.md) — Interface contract and platform abstraction +- [FFI Implementation](architecture/ffi-implementation.md) — Native FFI pattern for mobile and desktop +- [Web Implementation](architecture/web-implementation.md) — WebAssembly and JS interop approach +- [Isolate Pattern](architecture/isolate-pattern.md) — Async compression with isolates +- [Performance](architecture/performance.md) — Benchmarks and optimization techniques + +### API Reference + +- [Main API](api/main-api.md) — Zstandard class and public API +- [Extensions](api/extensions.md) — ZstandardExt extension methods +- [Platform Interface API](api/platform-interface.md) — Platform contract reference +- [CLI API](api/cli-api.md) — zstandard_cli package API + +### Guides + +- [Getting Started](guides/getting-started.md) — Quick start guide +- [Installation](guides/installation.md) — Platform-specific setup +- [Usage Examples](guides/usage-examples.md) — Real-world examples +- [Compression Levels](guides/compression-levels.md) — Performance vs ratio guide +- [Error Handling](guides/error-handling.md) — Error scenarios and recovery +- [Security](guides/security.md) — Input validation and handling untrusted data +- [Advanced Usage](guides/advanced-usage.md) — Large files, chunking, and memory optimization +- [Best Practices](guides/best-practices.md) — Production checklist and anti-patterns +- [Performance Tips](guides/performance-tips.md) — Optimization guide +- [Migration Guide](guides/migration-guide.md) — Version migration + +### Platform-Specific + +- [Android](platforms/android.md) +- [iOS](platforms/ios.md) +- [macOS](platforms/macos.md) +- [Windows](platforms/windows.md) +- [Linux](platforms/linux.md) +- [Web](platforms/web.md) +- [CLI](platforms/cli.md) + +### Development + +- [Contributing](development/CONTRIBUTING.md) — Contribution guidelines +- [Setup](development/setup.md) — Development environment +- [Building](development/building.md) — Build instructions +- [Testing](development/testing.md) — Testing guidelines +- [Code 
Style](development/code-style.md) — Coding standards
- [CI/CD](development/ci-cd.md) — GitHub Actions and automation
- [Release Process](development/release-process.md) — Release workflow

### Troubleshooting

- [Common Issues](troubleshooting/common-issues.md) — FAQ and solutions
- [Platform Issues](troubleshooting/platform-issues.md) — Platform-specific issues
- [Debugging](troubleshooting/debugging.md) — Debug techniques

- [Glossary](GLOSSARY.md) — Terms and acronyms

## Package Overview

| Package | Description |
|---------|-------------|
| [zstandard](https://pub.dev/packages/zstandard) | Main Flutter plugin for cross-platform compression |
| [zstandard_cli](https://pub.dev/packages/zstandard_cli) | Pure Dart CLI for macOS, Windows, and Linux |
| [zstandard_platform_interface](https://pub.dev/packages/zstandard_platform_interface) | Platform interface contract |
| zstandard_android, zstandard_ios, zstandard_macos | Mobile and macOS implementations |
| zstandard_linux, zstandard_windows | Desktop implementations |
| zstandard_web | Web (WebAssembly) implementation |

diff --git a/docs/api/cli-api.md b/docs/api/cli-api.md new file mode 100644 index 0000000..c6fb56d --- /dev/null +++ b/docs/api/cli-api.md @@ -0,0 +1,104 @@

# CLI API Reference

The **zstandard_cli** package provides Zstandard compression and decompression for **pure Dart** applications targeting **macOS, Windows, and Linux** (no Flutter). It uses FFI with precompiled native zstd libraries.

## Import

```dart
import 'package:zstandard_cli/zstandard_cli.dart';
```

## ZstandardCLI Class

### Constructor

```dart
ZstandardCLI()
```

Creates a new instance. All instances share the same underlying native library, which is loaded once per process.

### getPlatformVersion

```dart
Future<String?> getPlatformVersion()
```

Returns a string describing the current platform (e.g. `"macOS 14.0"`, `"Windows 10"`, `"Linux ..."`). Useful for CLI output or debugging.

### compress

```dart
Future<Uint8List?> compress(Uint8List data, {int compressionLevel = 3})
```

Compresses `data` using Zstandard.

- **data**: Bytes to compress. Empty input is returned unchanged as an empty `Uint8List`.
- **compressionLevel**: Optional; default **3**. Range 1–22.
- **Returns**: Compressed bytes, or `null` on failure.

### decompress

```dart
Future<Uint8List?> decompress(Uint8List data)
```

Decompresses Zstandard-compressed `data`.

- **data**: Compressed bytes (full zstd frame).
- **Returns**: Decompressed bytes, or `null` on failure.

---

## Extensions (zstandard_cli)

The package also defines **ZstandardExt** on `Uint8List?`:

### compress

```dart
Future<Uint8List?> compress({int compressionLevel = 3})
```

Same as `ZstandardCLI().compress(this, compressionLevel: compressionLevel)`. Returns `null` if the receiver is `null`.

### decompress

```dart
Future<Uint8List?> decompress()
```

Same as `ZstandardCLI().decompress(this)`. Returns `null` if the receiver is `null`.

**Example:**

```dart
final data = Uint8List.fromList([1, 2, 3, 4, 5]);
final compressed = await data.compress(compressionLevel: 5);
final decompressed = await compressed?.decompress();
```

## Command-Line Entry Points

When used as a CLI (e.g. `dart run zstandard_cli:compress` / `zstandard_cli:decompress`), the package provides:

- **compress**: Reads a file (or stdin), compresses with a given level, and writes to a file (or stdout). Invoked as `dart run zstandard_cli:compress`.
- **decompress**: Reads a compressed file, decompresses it, and writes the output. Invoked as `dart run zstandard_cli:decompress`.

See the package README and [Platforms — CLI](../platforms/cli.md) for exact arguments and file naming.
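Putting the class API together, a minimal end-to-end roundtrip looks like the following sketch. The file name `example.txt` and the printed messages are illustrative only; the `ZstandardCLI` methods are as documented above.

```dart
import 'dart:io';

import 'package:zstandard_cli/zstandard_cli.dart';

Future<void> main() async {
  final cli = ZstandardCLI();

  // Read some bytes to compress (illustrative file name).
  final original = await File('example.txt').readAsBytes();

  final compressed = await cli.compress(original, compressionLevel: 5);
  if (compressed == null) {
    stderr.writeln('Compression failed');
    exitCode = 1;
    return;
  }

  final restored = await cli.decompress(compressed);
  print('original: ${original.length} bytes, '
      'compressed: ${compressed.length} bytes, '
      'roundtrip ok: ${restored != null && restored.length == original.length}');
}
```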
## Platform Support

| Platform | Architectures | Precompiled library |
|----------|----------------|----------------------|
| macOS | x64, arm64 | Yes |
| Windows | x64, arm64 | Yes |
| Linux | x64, arm64 | Yes |

The library is loaded at runtime from the package’s resources based on the current platform and architecture.

## See Also

- [Platforms — CLI](../platforms/cli.md)
- [Main API](main-api.md) — Flutter plugin API

diff --git a/docs/api/extensions.md b/docs/api/extensions.md new file mode 100644 index 0000000..31bd49f --- /dev/null +++ b/docs/api/extensions.md @@ -0,0 +1,71 @@

# Extensions API Reference

The **zstandard** package adds extension methods on `Uint8List?` so you can call `compress` and `decompress` directly on byte data.

## Import

```dart
import 'package:zstandard/zstandard.dart';
```

The extensions are exported from the main library.

## ZstandardExt on Uint8List?

Extension on nullable `Uint8List`. If the receiver is `null`, both methods return `null` without calling the platform.

### compress

```dart
Future<Uint8List?> compress({int compressionLevel = 3})
```

Compresses this byte list using the default (or specified) compression level.

- **compressionLevel**: Optional; defaults to **3**. Range 1–22.
- **Returns**: Compressed bytes, or `null` if the receiver is `null` or compression failed.

**Example:**

```dart
final data = Uint8List.fromList([10, 20, 30, 40, 50]);
final compressed = await data.compress();
final compressedHigh = await data.compress(compressionLevel: 10);
```

### decompress

```dart
Future<Uint8List?> decompress()
```

Decompresses this byte list, which must be Zstandard-compressed data.

- **Returns**: Decompressed bytes, or `null` if the receiver is `null` or decompression failed.

**Example:**

```dart
final compressed = await data.compress(compressionLevel: 3);
final decompressed = await compressed?.decompress();
```

## Null Safety

- On a `null` receiver, `compress()` and `decompress()` return `null` and do not throw.
- Always check the result for `null` when the source might be null or when the operation can fail.

**Example:**

```dart
Uint8List? maybeData = ...;
final compressed = await maybeData.compress();
if (compressed != null) {
  final back = await compressed.decompress();
}
```

## See Also

- [Main API](main-api.md) — `Zstandard` class
- [Usage Examples](../guides/usage-examples.md)
diff --git a/docs/api/main-api.md b/docs/api/main-api.md new file mode 100644 index 0000000..9b3b53e --- /dev/null +++ b/docs/api/main-api.md @@ -0,0 +1,102 @@

# Main API Reference

The main package **zstandard** exposes a single public class and re-exports the extension methods. Applications should only depend on this package.

## Zstandard Class

**Library:** `package:zstandard/zstandard.dart`

### Constructor

```dart
factory Zstandard()
```

Creates or returns the singleton instance. Use this to obtain the shared `Zstandard` instance.

**Example:**

```dart
final zstandard = Zstandard();
```

### Instance Property

```dart
ZstandardPlatform get instance
```

Returns the currently registered platform implementation. Typically you do not need to access this; use `compress` and `decompress` on the `Zstandard` instance instead. It is useful for testing (mocking the platform) or for calling `getPlatformVersion`.

### getPlatformVersion

```dart
Future<String?> getPlatformVersion()
```

Returns a platform-specific version or identifier string (e.g. for display or debugging). May be `null` if the platform does not provide one.

### compress

```dart
Future<Uint8List?> compress(Uint8List data, int compressionLevel)
```

Compresses `data` using Zstandard with the given `compressionLevel`.

- **data**: Raw bytes to compress. Can be any length; empty input is allowed (behavior is platform-dependent).
- **compressionLevel**: Integer from **1** (fastest, least compression) to **22** (slowest, best compression). The default in the extensions is **3**.
- **Returns**: Compressed bytes as `Uint8List`, or `null` if compression failed.

**Example:**

```dart
final zstandard = Zstandard();
final bytes = Uint8List.fromList([1, 2, 3, 4, 5]);
final compressed = await zstandard.compress(bytes, 3);
if (compressed != null) {
  // use compressed
}
```

### decompress

```dart
Future<Uint8List?> decompress(Uint8List data)
```

Decompresses Zstandard-compressed `data`.

- **data**: Bytes produced by `compress` (or any valid zstd frame).
- **Returns**: Decompressed bytes as `Uint8List`, or `null` if decompression failed (e.g. invalid or corrupted input).

**Example:**

```dart
final decompressed = await zstandard.decompress(compressed!);
if (decompressed != null) {
  // use decompressed
}
```

## Compression Levels

| Level | Typical use | Speed | Ratio |
|-------|------------------|--------|--------|
| 1 | Real-time, low latency | Fastest | Lower |
| 3 | Default balance | Fast | Good |
| 10–19 | High compression | Slower | Higher |
| 20–22 | Maximum ratio | Slowest | Best |

Invalid levels (e.g. < 1 or > 22) may be accepted or rejected depending on the platform; avoid them for portability.
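If levels come from user input or configuration, a small guard keeps you inside the portable range. The helper below is not part of the package API; it is a hedged sketch you can adapt.

```dart
import 'dart:typed_data';

import 'package:zstandard/zstandard.dart';

/// Clamps [level] into zstd's portable range (1–22) before compressing.
/// Hypothetical helper; not part of the zstandard package.
Future<Uint8List?> compressClamped(Uint8List data, int level) {
  final safeLevel = level.clamp(1, 22).toInt();
  return Zstandard().compress(data, safeLevel);
}
```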
## Threading and Performance

- All methods return `Future`s (`Future<Uint8List?>`, or `Future<String?>` for `getPlatformVersion`). Heavy work may be offloaded to a background isolate on native platforms to avoid blocking the UI.
- For large data, prefer the main plugin API (which can use isolates) rather than blocking the main thread.

## See Also

- [Extensions](extensions.md) — `compress` and `decompress` on `Uint8List?`
- [Platform Interface](platform-interface.md) — Contract implemented by each platform
- [Compression Levels Guide](../guides/compression-levels.md)

diff --git a/docs/api/platform-interface.md b/docs/api/platform-interface.md new file mode 100644 index 0000000..7854a68 --- /dev/null +++ b/docs/api/platform-interface.md @@ -0,0 +1,76 @@

# Platform Interface API Reference

The **zstandard_platform_interface** package defines the contract that every platform implementation (Android, iOS, macOS, Linux, Windows, Web) must satisfy. Application code typically uses the main **zstandard** package and does not depend on this package directly.

## ZstandardPlatform

**Library:** `package:zstandard_platform_interface/zstandard_platform_interface.dart`

Abstract base class for all platform implementations. Extends `PlatformInterface` from the plugin_platform_interface package.

### instance (static getter)

```dart
static ZstandardPlatform get instance
```

Returns the current platform implementation. Defaults to `MethodChannelZstandardPlatform`.

### instance (static setter)

```dart
static set instance(ZstandardPlatform instance)
```

Sets the platform implementation. Only instances created with the correct token (from this package) can be set. Platform packages call this in their `registerWith()`.

### getPlatformVersion

```dart
Future<String?> getPlatformVersion()
```

Returns a platform-specific version or identifier string. The base implementation throws `UnimplementedError`.

### compress

```dart
Future<Uint8List?> compress(Uint8List data, int compressionLevel)
```

Compresses `data` at the given `compressionLevel` (1–22). The base implementation throws `UnimplementedError`.

### decompress

```dart
Future<Uint8List?> decompress(Uint8List data)
```

Decompresses Zstandard-compressed `data`. The base implementation throws `UnimplementedError`.

---

## MethodChannelZstandardPlatform

Default implementation used when no native implementation is registered (e.g. in tests or on unsupported platforms).

- **getPlatformVersion()**: Implemented; invokes the method channel `plugins.flutter.io/zstandard` with the method `getPlatformVersion`.
- **compress()**: Not implemented; throws `UnimplementedError`.
- **decompress()**: Not implemented; throws `UnimplementedError`.

So in environments where only the method channel is available, only `getPlatformVersion` is usable unless a test sets a mock platform.

## Implementing the Interface

Platform packages:

1. Extend `ZstandardPlatform`.
2. Implement `getPlatformVersion`, `compress`, and `decompress`.
3. In registration, set `ZstandardPlatform.instance = MyPlatform()` (with the token from the interface).

See [Architecture — Platform Interface](../architecture/platform-interface.md) for the registration flow.

## See Also

- [Architecture — Platform Interface](../architecture/platform-interface.md)
- [Main API](main-api.md)

diff --git a/docs/architecture/ffi-implementation.md b/docs/architecture/ffi-implementation.md new file mode 100644 index 0000000..670d278 --- /dev/null +++ b/docs/architecture/ffi-implementation.md @@ -0,0 +1,70 @@

# FFI Implementation

Native platforms (Android, iOS, macOS, Linux, Windows) use Dart’s **FFI (Foreign Function Interface)** to call the Zstandard C library directly. This document describes the shared pattern used across these implementations.

## Overview

Each native platform package:

1. Ships or builds the official [facebook/zstd](https://github.com/facebook/zstd) C library for that platform.
2. Generates Dart FFI bindings (e.g. with `ffigen`) for the zstd functions used by the plugin.
3. Opens the native library at runtime and calls `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize` from Dart.
4. Manages memory (allocate output buffers, copy bytes, free) using `package:ffi` (e.g. `malloc.allocate` / `malloc.free`).

## Shared C API Usage

The plugin uses a minimal subset of the zstd C API:

| C function | Purpose |
|------------|---------|
| `ZSTD_compressBound(srcSize)` | Upper bound for the compressed size; used to allocate the destination buffer. |
| `ZSTD_compress(dst, dstCapacity, src, srcSize, compressionLevel)` | Compresses `src` into `dst`; returns the compressed size or an error code. |
| `ZSTD_getFrameContentSize(src, srcSize)` | Gets the original size from a zstd frame (or -1 / -2 if unknown/error). |
| `ZSTD_decompress(dst, dstCapacity, src, srcSize)` | Decompresses `src` into `dst`; returns the decompressed size or an error code. |

## Binding Generation

Bindings are typically generated with **ffigen** from the zstd headers. The generated Dart file (e.g.
`zstandard_linux_bindings_generated.dart`) exposes a class that wraps the `DynamicLibrary` and provides typed Dart methods for the C functions above. + +Example pattern: + +```dart +final DynamicLibrary _dylib = DynamicLibrary.open('libzstandard_linux_plugin.so'); +final ZstandardLinuxBindings _bindings = ZstandardLinuxBindings(_dylib); +``` + +Library names and loading differ per platform (e.g. `.so` on Linux/Android, `.dylib` on macOS/iOS, `.dll` on Windows). Each platform’s `lib/` code opens the appropriate library and instantiates the bindings once. + +## Memory Management + +Compression and decompression follow the same pattern across native implementations: + +1. **Allocate** input buffer with `malloc.allocate(size)` and copy `Uint8List` into it. +2. **Allocate** output buffer: + - Compression: size = `ZSTD_compressBound(srcSize)` + - Decompression: size = from `ZSTD_getFrameContentSize` or a fallback (e.g. `compressedSize * 20`) when size is unknown. +3. **Call** `ZSTD_compress` or `ZSTD_decompress`. +4. **Copy** result into a new `Uint8List` (only the written length). +5. **Free** both buffers in a `finally` block so memory is always released. + +All platforms use this pattern to avoid leaks and to stay safe with Dart’s GC and native memory. + +## Platform-Specific Details + +- **Android**: Native code is built as part of the Android project; the Dart plugin loads the library via the engine (e.g. `DynamicLibrary.open('libzstandard_android_plugin.so')` or similar as configured). +- **iOS / macOS**: zstd is built as a static library or framework and linked into the app; the plugin opens the corresponding dynamic library or uses the linked symbols as per project setup. +- **Linux**: CMake builds `libzstandard_linux_plugin.so`; the plugin loads it by name. +- **Windows**: CMake builds `zstandard_windows_plugin.dll` (or similar); the plugin loads it by name. + +Each platform’s README and `docs/platforms/` guide should describe how to build and where the library is placed. + +## Error Handling + +- `ZSTD_compress` and `ZSTD_decompress` return negative values on error. The Dart code checks for `result > 0` and returns `null` otherwise (or throws, depending on the package’s public API contract). +- `ZSTD_getFrameContentSize` returns -1 (unknown) or -2 (error). Implementations use a fallback destination size when the frame size is unknown. + +## Related Documentation + +- [Overview](overview.md) +- [Platform Interface](platform-interface.md) +- [Isolate Pattern](isolate-pattern.md) — Offloading compression/decompression to isolates on native platforms diff --git a/docs/architecture/isolate-pattern.md b/docs/architecture/isolate-pattern.md new file mode 100644 index 0000000..e5bd5cd --- /dev/null +++ b/docs/architecture/isolate-pattern.md @@ -0,0 +1,64 @@ +# Isolate Pattern for Async Compression + +On native platforms (Android, iOS, macOS, Linux, Windows), the plugin can run compression and decompression in a **background isolate** so that CPU-heavy work does not block the UI thread. This document describes the pattern used in the native implementations. + +## Motivation + +- **ZSTD_compress** and **ZSTD_decompress** are CPU-bound and can take noticeable time for large inputs. +- Dart is single-threaded per isolate. Running zstd on the main isolate would cause frame drops and jank in a Flutter app. +- **Isolates** allow running Dart code (and FFI calls) on a separate thread. The main isolate sends input and receives the result asynchronously. 
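As a quick illustration of this motivation (distinct from the long-lived helper design described next), `Isolate.run` can move a synchronous FFI call off the main isolate. Here `compressSync` is a placeholder for a synchronous wrapper around the generated bindings, not a real package function.

```dart
import 'dart:isolate';
import 'dart:typed_data';

/// Placeholder for a synchronous FFI wrapper around ZSTD_compress.
Uint8List compressSync(Uint8List data, int level) {
  // ... call the generated bindings here ...
  throw UnimplementedError();
}

/// Runs the CPU-bound call on a short-lived worker isolate so the
/// UI isolate keeps rendering.
Future<Uint8List> compressOffMainThread(Uint8List data, int level) =>
    Isolate.run(() => compressSync(data, level));
```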
## Design

The native platform packages (e.g. zstandard_linux) implement:

1. **Synchronous FFI wrappers**
   Top-level functions such as `compress(...)` and `decompress(...)` that call the bindings and return the result size. These run in whichever isolate calls them.

2. **A long-lived helper isolate**
   Created once (lazily) and used for all async requests. It holds the FFI bindings and runs only zstd calls.

3. **Async entry points**
   `compressAsync` and `decompressAsync` (or similar) that:
   - Allocate buffers in the **main** isolate (or a sending isolate),
   - Send a request to the helper isolate (e.g. via `SendPort`),
   - The helper isolate runs `ZSTD_compress` or `ZSTD_decompress` and sends back the result (e.g. the written size or an error),
   - The main isolate copies the result into a new `Uint8List` and completes the `Future`.

4. **Public API**
   The platform’s `compress`/`decompress` methods that implement `ZstandardPlatform` may use the synchronous wrappers on the main isolate for small inputs, or the async path for large inputs. The exact policy (e.g. the threshold) is implementation-defined. Alternatively, all work may be offloaded to the helper isolate for simplicity.

## Communication

- **Request types**: e.g. `_CompressRequest` and `_DecompressRequest`, holding a request id, pointers to the src/dst buffers, sizes, and (for compress) the compression level.
- **Response types**: e.g. `_CompressResponse` and `_DecompressResponse`, holding the request id and the result (size or error code).
- **Ports**: The main isolate has a `ReceivePort` that listens for:
  - The helper’s `SendPort` (once, at startup),
  - Response objects. Each response is matched to a `Completer` by request id; the completer is completed with the result.
- **Helper isolate**: Has a `ReceivePort` that listens for request objects, runs the appropriate zstd call, and sends the corresponding response back.
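A stripped-down sketch of this request/response wiring, sending plain bytes rather than pointers. All names here are illustrative, not the package's actual private types, and the helper fakes the zstd call:

```dart
import 'dart:async';
import 'dart:isolate';
import 'dart:typed_data';

// Illustrative request/response pair; the real package uses its own types.
class _CompressRequest {
  _CompressRequest(this.id, this.data, this.level);
  final int id;
  final Uint8List data;
  final int level;
}

class _CompressResponse {
  _CompressResponse(this.id, this.result);
  final int id;
  final Uint8List? result;
}

final _pending = <int, Completer<Uint8List?>>{};
SendPort? _helper;
int _nextId = 0;

Future<void> _startHelper() async {
  final ready = Completer<SendPort>();
  final receive = ReceivePort()
    ..listen((message) {
      if (message is SendPort) {
        ready.complete(message); // helper announced its port
      } else if (message is _CompressResponse) {
        _pending.remove(message.id)?.complete(message.result);
      }
    });
  await Isolate.spawn(_helperMain, receive.sendPort);
  _helper = await ready.future;
}

void _helperMain(SendPort toMain) {
  final receive = ReceivePort();
  toMain.send(receive.sendPort);
  receive.listen((message) {
    if (message is _CompressRequest) {
      // Stand-in for the synchronous FFI call (ZSTD_compress).
      final result = Uint8List.fromList(message.data); // fake "compression"
      toMain.send(_CompressResponse(message.id, result));
    }
  });
}

Future<Uint8List?> compressAsync(Uint8List data, int level) async {
  if (_helper == null) await _startHelper();
  final completer = Completer<Uint8List?>();
  final id = _nextId++;
  _pending[id] = completer;
  _helper!.send(_CompressRequest(id, data, level));
  return completer.future;
}
```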
## Memory and Pointers

- **Pointers** (e.g. `Pointer<Uint8>`) cannot be shared safely between isolates; only plain values and ordinary Dart objects such as `Uint8List` should cross the port. So in the typical design, buffers are allocated in the **helper** isolate (or in shared memory if the implementation uses it). The main isolate sends only the **data** (e.g. as `Uint8List`); the helper isolate allocates, compresses/decompresses, and sends the result back as bytes (or a copy).
- Alternatively, the main isolate allocates buffers and sends a **copy** of the data; the helper isolate allocates its own buffers, copies the input, runs zstd, and sends back the output bytes. The exact approach depends on the package implementation.

## Usage in the Plugin

Application code does not see isolates directly. It only calls:

- `Zstandard().compress(data, level)`
- `Zstandard().decompress(data)`
- or the extension methods on `Uint8List?`

The platform implementation (e.g. `ZstandardLinux`) implements these as `Future`-returning methods and may use the isolate-based async path internally so that the UI stays responsive.

## Web

The web implementation does **not** use this isolate pattern. It uses the JS/WASM API on the main thread. For large data on web, consider doing the work in a Web Worker if the implementation supports it, or chunk the input to avoid long main-thread blocks.

## Related Documentation

- [Overview](overview.md)
- [FFI Implementation](ffi-implementation.md)
- [Platform Interface](platform-interface.md)

diff --git a/docs/architecture/overview.md b/docs/architecture/overview.md new file mode 100644 index 0000000..2eab880 --- /dev/null +++ b/docs/architecture/overview.md @@ -0,0 +1,89 @@

# Architecture Overview

The Zstandard Flutter plugin follows a **federated plugin** architecture, where a main package delegates platform-specific work to separate packages while exposing a single unified API to application code.

## High-Level Design

```mermaid
graph TD
    App[Flutter Application]
    Main[zstandard<br/>Main Plugin]
    Interface[zstandard_platform_interface<br/>Abstract Contract]

    App --> Main
    Main --> Interface

    Interface --> Android[zstandard_android]
    Interface --> iOS[zstandard_ios]
    Interface --> macOS[zstandard_macos]
    Interface --> Linux[zstandard_linux]
    Interface --> Windows[zstandard_windows]
    Interface --> Web[zstandard_web]

    Android --> FFI1[FFI + Native zstd]
    iOS --> FFI2[FFI + Native zstd]
    macOS --> FFI3[FFI + Native zstd]
    Linux --> FFI4[FFI + Native zstd]
    Windows --> FFI5[FFI + Native zstd]
    Web --> WASM[WebAssembly + zstd.js]
```

## Components

### Main Plugin (zstandard)

The main package is the only one applications depend on. It:

- Exposes the public API: the `Zstandard` class and the `ZstandardExt` extension on `Uint8List?`
- Uses conditional imports to load either the native or the web implementation (`ZstandardImpl`)
- Contains no platform code; it delegates to the platform interface and registered implementations

### Platform Interface (zstandard_platform_interface)

Defines the contract all platform implementations must satisfy:

- **ZstandardPlatform**: Abstract base class with `getPlatformVersion()`, `compress()`, and `decompress()`
- **MethodChannelZstandardPlatform**: Default implementation used when no native implementation is registered (e.g. in unit tests); only `getPlatformVersion()` is implemented, via a method channel
- Platform packages extend `ZstandardPlatform`, implement the three methods, and register themselves via `ZstandardPlatform.instance = ...`

### Platform Implementations

| Package | Technology | Notes |
|---------|------------|--------|
| zstandard_android | FFI + JNI | Native zstd library in `android/`, Dart bindings via FFI |
| zstandard_ios | FFI | Native zstd in `Classes/zstd/` (synced from repo root `zstd/`), CocoaPods |
| zstandard_macos | FFI | Native zstd in `Classes/zstd/` (synced from repo root `zstd/`), CocoaPods |
| zstandard_linux | FFI | Native zstd in `src/`, CMake in `linux/` |
| zstandard_windows | FFI | Native zstd in `src/`, CMake in `windows/` |
| zstandard_web | JS interop + WASM | zstd.js / zstd.wasm loaded from `web/` |

### CLI Package (zstandard_cli)

Standalone pure Dart package for desktop (macOS, Windows, Linux). It:

- Does not depend on Flutter
- Loads precompiled native zstd libraries per platform/architecture
- Provides the same `compress`/`decompress` API and extensions for use in Dart CLI apps or `dart run`

## Platform Detection and Registration

1. The application calls `Zstandard().compress(...)` or uses the extension.
2. The main plugin uses `ZstandardImpl()`, which uses `PlatformManager` to detect the current platform (`kIsWeb`, `Platform.isAndroid`, etc.).
3. On first access to `instance`, the appropriate platform implementation is registered (e.g. `ZstandardAndroid.registerWith()`) and `ZstandardPlatform.instance` is set.
4. All subsequent calls use that registered instance.

Web uses a separate implementation path: `ZstandardImpl` is conditionally imported from `zstandard_impl_web.dart` when not on `dart:io`, and registers the web implementation.

## Data Flow

1. **Compression**: `Uint8List` → main plugin → platform instance → native/JS implementation → compressed `Uint8List?`
2. **Decompression**: compressed `Uint8List` → main plugin → platform instance → native/JS implementation → decompressed `Uint8List?`

All APIs are asynchronous (returning `Future`s). On native platforms, heavy work can be offloaded to isolates to avoid blocking the UI.
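The conditional import mentioned above typically looks like the sketch below. The docs name `zstandard_impl_web.dart` explicitly; the native file name and the exact `dart.library` condition are assumptions for illustration.

```dart
// zstandard.dart (sketch): pick the implementation at compile time.
// `zstandard_impl_native.dart` is an assumed name for the dart:io path.
export 'src/zstandard_impl_native.dart'
    if (dart.library.js_interop) 'src/zstandard_impl_web.dart';
```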
+ +## Related Documentation + +- [Platform Interface](platform-interface.md) — Contract and registration details +- [FFI Implementation](ffi-implementation.md) — Native platform implementation +- [Web Implementation](web-implementation.md) — Web platform implementation +- [Isolate Pattern](isolate-pattern.md) — Async compression with isolates diff --git a/docs/architecture/performance.md b/docs/architecture/performance.md new file mode 100644 index 0000000..e715a40 --- /dev/null +++ b/docs/architecture/performance.md @@ -0,0 +1,62 @@ +# Performance + +This document describes performance characteristics of the Zstandard plugin and CLI, optimization techniques, and how to measure and compare behaviour across platforms. + +## Overview + +- **Compression** and **decompression** speed and memory use depend on: + - **Compression level** (1 = fastest, 22 = best ratio) + - **Input size** and **content** (repetitive data compresses better) + - **Platform** (native FFI vs WebAssembly, isolate usage) +- **Decompression** is typically faster than compression and largely independent of the level used to compress. + +## Compression levels + +| Level | Relative speed | Relative ratio | Typical use | +|-------|----------------|----------------|-------------| +| 1 | Fastest | Lowest | Real-time, interactive | +| 3 | Fast | Good | **Default**; general use | +| 5–9 | Medium | Better | Balanced | +| 10–19 | Slower | High | Storage, archival | +| 20–22 | Slowest | Highest | Maximum ratio | + +Higher levels use more CPU and memory during compression; decompression memory and speed are less affected. + +## Platform behaviour + +- **Native (Android, iOS, macOS, Windows, Linux)**: Work runs in a **background isolate** by default, so the UI thread is not blocked. Throughput is comparable to the underlying zstd C library; some builds disable assembly optimizations for portability (e.g. Android, iOS). +- **Web**: Runs on the **main thread** (no isolates). For large data, prefer smaller chunks or offload to a Web Worker if you implement it. Throughput is generally lower than native. +- **CLI**: Runs in the **current isolate**; suitable for CLI/server where blocking is acceptable. Throughput is similar to native. + +See the [platform guides](../platforms/) for platform-specific performance notes. + +## Optimization techniques + +1. **Choose the right level**: Use 1–3 for speed, 10+ for size when CPU and time allow. +2. **Chunk large data**: Process in fixed-size chunks to limit peak memory and (on web) keep the UI responsive. See [Advanced usage](../guides/advanced-usage.md). +3. **Reuse the instance**: `Zstandard()` is a singleton; no need to cache it. Same for `ZstandardCLI()`. +4. **Limit concurrency**: Many simultaneous compress/decompress calls increase peak memory; batch or limit parallelism if needed. +5. **Avoid compressing very small payloads**: Frame overhead can make compressed output larger than input; consider a size threshold below which you skip compression. + +## Measuring performance + +- **Time**: Use `Stopwatch` (or platform equivalents) around `compress`/`decompress` for your typical payload sizes and levels. +- **Memory**: Use Dart DevTools, Xcode Instruments, or Android Profiler to observe allocations and peak usage. +- **Benchmarks**: The repo can include a benchmark suite (e.g. under `benchmark/`) to measure throughput and detect regressions; run it locally or in CI. + +## Benchmarks and regression detection + +If a benchmark suite is present (e.g. 
`dart run benchmark/compression_benchmark.dart` or similar), run it before releases and after changes to compression paths. Compare:

- Compression throughput (MB/s or similar) for levels 1, 3, 10, and 22.
- Decompression throughput.
- Roundtrip (compress then decompress) correctness and time.

Setting baseline numbers and comparing them in CI helps catch performance regressions.

## See also

- [Compression levels](../guides/compression-levels.md)
- [Performance tips](../guides/performance-tips.md)
- [Advanced usage](../guides/advanced-usage.md)
- [Platform guides](../platforms/)

diff --git a/docs/architecture/platform-interface.md b/docs/architecture/platform-interface.md new file mode 100644 index 0000000..40d1e7c --- /dev/null +++ b/docs/architecture/platform-interface.md @@ -0,0 +1,67 @@

# Platform Interface Design

The platform interface defines the contract between the main zstandard plugin and each platform-specific implementation. It uses the [plugin_platform_interface](https://pub.dev/packages/plugin_platform_interface) pattern for type-safe, testable platform abstraction.

## Interface Contract

All platform implementations must extend `ZstandardPlatform` and implement:

| Method | Signature | Description |
|--------|-----------|-------------|
| `getPlatformVersion` | `Future<String?> getPlatformVersion()` | Returns a string identifying the platform (e.g. for debugging or display). |
| `compress` | `Future<Uint8List?> compress(Uint8List data, int compressionLevel)` | Compresses `data` with the given level (1–22). Returns compressed bytes or `null` on failure. |
| `decompress` | `Future<Uint8List?> decompress(Uint8List data)` | Decompresses zstd-compressed `data`. Returns decompressed bytes or `null` on failure. |

## Abstract Base Class

`ZstandardPlatform` extends `PlatformInterface` from plugin_platform_interface:

- **Token**: A unique token ensures that only valid platform instances can be set on `ZstandardPlatform.instance`.
- **Default implementation**: `MethodChannelZstandardPlatform` is the default. It implements only `getPlatformVersion()` via the method channel `plugins.flutter.io/zstandard`. `compress()` and `decompress()` are not implemented and throw `UnimplementedError` if called on the default implementation.
- **Registration**: Each platform package calls `ZstandardPlatform.instance = MyPlatform()` (with the correct token) in its `registerWith()` (or equivalent) so the main plugin uses the real implementation.

## Registration Flow

```mermaid
sequenceDiagram
    participant App
    participant Main as zstandard
    participant Impl as ZstandardImpl
    participant PM as PlatformManager
    participant Reg as Platform registerWith
    participant PI as ZstandardPlatform.instance

    App->>Main: Zstandard().compress(data, 3)
    Main->>Impl: instance
    Impl->>PM: isAndroid / isIOS / ...
    PM-->>Impl: platform
    Impl->>Reg: ZstandardAndroid.registerWith() (e.g.)
    Reg->>PI: instance = ZstandardAndroid()
    Impl->>PI: return instance
    Main->>PI: compress(data, 3)
    PI-->>Main: Uint8List?
```

1. The first call to `Zstandard().instance` triggers registration.
2. `ZstandardImpl` checks `PlatformManager` and calls the appropriate `registerWith()`.
3. That sets `ZstandardPlatform.instance` to the concrete implementation.
4. The main plugin then forwards `compress`/`decompress`/`getPlatformVersion` to that instance.

## Why This Design

- **Single API**: Applications use only the main package; they do not reference platform packages directly.
- **Testability**: Tests can replace `ZstandardPlatform.instance` with a mock that implements the same three methods, as shown below.
- **Federated packages**: Each platform lives in its own package with its own native build and dependencies.
- **Web vs native**: The main package uses conditional imports to select the native or web `ZstandardImpl`; both paths end with a registered `ZstandardPlatform` implementation.
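For instance, a unit test can swap in a fake platform. This sketch assumes plugin_platform_interface's `MockPlatformInterfaceMixin` (which lets a test double bypass the token check); `_FakeZstandardPlatform` is an invented name for illustration.

```dart
import 'dart:typed_data';

import 'package:flutter_test/flutter_test.dart';
import 'package:plugin_platform_interface/plugin_platform_interface.dart';
import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';

/// Hypothetical test double; echoes input instead of really compressing.
class _FakeZstandardPlatform extends ZstandardPlatform
    with MockPlatformInterfaceMixin {
  @override
  Future<String?> getPlatformVersion() async => 'fake 1.0';

  @override
  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async =>
      data;

  @override
  Future<Uint8List?> decompress(Uint8List data) async => data;
}

void main() {
  test('plugin forwards to the registered platform', () async {
    ZstandardPlatform.instance = _FakeZstandardPlatform();
    final out = await ZstandardPlatform.instance
        .compress(Uint8List.fromList([1, 2, 3]), 3);
    expect(out, isNotNull);
  });
}
```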
## Implementing a New Platform

1. Create a new package (e.g. `zstandard_fuchsia`) that depends on `zstandard_platform_interface`.
2. Implement a class that extends `ZstandardPlatform` and implements `getPlatformVersion`, `compress`, and `decompress`.
3. Expose a `registerWith()` (or similar) that sets `ZstandardPlatform.instance = YourPlatform()` using the token from the interface package.
4. In the main plugin’s `ZstandardImpl`, add detection for the new platform and call your `registerWith()` when that platform is active.

## Related Documentation

- [Overview](overview.md)
- [API Reference — Platform Interface](../api/platform-interface.md)

diff --git a/docs/architecture/web-implementation.md b/docs/architecture/web-implementation.md new file mode 100644 index 0000000..73fdaa4 --- /dev/null +++ b/docs/architecture/web-implementation.md @@ -0,0 +1,74 @@

# Web Implementation

The web platform cannot use Dart FFI. Instead, **zstandard_web** uses JavaScript and WebAssembly: the Zstandard C library is compiled to WASM with Emscripten, and Dart calls into it via JS interop.

## Architecture

```mermaid
graph LR
    Dart[zstandard_web Dart]
    JS[zstd.js]
    WASM[zstd.wasm]
    Dart -->|dart:js_interop| JS
    JS --> WASM
```

- **zstd.js**: Emscripten-generated JS glue that loads and initializes the WASM module and exposes functions like `compressData` and `decompressData`.
- **zstd.wasm**: Compiled zstd C code (compression, decompression, bounds, frame size).
- **Dart**: Uses `dart:js_interop` (and `package:web`) to call the JS functions and pass `Uint8List` data.

## Differences from Native Implementations

| Aspect | Native (FFI) | Web (JS/WASM) |
|--------|----------------|----------------|
| Entry point | C functions via FFI | JS functions `compressData` / `decompressData` |
| Threading | Can use Dart isolates | No isolates for WASM; work runs on the main thread, or in workers if implemented |
| Memory | Dart + native malloc/free | JS typed arrays + WASM linear memory |
| Build | CMake / Gradle / Xcode | Emscripten (emcc) build of zstd |

The web implementation does not use the same isolate-based async helpers as native; it awaits the JS interop calls that run the WASM compression/decompression.

## Required Setup in the App

1. **Assets**: Copy `zstd.js` and `zstd.wasm` into the Flutter web project (e.g. the `web/` directory). The exact paths are documented in [Platforms — Web](../platforms/web.md).
2. **HTML**: Include the script in your `web/index.html` so the WASM module is loaded before the app runs:
   ```html
   <script src="zstd.js"></script>
   ```
3. **Initialization**: The Emscripten module must be loaded and ready before any call to `compress` or `decompress`. The web implementation assumes the global functions exist when invoked.
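On the Dart side, bindings for these globals can be declared with `dart:js_interop` along the lines of the following sketch. The actual declarations in zstandard_web may differ; the contract itself is specified in the next section.

```dart
import 'dart:js_interop';
import 'dart:typed_data';

// Bind the global JS wrappers exposed by zstd.js (sketch only).
@JS('compressData')
external JSUint8Array? _compressData(JSUint8Array input, JSNumber level);

@JS('decompressData')
external JSUint8Array? _decompressData(JSUint8Array input);

Uint8List? compressOnWeb(Uint8List data, int level) =>
    _compressData(data.toJS, level.toJS)?.toDart;

Uint8List? decompressOnWeb(Uint8List data) =>
    _decompressData(data.toJS)?.toDart;
```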
## JS API Contract

The Dart code expects the following in the global scope (or on a known object):

- **compressData(inputData, compressionLevel)**
  - `inputData`: `Uint8Array`
  - `compressionLevel`: number (1–22)
  - Returns: `Uint8Array` (compressed) or `null` on error

- **decompressData(compressedData)**
  - `compressedData`: `Uint8Array`
  - Returns: `Uint8Array` (decompressed) or `null` on error

Dart converts between `Uint8List` and JS typed arrays via `dart:js_interop` so that the same `Uint8List` API is used across all platforms.

## Building zstd.js and zstd.wasm

zstd is built with Emscripten. High-level steps:

1. Install and activate the [Emscripten SDK](https://emscripten.org/).
2. Clone the [facebook/zstd](https://github.com/facebook/zstd) repository.
3. Run `emcc` on the zstd sources with flags for WASM, exported functions (`ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, `ZSTD_getFrameContentSize`, `malloc`, `free`), and the output name.
4. Add the wrapper functions `compressData` and `decompressData` in `zstd.js` (or a separate script) that allocate buffers, call the exported C functions, and return the result or null.

Detailed commands and wrapper code are in the [zstandard_web README](https://github.com/landamessenger/zstandard/tree/master/zstandard_web) and in [Platforms — Web](../platforms/web.md).

## Small Data Handling

Some web implementations may return the original data unchanged when the input is very small (e.g. < 9 bytes) because zstd has a minimum frame size. The Dart implementation may handle this by returning the input as-is for both compress and decompress in those cases. See the package source and [Platforms — Web](../platforms/web.md) for the exact behavior.

## Related Documentation

- [Overview](overview.md)
- [Platform Interface](platform-interface.md)
- [Platforms — Web](../platforms/web.md)

diff --git a/docs/deployment/RUNBOOK.md b/docs/deployment/RUNBOOK.md new file mode 100644 index 0000000..07014ff --- /dev/null +++ b/docs/deployment/RUNBOOK.md @@ -0,0 +1,83 @@

# Deployment and recovery runbook

This runbook describes how to run a release, what the pipeline does, and how to recover when a release fails.

## Prerequisites

- **Pub.dev**: Each runner that publishes (macOS, Linux, Windows) must have run `dart pub login` so that `dart pub publish` can authenticate. Credentials are read from the usual Dart config location on each machine.
- **Secrets** (in GitHub repo settings): `PAT` (personal access token with repo scope), if used by the release workflow for tagging/notifications.
- **Variables** (optional): `DEBUG`, `OPEN_ROUTER_MODEL` (or similar) if your notification/tooling uses them.

## Running a release

1. **Update CHANGELOG.md** at the repo root with the new version and user-facing changes.
2. In GitHub: **Actions → Task - Release → Run workflow**.
3. Fill in the inputs:
   - **version**: Semver, e.g. `1.3.30` (do not include `v`).
   - **title**: Short release title.
   - **changelog**: Summary (or paste from CHANGELOG).
   - **issue**: Launcher/issue reference (e.g. `-1` if not used).
4. Run the workflow. It will:
   - Validate that the tag `v<version>` does not exist.
   - Copy CHANGELOG into all packages.
   - Update the version and dependency versions in all `pubspec.yaml` files (via `.github/scripts/update_versions.dart`).
   - Commit and push version bumps.
   - Build native libs (macOS, Linux, Windows) from the pinned `facebook/zstd` ref.
   - Create the tag and GitHub release.
   - Publish to pub.dev in order: **platform_interface → platform packages (parallel) → cli → zstandard**, with verification after each publish.
   - Notify on success (or run the rollback guide job on failure).
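If you want to check availability yourself, outside the workflow's verification script, a minimal Dart sketch against the public pub.dev API looks like this. The endpoint shape (`https://pub.dev/api/packages/<name>` returning a `versions` list) is the one the workflow script already queries; the version used in `main` is an example.

```dart
import 'dart:convert';
import 'dart:io';

/// Returns true when [version] of [package] is listed on pub.dev.
Future<bool> isPublished(String package, String version) async {
  final client = HttpClient();
  try {
    final request = await client
        .getUrl(Uri.parse('https://pub.dev/api/packages/$package'));
    final response = await request.close();
    if (response.statusCode != 200) return false;
    final body = await response.transform(utf8.decoder).join();
    final doc = jsonDecode(body) as Map<String, dynamic>;
    final versions =
        (doc['versions'] as List).map((v) => (v as Map)['version']).toSet();
    return versions.contains(version);
  } finally {
    client.close(force: true);
  }
}

Future<void> main() async {
  print(await isPublished('zstandard', '1.3.30')); // example version
}
```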
## Dependency order (for manual publish)

If you must publish manually (e.g. after a partial failure), use this order:

```text
zstandard_platform_interface
  → zstandard_android, zstandard_ios, zstandard_web, zstandard_macos, zstandard_windows, zstandard_linux
  → zstandard_cli
  → zstandard
```

From the repo root, with credentials configured (each command runs in a subshell so the working directory is restored between packages):

```bash
# 1. Platform interface
(cd zstandard_platform_interface && dart pub publish -f)

# 2. Platforms (any order after platform_interface)
for pkg in zstandard_android zstandard_ios zstandard_web zstandard_macos zstandard_windows zstandard_linux; do
  (cd "$pkg" && dart pub publish -f)
done

# 3. CLI
(cd zstandard_cli && dart pub publish -f)

# 4. Main plugin
(cd zstandard && dart pub publish -f)
```

## When a release fails

- **pub.dev does not allow deleting or overwriting published versions.** If some packages were published and others failed, you have two options:
  1. **Fix the failure** (e.g. fix a test, fix credentials, fix network) and **re-run the workflow** with the **same version**. Only the steps that did not yet succeed will effectively run again (e.g. later packages can now resolve the already-published ones).
  2. **Bump to a new patch version** (e.g. 1.3.30 → 1.3.31), fix the issue, and run a new release so all packages are published under the new version.

- The workflow includes a **Rollback / recovery guide** job that runs when any publish job fails. It writes a short recovery summary to the GitHub Actions job summary. Use that and this runbook to decide on next steps.

- **Common causes of failure**
  - **Credentials**: Runner not logged in to pub.dev. On each publishing runner (macOS, Linux, Windows), run `dart pub login` and ensure the account has publish rights for the packages.
  - **Dependency not found**: A package (e.g. `zstandard_platform_interface`) was just published and pub.dev has not indexed it yet. The workflow waits up to ~10 minutes (with backoff) and verifies via the pub.dev API; if it still fails, wait a bit and re-run the same version.
  - **Tests or analyze**: Fix the failing package locally, commit, and re-run the release with the same version.

## Updating the zstd (C library) version

The workflow uses the **zstd** directory at the **repo root** (no download in CI). To upgrade:

1. Replace or update the contents of the root `zstd/` directory (e.g. clone [facebook/zstd](https://github.com/facebook/zstd), check out the desired tag like `v1.5.7`, and copy its contents over `zstd/`, or use a git submodule).
2. Ensure `zstd/lib/` contains the library sources the builders expect.
3. Commit the changes and run the release as usual; the workflow will copy `zstd/lib/*` into `zstandard_cli/src/` and build.

## Related docs

- [Release process](../development/release-process.md) — versioning and pre-release checklist.
- [SECURITY.md](../../SECURITY.md) — reporting vulnerabilities and CI security practices.

diff --git a/docs/development/CONTRIBUTING.md b/docs/development/CONTRIBUTING.md new file mode 100644 index 0000000..61b52c7 --- /dev/null +++ b/docs/development/CONTRIBUTING.md @@ -0,0 +1,54 @@

# Contributing to Zstandard

Thank you for your interest in contributing to the Zstandard Flutter plugin and CLI.
This document outlines how to set up your environment, follow our standards, and submit changes. + +## Code of Conduct + +Be respectful and professional. We aim to maintain a welcoming environment for everyone. + +## Getting Started + +1. **Fork and clone** the repository. +2. **Set up your development environment** — see [Setup](setup.md). +3. **Create a branch** for your work: `git checkout -b feature/your-feature` or `fix/your-fix`. +4. **Make your changes** following our [Code Style](code-style.md) and [Testing](testing.md) guidelines. +5. **Run tests** for the packages you changed: see [Testing](testing.md). +6. **Commit** with clear messages (e.g. "Add compression level validation in platform interface"). +7. **Push** and open a **Pull Request** against the default branch. + +## What to Contribute + +- **Bug fixes**: Ensure there is an issue or discussion, then submit a fix with tests if applicable. +- **New features**: Open an issue first to discuss the design and scope. For new platform support, see the architecture docs. +- **Documentation**: Fixes and improvements to docs in `docs/` and to dartdoc comments are always welcome. +- **Tests**: Adding or fixing tests is highly valued; see [Testing](testing.md). + +## Pull Request Process + +1. **Target branch**: Open PRs against the repository’s default branch (e.g. `master` or `main`). +2. **CI**: Ensure all relevant CI checks pass (lint, tests, build). +3. **Review**: Address review feedback from maintainers. +4. **Scope**: Keep PRs focused. Split large changes into smaller, reviewable PRs when possible. +5. **Changelog**: For user-facing changes, add an entry to `CHANGELOG.md` under "Unreleased" or the next version. + +## Development Workflow + +- **Building**: See [Building](building.md) for how to build the plugin and native code per platform. +- **Testing**: See [Testing](testing.md) for unit and integration test instructions. +- **Releases**: See [Release Process](release-process.md) for how versions are published. + +## Package Structure + +- **zstandard** — Main Flutter plugin; applications depend only on this. +- **zstandard_platform_interface** — Abstract platform contract. +- **zstandard_android, zstandard_ios, zstandard_macos, zstandard_linux, zstandard_windows, zstandard_web** — Platform implementations. +- **zstandard_cli** — Standalone Dart CLI package (no Flutter). + +When changing the platform interface, ensure all platform implementations are updated and tests pass. + +## Questions + +- Open a [GitHub Issue](https://github.com/landamessenger/zstandard/issues) for bugs or feature requests. +- Use [GitHub Discussions](https://github.com/landamessenger/zstandard/discussions) for questions and ideas. + +Thank you for contributing. diff --git a/docs/development/building.md b/docs/development/building.md new file mode 100644 index 0000000..132b68f --- /dev/null +++ b/docs/development/building.md @@ -0,0 +1,148 @@ +# Building + +This document describes how to build the Zstandard plugin, its native libraries, and the CLI package. + +## All-in-one (macOS) + +From the repository root, you can run sync, bindings, all macOS-runnable builds, and all tests in sequence: + +```bash +./scripts/run_all_macos.sh +``` + +This runs: verify zstd at repo root → regenerate bindings → build Android → build CLI (dylibs) → build iOS → build web → build macOS → test Android → test CLI → test iOS → test web → test macOS. Requires macOS, Flutter, Xcode, CocoaPods, Android SDK/NDK (for Android), and CMake. 
All platforms use the single canonical source at `zstd/` (see workflow below). Stops on first failure. + +## Flutter Plugin (All Platforms) + +### Build the example app + +From the repository root: + +```bash +cd zstandard/example +flutter pub get +flutter run +``` + +Select the target platform (Android, iOS, macOS, Windows, Linux, web). This will compile the plugin and, for native platforms, the platform-specific native code as part of the Flutter build. + +### Build release artifacts + +```bash +cd zstandard/example +flutter build apk # Android +flutter build ios # iOS +flutter build macos # macOS +flutter build windows # Windows +flutter build linux # Linux +flutter build web # Web +``` + +The native libraries (e.g. Android .so, iOS framework, Windows DLL, Linux .so) are built automatically by Flutter’s build system when you build the app that uses the plugin. + +## Native Libraries (Platform Packages) + +All platforms use a **single source of truth** for the zstd C library: **`zstd/`** at the repository root. Android, Linux, and Windows compile directly from `zstd/` via CMake (`zstd_build/`). iOS and macOS copy `zstd/` into the plugin’s `Classes/zstd/` at pod install or build time (see below); that copy is temporary and should not be edited. + +If you are developing or modifying a platform package’s native code: + +### Android + +- The plugin builds the native library via `zstandard_android/zstd_build/CMakeLists.txt`, which compiles sources from `../../zstd/`. +- Building the Android app (e.g. `flutter build apk` or running from Android Studio) triggers the native build via Gradle/CMake. +- Ensure the NDK is installed and that the canonical `zstd/` directory exists at repo root. + +### iOS / macOS + +- The canonical source is **`zstd/`** at repo root. CocoaPods only sees files inside the pod, so each podspec uses a **`prepare_command`** (at pod install) and a **script phase** (before headers at build time) to copy `zstd/` into `zstandard_ios/ios/Classes/zstd/` and `zstandard_macos/macos/Classes/zstd/` respectively. No `pre_install` in the app Podfile is required. +- Ensure `zstd/` exists at repo root (e.g. run `./scripts/update_zstd.sh` if needed). Then build the example app for iOS or macOS; the podspec sync and Xcode/CocoaPods will build the native target. +- The product is a framework that the Dart code loads via FFI. + +### Linux + +- The plugin builds the zstd library via `zstandard_linux/zstd_build/CMakeLists.txt`, which compiles sources from `../../zstd/`, and links it into the plugin. +- From the example app: `flutter build linux` or `flutter run -d linux` will invoke CMake and produce `libzstandard_linux_plugin.so`. + +### Windows + +- The plugin builds the zstd DLL via `zstandard_windows/zstd_build/CMakeLists.txt`, which compiles sources from `../../zstd/`. +- From the example app: `flutter build windows` or `flutter run -d windows` will invoke CMake and produce the plugin DLL and the bundled `zstandard_windows.dll`. + +### Web + +- No native “build” in the C sense. You need `zstd.js` and `zstd.wasm` in the app’s `web/` directory. +- To regenerate them: use Emscripten to compile the facebook/zstd C library and add the `compressData`/`decompressData` wrappers. See [Web Implementation](../architecture/web-implementation.md) and the zstandard_web README. + +## CLI Package + +The CLI is pure Dart plus FFI; it uses **precompiled** native libraries shipped with the package (macOS, Windows, Linux). 
+ +### Run tests + +```bash +cd zstandard_cli +dart test +``` + +### Run the CLI entry points + +```bash +dart run zstandard_cli:compress +dart run zstandard_cli:decompress +``` + +### Building a standalone executable (optional) + +```bash +cd zstandard_cli +dart compile exe bin/compress.dart # if the package exposes such a script +# or use the package’s documented entry point +``` + +The compiled executable will still need the native library (e.g. .dylib, .dll, .so) to be available at runtime as the package expects. + +## Workflow: updating zstd and running the app (do not edit native zstd) + +**Do not modify the native zstd C library by hand.** All platforms use the single **`zstd/`** directory at the repo root. The flow is: + +1. **Update the canonical zstd source** + From repo root: + ```bash + ./scripts/update_zstd.sh # latest from dev (upstream default) + ./scripts/update_zstd.sh v1.5.6 # specific tag or branch + ``` + This fetches from the [official repo](https://github.com/facebook/zstd) and updates `zstd/`. If you prefer to do it manually: `git clone --depth 1 https://github.com/facebook/zstd.git /tmp/zstd && mkdir -p zstd && cp -R /tmp/zstd/lib/* zstd/`. + +2. **Sync zstd into iOS and macOS** (so CocoaPods can see the C sources): + ```bash + ./scripts/sync_zstd_ios_macos.sh + ``` + This copies `zstd/` to `zstandard_ios/ios/Classes/zstd/` and `zstandard_macos/macos/Classes/zstd/`. The **podspecs** handle the sync automatically: `prepare_command` runs at pod install when applicable, and a script phase runs before headers at build time. You only need to run the script by hand in special cases (e.g. fresh clone before the first `pod install`, or right after `update_zstd.sh` if you want the copy in place before building). + + After each build, the iOS and macOS podspecs run a script phase that **removes** the copied `Classes/zstd` directory. The next build recreates it via the podspec’s sync phase. + +3. **Regenerate FFI bindings** (from repo root): + ```bash + ./scripts/regenerate_bindings.sh + ``` + This runs `dart run ffigen` in each platform package (android, ios, macos, linux, windows, cli). Commit any changed `*_bindings_generated.dart` files. + +4. **Run the app** (e.g. `flutter run` from `zstandard/example` for the desired platform). + +Because all platforms reference the same `zstd/` directory, a single `update_zstd.sh` updates every platform at once. + +## FFI Bindings Regeneration (manual) + +If you only need to regenerate bindings for one package: + +1. Install **ffigen** (and LLVM if required): see the Dart FFI documentation. +2. From the package directory (e.g. `zstandard_ios`), run: `dart run ffigen --config ffigen.yaml`. +3. Commit the updated `*_bindings_generated.dart` file. + +## Troubleshooting + +- **Native library not found at runtime**: Ensure you built for the correct platform/architecture and that the library is in the path or next to the executable as the plugin expects. +- **CMake errors**: Install the required build tools (CMake, C compiler) and ensure the zstd source path in CMake matches the package layout. +- **CocoaPods errors**: Run `pod install` in the example’s `ios/` or `macos/` and ensure the plugin’s podspec is correct. + +See [Troubleshooting](../troubleshooting/common-issues.md) for more. 
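Related to the first troubleshooting bullet: a tiny Dart probe can confirm whether a native library resolves at runtime. This is not part of the repo; the library name below is the Linux plugin's, used as an example, so swap in the name or path for your platform.

```dart
import 'dart:ffi';
import 'dart:io';

void main() {
  // Adjust per platform: .so on Linux/Android, .dylib on macOS/iOS,
  // .dll on Windows.
  const name = 'libzstandard_linux_plugin.so';
  try {
    DynamicLibrary.open(name);
    print('Loaded $name');
  } on ArgumentError catch (e) {
    stderr.writeln('Could not load $name: $e');
    exitCode = 1;
  }
}
```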
diff --git a/docs/development/ci-cd.md b/docs/development/ci-cd.md new file mode 100644 index 0000000..f04c65e --- /dev/null +++ b/docs/development/ci-cd.md @@ -0,0 +1,91 @@ +# CI/CD + +This document describes the continuous integration and deployment setup for the Zstandard plugin and CLI, including GitHub Actions workflows and how to use them. + +## Overview + +The repository uses **GitHub Actions** for: + +- **Push checks**: Analyze and test each package on every push (except to protected branches). +- **Release workflow**: Version bumping, building precompiled CLI libraries, tagging, and publishing to pub.dev. +- **Hotfix workflow**: Expedited fixes and releases when needed. + +Workflows are in [`.github/workflows/`](https://github.com/landamessenger/zstandard/tree/master/.github/workflows). + +## Push check workflows + +Each package has a dedicated workflow that runs on push to non-protected branches: + +| Workflow file | Package | Runner | Steps | +|---------------|---------|--------|--------| +| `push_checks_zstandard.yml` | zstandard | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_android.yml` | zstandard_android | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_ios.yml` | zstandard_ios | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_macos.yml` | zstandard_macos | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_linux.yml` | zstandard_linux | self-hosted Linux | Analyze, Test (with coverage), Publish dry run | +| `push_checks_windows.yml` | zstandard_windows | self-hosted Windows | Analyze, Test (with coverage), Publish dry run | +| `push_checks_web.yml` | zstandard_web | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_cli.yml` | zstandard_cli | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | +| `push_checks_platform_interface.yml` | zstandard_platform_interface | self-hosted macOS | Analyze, Test (with coverage), Publish dry run | + +**Branches excluded** from running these checks: `develop`, `release/**`, `hotfix/**`, `master`. + +**Concurrency**: Only the latest run per branch/PR is kept; in-progress runs are cancelled when a new push occurs. + +### Coverage + +- Flutter packages: `flutter test --coverage` produces `coverage/lcov.info` in the package directory. +- CLI package: `dart test --coverage=coverage` then `dart run coverage:format_coverage` to produce lcov. +- Coverage is uploaded to **Codecov** (or similar) via the `codecov/codecov-action@v4` step when the workflow runs. Upload failure does not fail the job (`fail_ci_if_error: false`). + +## Release workflow + +**File**: [`.github/workflows/release_workflow.yml`](https://github.com/landamessenger/zstandard/blob/master/.github/workflows/release_workflow.yml) + +Triggered manually (**workflow_dispatch**) with inputs such as version, title, changelog, and optional issue reference. + +**Main phases**: + +1. **Update files**: Bump version and dependency versions in all packages; copy CHANGELOG; commit. +2. **Build precompiled CLI libraries** (on platform-specific runners): + - **macOS**: Clone facebook/zstd, build Intel and ARM64 libs, merge with `lipo` into a universal `libzstandard_macos.dylib`; commit. + - **Linux**: Clone zstd, build x64 and ARM64 `.so`; commit. + - **Windows**: Clone zstd, build x64 and ARM64 DLLs; commit. +3. **Tag and release**: Create git tag (e.g. `v1.3.30`) and GitHub release with changelog. 
4. **Publish**: Publish packages to pub.dev in dependency order (platform_interface → platform implementations → zstandard_cli → zstandard), matching the order in the [deployment runbook](../deployment/RUNBOOK.md).

The workflow uses **self-hosted** runners for macOS, Linux, and Windows to build native binaries and run platform-specific steps.

## Hotfix workflow

**File**: [`.github/workflows/hotfix_workflow.yml`](https://github.com/landamessenger/zstandard/blob/master/.github/workflows/hotfix_workflow.yml)

Used for expedited fixes (e.g. security or critical bugs). Typically triggered manually; it may skip some steps or use a shorter path to release. See the workflow file and team docs for details.

## Running checks locally

To mimic CI locally:

- **Analyze**: `flutter analyze` or `dart analyze` in each package.
- **Test**: `flutter test` or `dart test` in each package.
- **Test with coverage**: `flutter test --coverage` (Flutter) or `dart test --coverage=coverage` then format (CLI).
- **All packages**: Use the [test scripts](../../scripts/) (e.g. `./scripts/test_all.sh` or `scripts\test_all.bat`).

## Build automation scripts

Scripts under [**scripts/**](https://github.com/landamessenger/zstandard/tree/master/scripts) help build native libraries and run tests locally:

- `build_macos.sh`, `build_linux.sh`, `build_windows.bat`: Build precompiled zstd libraries for the CLI.
- `build_android.sh`, `build_ios.sh`: Build or prepare the Android/iOS plugin.
- `sync_zstd_ios_macos.sh`: Sync the canonical zstd C source (`zstd/` at repo root) into the iOS and macOS plugin `Classes/zstd/` trees.
- `regenerate_bindings.sh`: Regenerate FFI bindings (ffigen) for all platform packages after zstd source updates.
- `test_all.sh` / `test_all.bat`: Run tests in all packages.
- `coverage_report.sh` / `coverage_report.bat`: Generate coverage reports.

See the script contents and [Building](building.md) for requirements (CMake, NDK, Xcode, etc.).

## See also

- [Release process](release-process.md)
- [Testing](testing.md)
- [Building](building.md)

diff --git a/docs/development/code-style.md b/docs/development/code-style.md new file mode 100644 index 0000000..fad76f2 --- /dev/null +++ b/docs/development/code-style.md @@ -0,0 +1,56 @@

# Code Style

This document describes the coding standards for the Zstandard project. Following these keeps the codebase consistent and maintainable.

## Dart / Flutter Conventions

- Follow the [official Dart style guide](https://dart.dev/guides/language/effective-dart/style).
- Use **flutter_lints** (or the project’s chosen lint set) and fix all reported issues. The project’s `pubspec.yaml` files typically include `flutter_lints: ^5.0.0` under `dev_dependencies`.
- Run `dart analyze` or `flutter analyze` from the package directory and fix analyzer warnings and errors.

## Formatting

- Use `dart format` (or `dart format .` in each package) to format code. The project may use a line length of 80 characters; match the existing code.
- Format before committing. Many projects enable format-on-save in the IDE.

## Naming

- **Classes**: `PascalCase` (e.g. `ZstandardPlatform`, `ZstandardLinux`).
- **Libraries and files**: `snake_case` (e.g. `zstandard_platform_interface.dart`, `zstandard_impl_native.dart`).
- **Variables, parameters, methods**: `camelCase` (e.g. `compressionLevel`, `getPlatformVersion`).
- **Constants**: `lowerCamelCase` for const variables, whether private (e.g. `_token`, `_libName`) or public. A short sketch of these conventions follows this list.
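A compact sketch of the conventions above; all identifiers are invented for illustration.

```dart
// File name would be snake_case, e.g. zstandard_example_helper.dart.

const int defaultCompressionLevel = 3; // public const: lowerCamelCase

class ZstandardExampleHelper {
  // class: PascalCase
  static const String _libName = 'zstandard_example'; // private const

  // method and parameter: camelCase
  int clampLevel(int compressionLevel) =>
      compressionLevel.clamp(1, 22).toInt();
}
```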
+ +## Documentation + +- Add **dartdoc** comments to all public APIs (classes, methods, parameters, return values). See the plan’s dartdoc task and the [API docs](../api/main-api.md). +- Use `///` for documentation comments. Include a brief summary, parameter and return descriptions, and mention exceptions or null returns when relevant. +- Prefer linking to related APIs with `[ClassName]` or `[methodName]`. + +## Error Handling + +- Prefer returning `null` for recoverable failures (e.g. compress/decompress failure) when the API returns `Uint8List?`. +- Use `UnimplementedError` in abstract or default platform implementations for methods that must be overridden. +- Avoid swallowing errors; log or rethrow when appropriate, and document possible exceptions in dartdoc. + +## Imports + +- Order imports: Dart SDK, Flutter, then third-party packages, then project packages. Sort alphabetically within each group, matching the existing code. +- Use `package:` imports for project packages (e.g. `package:zstandard_platform_interface/zstandard_platform_interface.dart`). + +## Platform and FFI Code + +- In FFI code, always free allocated memory in a `finally` block (or use the same pattern the project uses) to avoid leaks. +- Use the project’s existing patterns for opening the native library and for calling into C (e.g. generated bindings, error checking). + +## Tests + +- Use descriptive `group()` and `test()` names (e.g. `'compress and decompress roundtrip for large data'`). +- Prefer `setUp()` and `tearDown()` for shared initialization and cleanup. +- Mock the platform in unit tests rather than depending on a real native implementation when testing plugin logic. + +## Commit Messages + +- Use clear, imperative messages (e.g. "Add compression level validation", "Fix memory leak in Linux decompress"). +- Reference issues when applicable (e.g. "Fix #123: null check in extension"). + +Consistency with the existing codebase takes precedence when the style guide is silent; when in doubt, match surrounding code. diff --git a/docs/development/emulator-setup.md b/docs/development/emulator-setup.md new file mode 100644 index 0000000..1e47839 --- /dev/null +++ b/docs/development/emulator-setup.md @@ -0,0 +1,171 @@ +# Emulator and Simulator Setup + +This document describes how to set up Android emulators and iOS simulators for running integration tests locally and in CI. It applies to macOS hosts. + +## Android Emulator + +### Architecture (Apple Silicon vs Intel) + +On **Apple Silicon (M1/M2/M3)** the management script uses an **arm64-v8a** system image so the emulator runs natively. Using x86_64 on ARM often causes the emulator process to exit immediately. On **Intel Macs** it uses **x86_64**. The AVD name includes the ABI (e.g. `zstandard_test_arm64` or `zstandard_test_x86_64`). + +### Prerequisites + +- **Android SDK**: Install via [Android Studio](https://developer.android.com/studio) or the [command-line tools](https://developer.android.com/studio#command-tools). Set `ANDROID_HOME` or `ANDROID_SDK_ROOT` to the SDK root (e.g. `~/Library/Android/sdk` on macOS). +- **Platform tools**: Include `adb` (usually in `$ANDROID_HOME/platform-tools`). +- **Emulator**: Install the "Android Emulator" package from SDK Manager. +- **System image**: The script installs the correct image for your CPU. On Apple Silicon you may need to install once: `sdkmanager --install "system-images;android-30;google_apis;arm64-v8a"`.
+ +### Script: `scripts/manage_android_emulator.sh` + +The repository provides a script to create, start, stop, and query the emulator: + +| Command | Description | +|------------|-------------| +| `create` | Create an AVD (default `zstandard_test_arm64` or `zstandard_test_x86_64` by arch, or `$ZSTANDARD_AVD_NAME`) if it does not exist. Uses the correct system image for your CPU (arm64-v8a on Apple Silicon, x86_64 on Intel). | +| `start` | Start the emulator in headless mode and wait for boot. Creates the AVD if missing. | +| `stop` | Stop the running emulator and clean up. | +| `status` | Print whether the emulator is running. | +| `device-id`| Print the device ID for use with `flutter test -d <device-id>`. | + +**Environment variables:** + +- `ZSTANDARD_AVD_NAME`: AVD name (default: `zstandard_test_arm64` on arm64 Macs, `zstandard_test_x86_64` on Intel). +- `ZSTANDARD_AVD_API_LEVEL`: API level for the system image (default: `30`). +- `ZSTANDARD_AVD_BOOT_TIMEOUT`: Boot completion timeout in seconds (default: `240`). Increase on slow machines (e.g. `300`). +- `ZSTANDARD_AVD_DEVICE_READY_TIMEOUT`: Time to wait for the emulator to appear in `adb devices` (default: `60`). + +**Example:** + +```bash +# From repo root +export ANDROID_HOME=~/Library/Android/sdk +./scripts/manage_android_emulator.sh create  # once +./scripts/manage_android_emulator.sh start +./scripts/test_android_integration.sh +./scripts/manage_android_emulator.sh stop +``` + +### Installing a system image + +If `create` fails because no system image is installed: + +```bash +$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager "system-images;android-30;google_apis;x86_64" +``` + +Then run `./scripts/manage_android_emulator.sh create` again. + +### Troubleshooting + +- **"adb not found"**: Ensure `platform-tools` is installed and that `ANDROID_HOME` (or `ANDROID_SDK_ROOT`) is set. +- **"emulator not found"**: Install the Android Emulator package via SDK Manager. +- **Emulator does not boot**: Increase `ZSTANDARD_AVD_BOOT_TIMEOUT` or check that the system image matches the host (`x86_64` on Intel Macs, `arm64-v8a` on Apple Silicon). +- **"No device/emulator found"**: Run `./scripts/manage_android_emulator.sh start` and wait until `device-id` returns a value before running tests. +- **"Emulator did not boot within Xs"**: First boot can take several minutes. Set a higher timeout: `ZSTANDARD_AVD_BOOT_TIMEOUT=300 ./scripts/test_android_integration.sh`, or skip Android in the full suite: `ZSTANDARD_SKIP_ANDROID=1 ./scripts/test_all_integration.sh`. +- **"Emulator process died before device appeared"**: Usually means the wrong ABI for your Mac. On Apple Silicon you must use an **arm64-v8a** system image; the script selects it automatically. If you had an old AVD (e.g. `zstandard_test` with x86_64), note that the script now uses a different AVD name per arch (`zstandard_test_arm64` / `zstandard_test_x86_64`). Install the image: `sdkmanager --install "system-images;android-30;google_apis;arm64-v8a"` (for Apple Silicon). Check the emulator log: `cat .android_emulator.log` in the repo root. + +--- + +## iOS Simulator + +### Prerequisites + +- **Xcode**: Install from the Mac App Store. Accept the license and install additional components if prompted. +- **Command Line Tools**: `xcode-select --install` if needed. Simulators are controlled via `xcrun simctl`.
+ +### Script: `scripts/manage_ios_simulator.sh` + +| Command | Description | +|------------|-------------| +| `start` | Boot a simulator (default device: `iPhone 16`, overridable with `ZSTANDARD_IOS_DEVICE`). | +| `stop` | Shut down the booted simulator. | +| `list` | List available simulators. | +| `status` | Print whether a simulator is booted. | +| `device-id`| Print the device ID (UUID or name) for `flutter test -d <device-id>`. | + +**Environment variables:** + +- `ZSTANDARD_IOS_DEVICE`: Device name (e.g. `iPhone 16`, `iPhone 15`). Default: `iPhone 16`. +- `ZSTANDARD_IOS_BOOT_TIMEOUT`: Boot wait timeout in seconds (default: `60`). + +**Example:** + +```bash +./scripts/manage_ios_simulator.sh start +./scripts/test_ios_integration.sh +./scripts/manage_ios_simulator.sh stop  # optional +``` + +### Troubleshooting + +- **"No available iPhone simulator"**: Open Xcode → Window → Devices and Simulators and download a simulator runtime, or run `xcrun simctl list devices available` to see what is installed. Adjust `ZSTANDARD_IOS_DEVICE` to a device you have (e.g. `iPhone 15`). +- **Simulator already booted**: The script will reuse the booted simulator. Use `device-id` to get the ID for `flutter test -d`. +- **Flutter cannot find device**: Ensure the simulator is booted and run `flutter devices` to confirm the device id; then pass that id to `flutter test integration_test/ -d <device-id>`. + +--- + +## Web (Chrome + ChromeDriver) + +Web tests run in Chrome. The script runs both **unit tests** (`flutter test -d chrome`) and **integration tests** (`flutter drive` with a local web server and ChromeDriver). + +### Prerequisites + +- **Chrome**: Installed and on PATH. +- **ChromeDriver** (for integration tests only): Must be on PATH and listen on port **4444**. Flutter uses it to drive the browser for integration tests. + - Install: `brew install chromedriver` (macOS), or [download](https://googlechromelabs.github.io/chrome-for-testing/) a version that matches your Chrome. + - **macOS**: If a security popup says "chromedriver cannot be opened" or "Apple could not verify...", remove the quarantine attribute: + `xattr -d com.apple.quarantine "$(which chromedriver)"` + If that fails (e.g. symlink), use the real binary path (e.g. `/opt/homebrew/Caskroom/chromedriver/<version>/chromedriver-mac-arm64/chromedriver`). + - See [Flutter: Test in a web browser](https://docs.flutter.dev/testing/integration-tests#web). + +### Script: `scripts/test_web_integration.sh` + +From the repo root: + +```bash +./scripts/test_web_integration.sh +``` + +This runs: + +1. **Unit tests** in Chrome (`zstandard_web` package tests). +2. **Integration tests** via `flutter drive --target=integration_test/... -d web-server`, which starts a local server and uses ChromeDriver to control Chrome (headless). If ChromeDriver is not running on port 4444, the script tries to start it; if that fails, integration tests are skipped and the script still succeeds if unit tests passed.
+ +### Running integration tests manually + +If the script skips integration tests (ChromeDriver not available), start ChromeDriver in another terminal, then run the script again: + +```bash +# Terminal 1 +chromedriver --port=4444 + +# Terminal 2 (repo root) +./scripts/test_web_integration.sh +``` + +Or run only the integration tests from the example app: + +```bash +chromedriver --port=4444 & +cd zstandard_web/example +flutter drive \ + --driver=test_driver/integration_test.dart \ + --target=integration_test/zstandard_web_integration_test.dart \ + -d web-server --web-port=8080 +``` + +### Unit tests only (no ChromeDriver) + +From `zstandard_web`: + +```bash +flutter test -d chrome +``` + +--- + +## Performance tips + +- **Android**: Reuse a single emulator and avoid stopping it between test runs to save boot time. The test script can leave the emulator running; use `manage_android_emulator.sh stop` when done. +- **iOS**: Similarly, leaving the simulator booted between runs avoids repeated boot time. +- **CI**: Self-hosted runners with pre-created AVDs and simulators can reduce job time. Ensure `ANDROID_HOME` (or `ANDROID_SDK_ROOT`) and Xcode are configured on the runner. diff --git a/docs/development/release-process.md b/docs/development/release-process.md new file mode 100644 index 0000000..43a29e8 --- /dev/null +++ b/docs/development/release-process.md @@ -0,0 +1,68 @@ +# Release Process + +This document outlines how releases of the Zstandard plugin and CLI are prepared and published. The project uses a centralized version and CHANGELOG across all packages. + +## Versioning + +- All packages (zstandard, zstandard_platform_interface, zstandard_android, zstandard_ios, zstandard_macos, zstandard_linux, zstandard_windows, zstandard_web, zstandard_cli) share the **same version number** (e.g. 1.3.29). +- Follow [semantic versioning](https://semver.org/): MAJOR.MINOR.PATCH. Bump: + - **MAJOR** for incompatible API changes. + - **MINOR** for new backward-compatible features. + - **PATCH** for backward-compatible bug fixes. + +## Pre-Release Checklist + +- [ ] All tests pass (`flutter test` / `dart test` in each package). +- [ ] `flutter analyze` (or `dart analyze`) reports no errors in the packages you are releasing. +- [ ] CHANGELOG.md is updated with user-facing changes for the release. +- [ ] Version in root and in each package’s `pubspec.yaml` is updated to the new version. +- [ ] Inter-package dependencies use the new version (e.g. `zstandard_android` depends on `zstandard_platform_interface: ^x.y.z`). + +## Release Workflow (CI) + +The project uses a **Release** workflow (e.g. GitHub Actions “Task - Release”) that: + +1. **Validates** that the release version and tag do not already exist. +2. **Copies** CHANGELOG.md into each package. +3. **Updates** `version:` and dependency versions in every package’s `pubspec.yaml`. +4. **Publishes** packages to pub.dev (order may be: platform_interface first, then platform implementations, then main plugin, then CLI). +5. **Creates** a git tag (e.g. `v1.3.29`) and possibly a GitHub release. + +The workflow is typically triggered manually (workflow_dispatch) with inputs such as: + +- **version**: e.g. `1.3.30` +- **title**: Release title +- **changelog**: Summary of changes +- **issue**: Optional launcher issue reference + +## Manual Steps (if not using full automation) + +If you need to release without the full workflow: + +1. Update **CHANGELOG.md** at the repo root with the new version and list of changes. +2. 
Update **version** in every package’s **pubspec.yaml** to the new version. +3. Update **dependency versions** in each package that depends on another (e.g. `zstandard_android` depends on `zstandard_platform_interface: ^X.Y.Z` — set to the new version). +4. Copy **CHANGELOG.md** into each package’s directory if the project keeps a copy per package. +5. **Publish** in dependency order: + - `zstandard_platform_interface` + - Platform packages (android, ios, macos, linux, windows, web) + - `zstandard` + - `zstandard_cli` +6. **Tag** the release: `git tag vX.Y.Z` and push the tag. +7. **Create** a GitHub release from the tag and paste the changelog. + +## Publishing to pub.dev + +- Use `dart pub publish` (or `flutter pub publish`) from each package directory. Confirm the package name and version when prompted. +- Ensure you are logged in (`dart pub login`) and have permissions to publish the package. +- Publish in order so that dependencies are available: platform_interface first, then platform implementations, then zstandard, then zstandard_cli. + +## After Release + +- Bump the development version in `pubspec.yaml` files if the project uses a separate “next” version (e.g. 1.3.30+1 or 1.4.0-dev). +- Add an “Unreleased” or “Next” section in CHANGELOG.md for the next release. +- Announce the release (e.g. GitHub release notes, changelog link) as appropriate. + +## Hotfixes + +For critical fixes, the project may use a **hotfix** workflow (see `.github/workflows/hotfix_workflow.yml` and issue templates). Follow the same versioning and publish order; use a PATCH bump (e.g. 1.3.29 → 1.3.30). diff --git a/docs/development/setup.md b/docs/development/setup.md new file mode 100644 index 0000000..9860c12 --- /dev/null +++ b/docs/development/setup.md @@ -0,0 +1,97 @@ +# Development Setup + +This guide describes how to set up your machine to develop and contribute to the Zstandard plugin and CLI. + +## Prerequisites + +- **Dart SDK**: ^3.6.0 (see each package’s `pubspec.yaml` for the exact constraint). +- **Flutter SDK**: >=3.3.0 (for plugin and example apps). +- **Git**: To clone and work with the repository. + +Optional, for native work: + +- **Android**: Android Studio / SDK and NDK if you modify Android native code. +- **iOS/macOS**: Xcode and CocoaPods if you modify iOS/macOS native code. +- **Linux**: CMake and a C compiler (gcc/clang) for building the Linux plugin. +- **Windows**: CMake and Visual Studio Build Tools (or equivalent) for building the Windows plugin. +- **Web**: Node/npm optional; Emscripten is needed only if you rebuild zstd.js/zstd.wasm. + +## Clone the Repository + +```bash +git clone https://github.com/landamessenger/zstandard.git +cd zstandard +``` + +If you use a fork: + +```bash +git remote add upstream https://github.com/landamessenger/zstandard.git +``` + +## Get Dependencies + +From the repository root, fetch dependencies for all packages: + +```bash +flutter pub get +``` + +Then, for each package you will work on: + +```bash +cd zstandard +flutter pub get + +cd ../zstandard_platform_interface +flutter pub get + +cd ../zstandard_android # or ios, macos, linux, windows, web +flutter pub get + +cd ../zstandard_cli +dart pub get +``` + +Or use a script if the project provides one to run `pub get` in all packages. + +## IDE Setup + +- **VS Code / Cursor**: Install the Dart and Flutter extensions. Open the repo root so that all packages are visible. +- **Android Studio / IntelliJ**: Install the Flutter plugin and open the repo root. Ensure the Dart SDK is configured. 
+ +## Verifying the Setup + +1. **Analyze**: From the repo root, run: +   ```bash +   flutter analyze +   ``` +   Fix any reported issues in the packages you touch. + +2. **Tests**: Run tests for the main package and the one you are changing: +   ```bash +   cd zstandard && flutter test +   cd ../zstandard_cli && dart test +   ``` +   See [Testing](testing.md) for full test commands. + +3. **Example app**: Run the example app for your target platform to confirm the plugin works: +   ```bash +   cd zstandard/example +   flutter run +   ``` +   Choose the desired platform (e.g. Android, iOS, macOS, Windows, Linux, web). + +## Platform-Specific Notes + +- **Android**: Ensure `ANDROID_HOME` is set and that an emulator or device is available. +- **iOS/macOS**: Ensure Xcode and CocoaPods are installed. Run `pod install` in the example’s `ios/` or `macos/` if needed. +- **Linux**: Install CMake and the build essentials; the Linux plugin’s CMake will build the native library. +- **Windows**: Ensure CMake and a C++ toolchain are available; the Windows plugin’s CMake will build the DLL. +- **Web**: No native build required for running the app; ensure `zstd.js` and `zstd.wasm` are in the example’s `web/` if you run the web example. + +## Next Steps + +- [Building](building.md) — How to build the plugin and native code. +- [Testing](testing.md) — How to run and write tests. +- [Code Style](code-style.md) — Coding standards. diff --git a/docs/development/testing.md b/docs/development/testing.md new file mode 100644 index 0000000..d4ca44e --- /dev/null +++ b/docs/development/testing.md @@ -0,0 +1,178 @@ +# Testing Guidelines + +This document describes how to run and write tests for the Zstandard plugin and CLI. + +## Integration Tests for Platform Packages + +Platform-specific packages (Android, iOS, macOS, Web) use **integration tests** that run on real devices, emulators, or Chrome. This gives full coverage without skips. + +- **Android**: Tests run in `zstandard_android/example/integration_test/` on an Android emulator. +- **iOS**: Tests run in `zstandard_ios/example/integration_test/` on an iOS simulator. +- **macOS**: Tests run in `zstandard_macos/example/integration_test/` after the native framework is built. +- **Web**: Unit tests run with `flutter test -d chrome` (Chrome required; no VM execution). + +Linux and Windows tests still run only on their native OS in CI. + +### Prerequisites (macOS) + +1. **Android**: Android SDK with emulator (API 28+). Set `ANDROID_HOME` or `ANDROID_SDK_ROOT`. +2. **iOS**: Xcode with simulators installed. +3. **macOS**: Xcode command-line tools. +4. **Web**: Chrome browser. + +See [Emulator and simulator setup](emulator-setup.md) for details. + +### Running all tests (no skips) + +From the repository root: + +```bash +./scripts/test_all_integration.sh +``` + +This runs unit tests for pure Dart packages, then integration tests for Android (emulator), iOS (simulator), macOS (after building the framework), and Web (Chrome). + +### Running individual platform tests + +**Android** (starts emulator if needed; first boot can take 2–4 minutes): + +```bash +./scripts/test_android_integration.sh +``` + +To skip Android when running the full suite (e.g. if no emulator or slow machine): `ZSTANDARD_SKIP_ANDROID=1 ./scripts/test_all_integration.sh`. To allow more time for boot: `ZSTANDARD_AVD_BOOT_TIMEOUT=300 ./scripts/test_android_integration.sh`.
+ +**iOS** (boots simulator if needed): + +```bash +./scripts/test_ios_integration.sh +``` + +**macOS** (builds framework if needed): + +```bash +./scripts/ensure_macos_framework.sh +cd zstandard_macos/example +flutter test integration_test/ -d macos +``` + +**Web** (Chrome): + +```bash +./scripts/test_web_integration.sh +``` + +## Running Tests + +### Main plugin (zstandard) + +```bash +cd zstandard +flutter test +``` + +### Platform interface + +```bash +cd zstandard_platform_interface +flutter test +``` + +### Platform implementations + +- **Android, iOS, macOS**: Platform tests live in each package’s `example/integration_test/`. Use the scripts above (e.g. `./scripts/test_android_integration.sh`). The package `test/` directory only contains a pointer test. +- **Linux, Windows**: From the package directory, `flutter test` (run on the corresponding OS). +- **Web**: From `zstandard_web`, run `flutter test -d chrome` (Chrome required). + +### CLI package + +```bash +cd zstandard_cli +dart test +``` + +### All packages (quick run, may skip platform-specific tests) + +```bash +./scripts/test_all.sh +``` + +Use `./scripts/test_all_integration.sh` for full coverage without skips (see above). + +### Integration tests (main plugin) + +The main plugin’s example app also has integration tests: + +```bash +cd zstandard/example +flutter test integration_test/ +``` + +Use `-d <device-id>` to run on a specific device or simulator. + +## Test Structure + +- **Unit tests**: In each package’s `test/` directory. Use `test()` and `group()` from `package:test` or `package:flutter_test`. Mock the platform when testing the main plugin or platform interface. For Android, iOS, macOS, and Web, the main platform tests have been moved to integration tests. +- **Integration tests**: In each platform example’s `integration_test/` directory. They run on a real device, emulator, simulator, or Chrome and exercise the full native/WASM stack with no skips. + +## Writing Tests + +### Platform interface + +- **Contract tests**: Verify that the default implementation (method channel) throws `UnimplementedError` for `compress` and `decompress` if not overridden. Verify `getPlatformVersion` behavior when a mock is set. +- **Mock platform**: Implement a fake `ZstandardPlatform` that returns deterministic results and verify that the main plugin (or code under test) behaves correctly when this mock is set as `ZstandardPlatform.instance`. + +### Main plugin + +- **Singleton**: Verify that `Zstandard()` returns the same instance. +- **Compress/decompress**: With a mock platform, verify that `compress` and `decompress` forward to the platform and return the platform’s result. +- **Extensions**: Test `Uint8List?.compress()` and `decompress()` with null and non-null receiver, and with a mock platform. + +### Platform implementations (native) + +For Android, iOS, and macOS, platform tests are **integration tests** in `example/integration_test/`. They include: + +- **Compression roundtrip**: Small, large, and empty input. +- **Compression levels**: 1, 3, 10, 22. +- **Error handling**: Corrupted or random bytes for decompress; expect `null` or appropriate handling. +- **Edge cases**: Empty input, highly compressible data, large data. +- **Property-based tests**: Roundtrip property with generative input (e.g. kiri_check). +- **Leak tracking**: Where applicable, ensure no leaks after compress/decompress. + +Linux and Windows keep unit tests in `test/` that run only when the host OS matches. Web tests run in Chrome via `flutter test -d chrome`.
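+ +A minimal sketch of that OS-gated unit-test pattern (illustrative only: the library import and the `ZstandardLinux().compress(data, level)` signature are assumed from the examples in these docs, not copied from the real test files): + +```dart +import 'dart:io'; +import 'dart:typed_data'; + +import 'package:flutter_test/flutter_test.dart'; +import 'package:zstandard_linux/zstandard_linux.dart'; + +void main() { +  test( +    'compress and decompress roundtrip', +    () async { +      final plugin = ZstandardLinux(); +      final data = Uint8List.fromList(List<int>.generate(1024, (i) => i % 256)); +      final compressed = await plugin.compress(data, 3); +      expect(compressed, isNotNull); +      final decompressed = await plugin.decompress(compressed!); +      expect(decompressed, data); +    }, +    // Run only when the host OS matches; skipped elsewhere instead of failing. +    skip: !Platform.isLinux, +  ); +} +```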
+ +### CLI + +- The existing tests in `zstandard_cli/test/` are a good reference: small/large/empty data, repeated values, min/max compression level. Add tests for invalid compression levels and platform detection if desired. + +## Coverage + +To collect coverage (when supported): + +```bash +cd zstandard +flutter test --coverage +``` + +View the generated `coverage/lcov.info` with a tool like `lcov` or your IDE. Aim for high coverage on the main plugin and platform interface; platform-specific code may have lower coverage when run on a single host. + +## Mutation testing + +Mutation testing measures test quality by mutating source code and checking whether tests detect the changes. A mutation score of 90% or above is required. + +- **Config**: `mutation_test_config.xml` at repo root (Flutter packages) or `zstandard_cli/mutation_test_config.xml` (CLI). +- **Run for one package** (from repo root): `cd <package> && dart run mutation_test ../mutation_test_config.xml` (use `mutation_test_config.xml` for zstandard_cli). +- **Run for all packages**: `./scripts/run_mutation_test.sh all` (takes a long time). +- **Threshold**: Failure if mutation score < 90%. + +## CI + +CI runs platform-specific tests as follows: + +- **Android**: Starts an emulator, runs `zstandard_android/example` integration tests, then stops the emulator. +- **iOS**: Boots a simulator, runs `zstandard_ios/example` integration tests. +- **macOS**: Builds the native framework (if needed), then runs `zstandard_macos/example` integration tests. +- **Web**: Runs `flutter test -d chrome` in the `zstandard_web` package. +- **Linux / Windows**: Run `flutter test` on their respective runners. + +Ensure your changes do not break these jobs. Add new tests for new behavior and fix any failing tests before submitting a PR. diff --git a/docs/guides/advanced-usage.md b/docs/guides/advanced-usage.md new file mode 100644 index 0000000..5df4823 --- /dev/null +++ b/docs/guides/advanced-usage.md @@ -0,0 +1,124 @@ +# Advanced Usage + +This guide covers patterns for large data, chunking, concurrent compression, and memory optimization when using the Zstandard plugin and CLI. + +## Large file handling + +The plugin API works on in-memory buffers: `compress(Uint8List)` and `decompress(Uint8List)`. For very large files, loading the entire file into memory may be impractical. Use **chunking**: process the file in fixed-size chunks and compress or decompress each chunk separately. + +### Chunked compression + +1. Read a chunk of the file (e.g. 256 KB or 1 MB). +2. Compress the chunk with `compress(chunk, level)`. +3. Store the compressed chunk (e.g. write length then bytes so you can read back later). +4. Repeat until the file is done.
+ +```dart +import 'dart:io'; +import 'dart:typed_data'; + +import 'package:zstandard/zstandard.dart'; + +Future<void> compressFileChunked(String path, String outPath, int chunkSize) async { +  final file = File(path); +  final out = File(outPath); +  final z = Zstandard(); +  final data = await file.readAsBytes(); +  final sink = out.openWrite(); +  for (var offset = 0; offset < data.length;) { +    final end = (offset + chunkSize < data.length) ? offset + chunkSize : data.length; +    final chunk = Uint8List.sublistView(data, offset, end); +    offset = end; +    final compressed = await z.compress(chunk, 3); +    if (compressed == null) throw Exception('Compression failed'); +    // Write the chunk length (4 bytes, big-endian), then the compressed bytes +    final header = ByteData(4)..setUint32(0, compressed.length, Endian.big); +    sink.add(header.buffer.asUint8List()); +    sink.add(compressed); +  } +  await sink.close(); +} +``` + +For very large files, avoid loading the whole file with `readAsBytes()`; instead read in a loop using `RandomAccessFile.read()` with a fixed buffer size and compress each chunk. + +### Chunked decompression + +If you stored chunks as [length, bytes, length, bytes, ...], read back the same way: + +1. Read 4 bytes (length). +2. Read that many bytes (one compressed chunk). +3. Call `decompress(chunk)` and use or write the result. +4. Repeat until the stream ends. + +```dart +// Same imports as the compression example above. +Future<void> decompressFileChunked(String path, String outPath) async { +  final file = File(path); +  final out = File(outPath); +  final z = Zstandard(); +  final bytes = await file.readAsBytes(); +  final sink = out.openWrite(); +  int offset = 0; + +  while (offset + 4 <= bytes.length) { +    final length = ByteData.view(bytes.buffer, bytes.offsetInBytes + offset, 4).getUint32(0, Endian.big); +    offset += 4; +    if (offset + length > bytes.length) break; +    final chunk = Uint8List.sublistView(bytes, offset, offset + length); +    offset += length; +    final decompressed = await z.decompress(chunk); +    if (decompressed == null) throw Exception('Decompression failed'); +    sink.add(decompressed); +  } +  await sink.close(); +} +``` + +### Chunk size trade-offs + +- **Smaller chunks** (e.g. 64–256 KB): Lower peak memory, more overhead (frame headers per chunk), slightly worse ratio. +- **Larger chunks** (e.g. 1–4 MB): Better ratio, higher peak memory per chunk. +- Choose a size that fits your memory budget and performance needs. + +## Streaming and chunking + +The plugin does not expose a streaming API. To get streaming-like behaviour: + +- **Producer**: Read input in chunks (file, network, etc.), compress each chunk, and send or write the compressed chunks (e.g. with length prefix as above). +- **Consumer**: Read length-prefixed compressed chunks, decompress each with `decompress()`, and process or write the decompressed bytes. + +This gives you control over memory and back-pressure at the application level. + +## Concurrent compression + +You can run multiple `compress` or `decompress` calls in parallel (e.g. for different chunks or different inputs). The singleton `Zstandard()` is safe to use from multiple isolates or concurrent futures. + +```dart +// chunks: a List<Uint8List> prepared by the caller. +final z = Zstandard(); +final futures = chunks.map((c) => z.compress(c, 3)); +final results = await Future.wait(futures); +``` + +- **Pros**: Better CPU utilization on multi-core devices; useful when processing many chunks or files. +- **Cons**: Peak memory increases with the number of concurrent operations. Limit concurrency (e.g. with a pool or `Future.wait` on batches) to avoid OOM. + +## Memory optimization + +1. **Limit concurrency**: Process a fixed number of chunks at a time instead of all at once. +2.
**Reuse buffers**: Where possible, reuse `Uint8List` buffers for reading chunks instead of allocating new ones every time. +3. **Choose level**: Lower levels (1–3) use less memory than high levels (19–22). Use lower levels for large data if memory is tight. +4. **Chunk size**: Smaller chunks reduce peak memory but increase overhead; tune for your environment. +5. **Release references**: After writing or sending a compressed/decompressed chunk, let the reference go so the GC can reclaim it before the next chunk. + +## Web platform + +On web, compression and decompression run on the main thread (no isolates). For large data: + +- Prefer smaller chunks to keep the UI responsive. +- Consider moving work to a Web Worker and calling the same API from there if you run Dart in the worker. +- Lower compression levels reduce CPU time and improve responsiveness. + +## CLI and batch processing + +The CLI compresses or decompresses whole files. For very large files, split them first (e.g. with `split` on Unix or a custom script), compress each part with the CLI, then reassemble and decompress on the other side. Alternatively, use the Flutter plugin in a small Dart script with the chunked patterns above for full control. + +## See also + +- [Performance tips](performance-tips.md) +- [Compression levels](compression-levels.md) +- [Security](security.md) — validating and limiting input size +- [Error handling](error-handling.md) diff --git a/docs/guides/best-practices.md b/docs/guides/best-practices.md new file mode 100644 index 0000000..1ec1355 --- /dev/null +++ b/docs/guides/best-practices.md @@ -0,0 +1,101 @@ +# Best Practices + +This guide summarizes recommended practices, a production checklist, and common anti-patterns when using the Zstandard plugin and CLI. + +## Do's + +1. **Check for null** after every `compress` and `decompress` when the result is used. Null means failure; handle it (log, retry, or show an error). +2. **Use a valid compression level** (1–22). Clamp or validate user input to this range for portability. +3. **Reuse the Zstandard instance** — it is a singleton; no need to cache it yourself. +4. **Limit input size** when processing user or untrusted data to avoid excessive memory use and potential abuse. +5. **Prefer extension methods** when you have a single value: `data.compress()`, `compressed?.decompress()` for clearer null-aware code. +6. **Use chunking for large data** (see [Advanced usage](advanced-usage.md)) to control memory and keep the UI responsive. +7. **Choose the right level**: level 3 for general use; 1 for speed; 10+ for size when CPU and time allow. +8. **Validate decompressed content** when data comes from untrusted sources; the API only guarantees valid zstd output, not safe application-level content. +9. **Run tests and analyze** before release: `flutter test`, `flutter analyze` (or `dart test` / `dart analyze` for the CLI package). +10. **Use explicit version constraints** in `pubspec.yaml` (e.g. `zstandard: ^1.3.0`) and upgrade in a controlled way. + +## Don'ts + +1. **Don't ignore null results** — using a null result as if it were data can lead to crashes or wrong behaviour. +2. **Don't use compression levels outside 1–22** — behaviour is implementation-defined and may differ by platform. +3. **Don't decompress untrusted data without a size limit** — cap input size to what you are willing to allocate. +4. **Don't assume decompress throws** on invalid input — it typically returns null; handle null. +5.
**Don't load entire very large files into memory** if you can avoid it; use chunked reading and compression. +6. **Don't run many concurrent compress/decompress operations** without limiting concurrency; memory usage can grow quickly. +7. **Don't rely on platform-specific behaviour** (e.g. specific error messages or edge-case handling); stick to the documented API and null semantics. +8. **Don't skip dependency updates** indefinitely; update periodically and run tests to catch breaking changes. + +## Production checklist + +Before shipping an app or script that uses zstandard: + +- [ ] **Error handling**: All `compress`/`decompress` call sites check for null and handle failure. +- [ ] **Compression level**: Level is validated (1–22) when it comes from config or user input. +- [ ] **Input size**: Limits applied for user or untrusted data (e.g. max decompress size, max compress size). +- [ ] **Large data**: Very large payloads use chunking or size limits to avoid OOM. +- [ ] **Platforms**: App is tested on every platform you support (Android, iOS, web, etc.). +- [ ] **Dependencies**: `flutter pub get` / `dart pub get` and `flutter pub upgrade` run; no unexpected breakages. +- [ ] **Analytics or logging**: Null results or failures logged or reported where appropriate. +- [ ] **Security**: No sensitive data logged in raw form; untrusted data handled as in [Security](security.md). +- [ ] **Performance**: Compression level and chunk size chosen for your latency and memory constraints. + +## Anti-patterns + +**Ignoring null** + +```dart +// Bad +final c = await z.compress(data, 3); +await send(c!); // Can throw if compress failed + +// Good +final c = await z.compress(data, 3); +if (c == null) return handleError(); +await send(c); +``` + +**Unbounded decompression** + +```dart +// Bad: no size limit on untrusted input +final d = await z.decompress(userBytes); + +// Good: reject or cap size before calling +if (userBytes.length > maxDecompressSize) return reject(); +final d = await z.decompress(userBytes); +``` + +**Assuming exceptions** + +```dart +// Bad: decompress returns null on invalid data, does not throw +try { +  final d = await z.decompress(badBytes); +  use(d); // d may be null +} catch (e) { ... } + +// Good +final d = await z.decompress(badBytes); +if (d == null) return handleInvalid(); +use(d); +``` + +**Unvalidated level** + +```dart +// Bad +final level = int.parse(userInput); +await z.compress(data, level); + +// Good: clamp to 1–22 (clamp returns num, so convert back to int) +final level = int.parse(userInput).clamp(1, 22).toInt(); +await z.compress(data, level); +``` + +## See also + +- [Error handling](error-handling.md) +- [Security](security.md) +- [Advanced usage](advanced-usage.md) +- [Performance tips](performance-tips.md) diff --git a/docs/guides/compression-levels.md b/docs/guides/compression-levels.md new file mode 100644 index 0000000..1cf7f95 --- /dev/null +++ b/docs/guides/compression-levels.md @@ -0,0 +1,42 @@ +# Compression Levels + +Zstandard supports compression levels from **1** to **22**. Higher levels give better compression ratio but are slower and use more memory. + +## Summary + +| Level | Speed | Ratio | Typical use | +|-------|----------|---------|---------------------------| +| 1 | Fastest | Lowest | Real-time, low latency | +| 3 | Fast | Good | **Default**; general use | +| 5–9 | Medium | Better | Balanced | +| 10–19 | Slower | High | Storage, archival | +| 20–22 | Slowest | Highest | Maximum ratio | + +## Choosing a level + +- **Default (3)**: Use when you don’t have special requirements. Good balance of speed and ratio.
+- **Level 1**: When speed matters more than size (e.g. real-time or interactive). +- **Level 10–19**: When you care about size and can afford more CPU (e.g. backups, assets). +- **Level 22**: When you want the smallest output and time is not critical. + +## In code + +```dart +// Default (level 3) via extension +final c1 = await data.compress(); + +// Explicit level +final c2 = await data.compress(compressionLevel: 1); +final c3 = await zstandard.compress(data, 19); +``` + +## Notes + +- Invalid levels (e.g. < 1 or > 22) may be accepted or rejected depending on the platform; stick to 1–22 for portability. +- Compression level does not affect decompression; decompression speed is largely independent of the level used to compress. +- For very small inputs, the compressed size may be larger than the input; level has limited impact there. + +## See also + +- [Performance tips](performance-tips.md) +- [API — Main](../api/main-api.md) diff --git a/docs/guides/error-handling.md b/docs/guides/error-handling.md new file mode 100644 index 0000000..101cd9f --- /dev/null +++ b/docs/guides/error-handling.md @@ -0,0 +1,57 @@ +# Error Handling + +This guide describes how errors and edge cases are represented when using the Zstandard plugin and CLI, and how to handle them in your code. + +## Null as failure + +The main plugin and CLI return `Future<Uint8List?>` from compress and decompress. A **null** result means the operation failed (e.g. compression error, corrupted or invalid input for decompression). + +Always check for null before using the result: + +```dart +final compressed = await zstandard.compress(data, 3); +if (compressed == null) { +  // Compression failed; log or show an error +  return; +} + +final decompressed = await zstandard.decompress(compressed); +if (decompressed == null) { +  // Decompression failed; data may be corrupted or not zstd +  return; +} +``` + +## Extension methods and null + +- Calling `compress()` or `decompress()` on a **null** `Uint8List?` returns **null** (no throw). +- If the underlying operation fails, the Future completes with **null**. + +```dart +Uint8List? maybeData = ...; +final compressed = await maybeData.compress(); // null if maybeData is null +final decompressed = await compressed?.decompress(); // null if compressed is null or decompress fails +``` + +## Invalid input + +- **Decompression**: Passing data that is not a valid Zstandard frame (e.g. random bytes, truncated data) typically results in a **null** return. The plugin does not throw in this case. +- **Compression**: Invalid compression level (e.g. out of range) may or may not be validated by the implementation; behavior can differ by platform. Use levels 1–22 for portability. + +## Exceptions + +- **UnimplementedError**: Thrown by the default platform implementation (method channel) when `compress` or `decompress` is called without a registered native implementation. In normal use with the full plugin and a supported platform, this should not occur. +- **MissingPluginException**: Can occur if the method channel is used but no implementation is registered (e.g. in tests). Register a mock platform or the real implementation to avoid it. +- **DynamicLibrary loading**: On native platforms, if the zstd library fails to load, the first FFI call may throw. Ensure the app is built and run on a supported platform/architecture. + +## Best practices + +1. **Check null** after every `compress` and `decompress` when failure is possible. +2. **Use null-safe chains** with extensions: `compressed?.decompress()`. +3.
**Log or report** null results in production (e.g. analytics, user message) instead of ignoring them. +4. **Validate input** when it comes from untrusted sources (e.g. file upload); invalid data will usually yield null on decompress. + +## See also + +- [API — Main](../api/main-api.md) +- [Troubleshooting — Common issues](../troubleshooting/common-issues.md) diff --git a/docs/guides/getting-started.md b/docs/guides/getting-started.md new file mode 100644 index 0000000..dc2daab --- /dev/null +++ b/docs/guides/getting-started.md @@ -0,0 +1,81 @@ +# Getting Started + +This guide gets you from zero to compressing and decompressing data with the Zstandard Flutter plugin in a few minutes. + +## Add the dependency + +In your Flutter app’s `pubspec.yaml`: + +```yaml +dependencies: +  zstandard: ^1.3.29 +``` + +Then run: + +```bash +flutter pub get +``` + +## Basic usage + +Import and use the main class: + +```dart +import 'dart:typed_data'; + +import 'package:flutter/foundation.dart'; // for listEquals +import 'package:zstandard/zstandard.dart'; + +void main() async { +  final zstandard = Zstandard(); +  final data = Uint8List.fromList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + +  // Compress with the default level (3) +  final compressed = await zstandard.compress(data, 3); +  if (compressed == null) return; // handle failure + +  // Decompress +  final decompressed = await zstandard.decompress(compressed); +  if (decompressed != null && listEquals(data, decompressed)) { +    print('Roundtrip OK'); +  } +} +``` + +Or use the extension methods on `Uint8List?`: + +```dart +final compressed = await data.compress(compressionLevel: 3); +final decompressed = await compressed?.decompress(); +``` + +## Command-line (CLI) + +For a pure Dart app on macOS, Windows, or Linux (no Flutter): + +```yaml +dependencies: +  zstandard_cli: ^1.3.29 +``` + +```dart +import 'dart:typed_data'; + +import 'package:zstandard_cli/zstandard_cli.dart'; + +void main() async { +  final data = Uint8List.fromList([1, 2, 3, 4, 5]); +  final compressed = await data.compress(compressionLevel: 3); +  final decompressed = await compressed?.decompress(); +} +``` + +Or run the CLI from the shell: + +```bash +dart run zstandard_cli:compress myfile.txt 3 +dart run zstandard_cli:decompress myfile.txt.zstd +``` + +## Next steps + +- [Installation](installation.md) — Platform-specific setup (e.g. web assets). +- [Usage examples](usage-examples.md) — More examples and patterns. +- [Compression levels](compression-levels.md) — Choose the right level for speed vs ratio. +- [API — Main](../api/main-api.md) — Full API reference. diff --git a/docs/guides/installation.md b/docs/guides/installation.md new file mode 100644 index 0000000..aa9e0cc --- /dev/null +++ b/docs/guides/installation.md @@ -0,0 +1,68 @@ +# Installation + +This page covers platform-specific installation and setup for the Zstandard plugin and CLI. + +## Flutter app (all platforms) + +Add the main plugin to your app: + +```yaml +dependencies: +  zstandard: ^1.3.29 +``` + +Run: + +```bash +flutter pub get +``` + +No extra steps are required for **Android, iOS, macOS, Windows, or Linux**; the federated plugin pulls in the right implementation and builds the native code when you build your app. + +## Web + +For **Flutter web**, you must add two assets and include a script: + +1. Copy **zstd.js** and **zstd.wasm** from the [zstandard_web](https://github.com/landamessenger/zstandard/tree/master/zstandard_web) package (e.g. from its `blob/` or as documented there) into your app’s **web/** directory (e.g. `web/zstd.js`, `web/zstd.wasm`). + +2.
In **web/index.html**, include the script before your app loads (the exact snippet may vary; see the zstandard_web README): + +```html +<!-- Load the zstd loader before the Flutter bootstrap script. --> +<script src="zstd.js"></script> +``` + +Without these, the web implementation will not work. See [Platforms — Web](../platforms/web.md) for details. + +## CLI (Dart only) + +For a **pure Dart** project (no Flutter) on macOS, Windows, or Linux: + +```yaml +dependencies: +  zstandard_cli: ^1.3.29 +``` + +```bash +dart pub get +``` + +The package ships with precompiled native libraries; no extra installation is needed. See [Platforms — CLI](../platforms/cli.md). + +## Verifying installation + +- **Flutter**: Run your app on the desired platform and call `Zstandard().compress(data, 3)` and `decompress(compressed)`; check that you get a non-null result for valid input. +- **Web**: Ensure no console errors about `compressData`/`decompressData` or WASM loading. +- **CLI**: Run `dart test` inside the `zstandard_cli` package or use `dart run zstandard_cli:compress` in a project that depends on it. + +## See also + +- [Getting started](getting-started.md) +- [Platforms](../platforms/) — Per-platform details diff --git a/docs/guides/migration-guide.md b/docs/guides/migration-guide.md new file mode 100644 index 0000000..591297f --- /dev/null +++ b/docs/guides/migration-guide.md @@ -0,0 +1,38 @@ +# Migration Guide + +This page helps you upgrade between versions of the Zstandard plugin and CLI with minimal breakage. + +## General approach + +1. **Check the CHANGELOG** for the version you are upgrading to. Look for "Breaking changes" or "Deprecation". +2. **Update dependencies** in `pubspec.yaml` to the new version (e.g. `zstandard: ^1.3.30`). +3. **Run** `flutter pub get` (or `dart pub get` for CLI). +4. **Fix analyzer and tests**: run `flutter analyze` and `flutter test` (or `dart test`) and address any new errors or deprecations. +5. **Manually test** compress/decompress and platform-specific paths (e.g. web, Android) that you use. + +## Dependency version constraints + +- Prefer **caret** constraints for compatibility with patch updates: `zstandard: ^1.3.29`. +- If you must pin exact versions, use `zstandard: 1.3.29`. Prefer caret for easier upgrades. + +## API stability + +- The main plugin API (`Zstandard()`, `compress`, `decompress`, extensions on `Uint8List?`) is stable. New parameters are usually optional. +- The platform interface (`ZstandardPlatform`) is for implementors; application code should not depend on it directly. If you mock it in tests, check the CHANGELOG for any interface changes. + +## Platform interface changes + +If `ZstandardPlatform` gains new methods in a minor version, platform packages will implement them. As an app author you don’t need to change anything unless you implement or mock the platform yourself. + +## Web assets + +- If the web implementation’s required assets (zstd.js, zstd.wasm) or paths change, the CHANGELOG or zstandard_web README will note it. Update your `web/` copy and `index.html` script tag if needed. + +## CLI + +- CLI API (`ZstandardCLI`, extensions) is stable. Entry point names (e.g. `zstandard_cli:compress`) may be documented in the package README; check if they change. +- Precompiled native libraries are shipped with the package; no migration step unless you use custom builds. + +## Reporting issues + +If an upgrade breaks your app and the CHANGELOG doesn’t document it, open an issue with your previous version, new version, and a minimal repro.
diff --git a/docs/guides/performance-tips.md b/docs/guides/performance-tips.md new file mode 100644 index 0000000..24ec44c --- /dev/null +++ b/docs/guides/performance-tips.md @@ -0,0 +1,39 @@ +# Performance Tips + +Suggestions to get the best performance and resource usage when using the Zstandard plugin and CLI. + +## Compression level + +- Use **level 1–3** when speed matters (e.g. real-time, interactive). Level 3 is the default and is a good balance. +- Use **level 10+** only when you care more about size than speed (e.g. one-off backups, archival). Higher levels use more CPU and memory. + +## Data size + +- **Small data** (e.g. < 100 bytes): Compression may not reduce size (zstd has frame overhead). Consider skipping compression for very small payloads. +- **Large data**: The plugin may use a background isolate on native platforms to avoid blocking the UI. For very large inputs (e.g. tens of MB), consider **chunking**: compress chunks and store/transmit separately, or use streaming if the API supports it in the future. +- **Empty data**: Handled quickly; no need to avoid. + +## Memory + +- Compress and decompress allocate buffers (input + output). For very large inputs, peak memory is roughly proportional to input size plus compressed/decompressed size. Chunking reduces peak usage. +- On native platforms, work may run in an isolate; the main isolate only holds the input and result bytes, which helps keep UI responsive. + +## Reuse + +- **Zstandard()** is a singleton; reusing it is efficient. No need to cache the instance yourself. +- **ZstandardCLI()**: Creating a new instance is cheap; the underlying native library is loaded once per process. + +## Platform-specific + +- **Web**: No isolates; compression/decompression run on the main thread. For large data on web, consider chunking or moving work to a Web Worker if you implement it. +- **Native (Android, iOS, macOS, Linux, Windows)**: The implementation may offload work to an isolate; you get non-blocking behavior without extra code. + +## Measuring + +- Use `Stopwatch` or your preferred profiler to measure compress/decompress time for your typical payload sizes and levels. +- Use the Dart DevTools or platform tools to observe memory usage if you suspect high allocation. + +## See also + +- [Compression levels](compression-levels.md) +- [Architecture — Isolate pattern](../architecture/isolate-pattern.md) diff --git a/docs/guides/security.md b/docs/guides/security.md new file mode 100644 index 0000000..30a82b7 --- /dev/null +++ b/docs/guides/security.md @@ -0,0 +1,68 @@ +# Security + +This guide covers security considerations when using the Zstandard plugin and CLI: input validation, handling untrusted data, memory safety, and how to report vulnerabilities. + +## Input validation + +### Compression + +- **Input data**: The plugin accepts `Uint8List` for compression. No inherent size limit is enforced by the API; very large inputs may cause high memory usage or platform-specific limits. Validate or cap input size in your application when processing user-controlled data. +- **Compression level**: Valid levels are **1–22**. Levels outside this range may be accepted by some implementations but can produce non-portable or unexpected behaviour. Always validate the level (e.g. clamp to 1–22) before calling `compress`. 
+ +```dart +int safeLevel(int level) { + if (level < 1) return 1; + if (level > 22) return 22; + return level; +} +final compressed = await zstandard.compress(data, safeLevel(userLevel)); +``` + +### Decompression + +- **Untrusted compressed data**: Data that is not a valid Zstandard frame (random bytes, truncated data, or crafted payloads) will typically result in a **null** return from `decompress`, not an exception. The native zstd library is designed to fail safely on invalid input. +- **Bomb resistance**: Zstandard frames contain size information; the library uses bounded memory during decompression. Very large stored sizes in a malicious frame could still lead to large allocations. Prefer validating or limiting input size when decompressing data from untrusted sources. + +## Handling untrusted data + +When decompressing data from untrusted sources (network, user uploads, third-party files): + +1. **Check for null**: Always treat a null result as failure and do not use the result. + +```dart +final decompressed = await zstandard.decompress(receivedBytes); +if (decompressed == null) { + // Invalid or malicious input; reject + return; +} +// Only use decompressed after null check +``` + +2. **Limit input size**: Reject or refuse to decompress payloads above a size you are willing to allocate (e.g. cap at 10 MB or 100 MB depending on your use case). + +3. **Validate after decompression**: If the decompressed data has a known format (JSON, protocol buffer, etc.), validate it before use. The plugin only guarantees that the bytes are a valid zstd decompression result, not that the content is safe for your application. + +4. **Avoid trusting compressed size blindly**: If you expose decompressed size or progress to users, ensure it comes from the library’s result (e.g. length of the returned `Uint8List`) rather than from unvalidated metadata. + +## Memory safety + +- **Native code**: The plugin uses the official Zstandard C library via FFI (and WebAssembly on web). The library is widely used and maintained; buffer overflows and similar issues in zstd are addressed by upstream. +- **Dart/Flutter**: The Dart API uses `Uint8List`; no raw pointers are exposed. Memory is managed by the Dart VM and the native allocator used by zstd. +- **Large allocations**: Compression and decompression allocate memory proportional to input and output. To avoid out-of-memory conditions, limit input size and consider processing large files in chunks if supported by your workflow (see [Advanced usage](advanced-usage.md)). + +## Vulnerability reporting + +If you discover a security vulnerability in this plugin, its dependencies, or the way it uses the Zstandard library: + +1. **Do not** open a public GitHub issue for security-sensitive findings. +2. Report privately to the maintainers (e.g. via the repository’s contact or security policy, if stated). +3. Include a clear description, steps to reproduce, and impact if possible. +4. Allow a reasonable time for a fix before any public disclosure. + +For issues in the **upstream Zstandard library** (Facebook/Meta), follow the [Zstandard project’s security policy](https://github.com/facebook/zstd/security). 
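+ +Tying these rules together, a minimal guard for untrusted payloads (a sketch: `maxDecompressSize` and `decompressUntrusted` are application-level names chosen here, not part of the plugin API): + +```dart +import 'dart:typed_data'; + +import 'package:zstandard/zstandard.dart'; + +/// Application-chosen cap for untrusted input (not a plugin constant). +const int maxDecompressSize = 10 * 1024 * 1024; // 10 MB + +Future<Uint8List?> decompressUntrusted(Uint8List input) async { +  // 1. Limit input size before doing any work. +  if (input.length > maxDecompressSize) return null; +  // 2. A null result means invalid or corrupted input; treat it as a rejection. +  final result = await Zstandard().decompress(input); +  // 3. Validate the decompressed content (format checks) before using it. +  return result; +} +```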
## See also + +- [Error handling](error-handling.md) — null semantics and failure handling +- [Advanced usage](advanced-usage.md) — large data and memory considerations +- [Best practices](best-practices.md) — production checklist diff --git a/docs/guides/usage-examples.md b/docs/guides/usage-examples.md new file mode 100644 index 0000000..dc56449 --- /dev/null +++ b/docs/guides/usage-examples.md @@ -0,0 +1,107 @@ +# Usage Examples + +This page shows common usage patterns for the Zstandard plugin and CLI. + +## Flutter: compress and decompress + +```dart +import 'dart:typed_data'; + +import 'package:flutter/foundation.dart'; // for listEquals +import 'package:zstandard/zstandard.dart'; + +Future<void> roundtrip() async { +  final data = Uint8List.fromList(List.generate(1000, (i) => i % 256)); +  final z = Zstandard(); + +  final compressed = await z.compress(data, 3); +  if (compressed == null) return; + +  final decompressed = await z.decompress(compressed); +  assert(decompressed != null && listEquals(data, decompressed)); +} +``` + +## Using extensions + +```dart +final data = Uint8List.fromList([1, 2, 3, 4, 5]); +final compressed = await data.compress(compressionLevel: 5); +final decompressed = await compressed?.decompress(); +``` + +## Different compression levels + +```dart +final data = Uint8List.fromList(List.filled(10000, 42)); + +// Fast, less compression +final fast = await data.compress(compressionLevel: 1); + +// Default balance +final balanced = await data.compress(compressionLevel: 3); + +// High compression +final high = await data.compress(compressionLevel: 19); +``` + +## Handling null and errors + +```dart +Future<Uint8List?> compressSafely(Uint8List? input) async { +  if (input == null) return null; +  final compressed = await input.compress(compressionLevel: 3); +  return compressed; // may be null on failure +} + +Future<void> example() async { +  final compressed = await compressSafely(someData); +  if (compressed != null) { +    final back = await compressed.decompress(); +    if (back != null) { +      // use back +    } +  } +} +``` + +## CLI in code + +```dart +import 'dart:typed_data'; + +import 'package:zstandard_cli/zstandard_cli.dart'; + +void main() async { +  final cli = ZstandardCLI(); +  final data = Uint8List.fromList([1, 2, 3, 4, 5]); +  final compressed = await cli.compress(data, compressionLevel: 3); +  final decompressed = await cli.decompress(compressed ?? Uint8List(0)); +} +``` + +## File compression (conceptual) + +Read file → compress → write; read compressed file → decompress → use: + +```dart +import 'dart:io'; +import 'package:zstandard/zstandard.dart'; + +Future<void> compressFile(File file, File outFile) async { +  final bytes = await file.readAsBytes(); +  final compressed = await bytes.compress(compressionLevel: 3); +  if (compressed != null) { +    await outFile.writeAsBytes(compressed); +  } +} + +Future<Uint8List?> decompressFile(File file) async { +  final bytes = await file.readAsBytes(); +  return bytes.decompress(); +} +``` + +For production, use streaming or chunking for large files to control memory use. + +## See also + +- [API — Main](../api/main-api.md) +- [Compression levels](compression-levels.md) +- [Error handling](error-handling.md) diff --git a/docs/platforms/android.md b/docs/platforms/android.md new file mode 100644 index 0000000..c36b5b1 --- /dev/null +++ b/docs/platforms/android.md @@ -0,0 +1,86 @@ +# Android Platform Guide + +The **zstandard_android** package provides the Android implementation of the Zstandard Flutter plugin using FFI and the native zstd library.
+
+## Support
+
+| Architecture | Support |
+|--------------|---------|
+| armeabi-v7a | Yes (where supported by Flutter) |
+| arm64-v8a | Yes |
+| x86_64 | Yes (e.g. emulators) |
+
+## Installation
+
+Add the main plugin to your app; the Android implementation is included via the federated plugin:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+No additional Gradle or native setup is required for normal use. The plugin registers the Android implementation automatically when running on Android.
+
+## Architecture
+
+- **Native layer**: The facebook/zstd C library is built as part of the Android project (e.g. via CMake or Android NDK) and exposed as a shared library (e.g. `libzstandard_android_plugin.so`).
+- **Dart layer**: The package uses Dart FFI to open the library and generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`.
+- **Isolates**: The implementation may use a helper isolate for async compression/decompression to avoid blocking the UI thread.
+
+## Usage
+
+Use the main package API; the Android implementation is used automatically:
+
+```dart
+import 'package:zstandard/zstandard.dart';
+
+final zstandard = Zstandard();
+final compressed = await zstandard.compress(data, 3);
+final decompressed = await zstandard.decompress(compressed!);
+```
+
+Or use the extensions:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## Building the Native Library
+
+If you are developing or modifying the zstandard_android package:
+
+1. The native zstd source is under `zstandard_android/src/` (or the project’s native path).
+2. The Android build (e.g. `android/build.gradle`, CMake) compiles zstd and produces the shared library.
+3. FFI bindings are generated (e.g. with `ffigen`) from the zstd headers and committed or generated at build time.
+
+See the package’s `android/` and `src/` directories and the main repo’s [Building](../development/building.md) guide.
+
+## Testing
+
+- **Unit tests**: Run from the package directory: `flutter test`
+- **Integration tests**: Use the example app: run the app on an Android device or emulator and execute `integration_test` (e.g. `flutter test integration_test/` from the example).
+
+## Performance characteristics
+
+- **Compression/decompression**: Runs on a background isolate by default, so the UI thread stays responsive.
+- **Memory**: Peak usage is proportional to input size plus compressed/decompressed output; lower compression levels (1–3) use less memory than high levels (19–22).
+- **Throughput**: Comparable to native zstd; actual speed depends on device CPU and thermal state. Level 1–3 are fastest; level 22 is slowest.
+- **16KB page size**: The build supports the Android 15+ 16KB page size; no extra configuration is needed.
+
+## Known limitations
+
+- Requires a device/emulator with a supported ABI; otherwise the native library may fail to load.
+- Very large inputs may use significant memory (input + output buffers); consider chunking for very large data (see [Advanced usage](../guides/advanced-usage.md)).
+- Assembly optimizations (e.g. BMI2) are disabled in the Android build for compatibility; compression may be slightly slower than a fully optimized build.
+
+## Troubleshooting
+
+- **Library not found**: Ensure you are running on a supported ABI and that the plugin’s native library is built and packaged (e.g. `flutter build apk` or run from IDE).
+- **Crashes on compress/decompress**: Check that input is valid and that you are not passing null where `Uint8List` is required; check [Common Issues](../troubleshooting/common-issues.md).
+
+## See Also
+
+- [Architecture — FFI Implementation](../architecture/ffi-implementation.md)
+- [Architecture — Isolate Pattern](../architecture/isolate-pattern.md)
+- [API — Main](../api/main-api.md)
diff --git a/docs/platforms/cli.md b/docs/platforms/cli.md
new file mode 100644
index 0000000..5c136a3
--- /dev/null
+++ b/docs/platforms/cli.md
@@ -0,0 +1,108 @@
+# CLI Platform Guide
+
+The **zstandard_cli** package provides Zstandard compression and decompression for **pure Dart** applications (no Flutter) on **macOS, Windows, and Linux**. It uses FFI with precompiled native zstd libraries and supports both an in-code API and command-line entry points.
+
+## Support
+
+| Platform | x64 | arm64 | Precompiled |
+|----------|-----|-------|-------------|
+| macOS | Yes | Yes | Yes |
+| Windows | Yes | Yes | Yes |
+| Linux | Yes | Yes | Yes |
+
+## Installation
+
+Add the package to your Dart project (not Flutter):
+
+```yaml
+dependencies:
+  zstandard_cli: ^1.3.29
+```
+
+## Usage in Code
+
+```dart
+import 'dart:typed_data';
+
+import 'package:zstandard_cli/zstandard_cli.dart';
+
+void main() async {
+  final cli = ZstandardCLI();
+  final data = Uint8List.fromList([1, 2, 3, 4, 5]);
+
+  final compressed = await cli.compress(data, compressionLevel: 3);
+  final decompressed = await cli.decompress(compressed ?? Uint8List(0));
+}
+```
+
+With extensions:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## Command-Line Usage
+
+Compress a file with a given compression level:
+
+```bash
+dart run zstandard_cli:compress <file> <compression-level>
+```
+
+Example: `dart run zstandard_cli:compress myfile.txt 3`
+
+Decompress a file:
+
+```bash
+dart run zstandard_cli:decompress <file>
+```
+
+Example: `dart run zstandard_cli:decompress myfile.txt.zstd`
+
+Output file names and default paths are defined by the package (e.g. compressed files may get a `.zstd` suffix). See the package README for exact behavior.
+
+## Architecture
+
+- **Precompiled libraries**: The package ships with native zstd libraries per platform/architecture (e.g. in `lib/src/bin/` or similar). At runtime, the correct library is loaded based on the current platform and CPU architecture.
+- **FFI**: Dart opens the library with `DynamicLibrary` and uses generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`.
+- **No Flutter**: No dependency on Flutter; suitable for server or CLI Dart apps.
+
+## API Summary
+
+- **ZstandardCLI()** — Create an instance.
+- **compress(Uint8List data, {int compressionLevel = 3})** — Compress; returns `Future<Uint8List?>`.
+- **decompress(Uint8List data)** — Decompress; returns `Future<Uint8List?>`.
+- **getPlatformVersion()** — Returns a string like `"macOS 14.0"` or `"Linux ..."`.
+- **Extensions** on `Uint8List?`: `compress({int compressionLevel = 3})`, `decompress()`.
+
+See [CLI API Reference](../api/cli-api.md) for full details.
+
+## Testing
+
+From the package directory:
+
+```bash
+dart test
+```
+
+The package has a solid set of unit tests (small/large/empty data, compression levels, roundtrip). Run them on the target platform to ensure the native library loads and behaves correctly.
+
+## Performance characteristics
+
+- **No isolates**: Runs in the current isolate; suitable for CLI or server where blocking is acceptable.
+- **Throughput**: Comparable to native zstd; level 1–3 fastest, level 22 slowest.
+- **Memory**: Proportional to input and output; precompiled libs are built with standard zstd options.
+
+## Known limitations
+
+- **Desktop only**: macOS, Windows, Linux. For mobile or web, use the main **zstandard** Flutter plugin.
+- **Precompiled binaries**: You depend on the package’s shipped libraries; for custom builds or other platforms you would need to build and load your own library (see [Building](../development/building.md) and `scripts/build_*.sh`).
+
+## Troubleshooting
+
+- **Library not found**: Ensure you are on a supported platform and architecture. Check that the package’s native library for that platform/arch is present and that `openZstdLibrary()` (or equivalent) can find it.
+- **Compress/decompress returns null**: Check that input is valid (e.g. non-empty for cases where the implementation requires it, valid zstd frame for decompress). See [Common Issues](../troubleshooting/common-issues.md).
+
+## See Also
+
+- [API — CLI](../api/cli-api.md)
+- [Architecture — FFI Implementation](../architecture/ffi-implementation.md)
diff --git a/docs/platforms/ios.md b/docs/platforms/ios.md
new file mode 100644
index 0000000..d06bda2
--- /dev/null
+++ b/docs/platforms/ios.md
@@ -0,0 +1,84 @@
+# iOS Platform Guide
+
+The **zstandard_ios** package provides the iOS implementation of the Zstandard Flutter plugin using FFI and the native zstd library.
+
+## Support
+
+| Architecture | Support |
+|--------------|---------|
+| arm64 | Yes (device) |
+| x86_64 | Yes (simulator) |
+
+## Installation
+
+Add the main plugin to your app; the iOS implementation is included via the federated plugin:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+No additional setup is required for normal use. The plugin registers the iOS implementation automatically when running on iOS.
+
+## Architecture
+
+- **Native layer**: The facebook/zstd C library is synced from the repo root `zstd/` into the package’s `ios/Classes/zstd/` (via the podspec’s prepare_command and script phases) and built as part of the CocoaPods target. It is linked into the app and loaded by the Dart plugin via FFI.
+- **Dart layer**: The package uses Dart FFI and generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`.
+- **Isolates**: The implementation may use a helper isolate for async compression/decompression to avoid blocking the UI thread.
+
+## Usage
+
+Use the main package API; the iOS implementation is used automatically:
+
+```dart
+import 'package:zstandard/zstandard.dart';
+
+final zstandard = Zstandard();
+final compressed = await zstandard.compress(data, 3);
+final decompressed = await zstandard.decompress(compressed!);
+```
+
+Or use the extensions:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## Building the Native Library
+
+If you are developing the zstandard_ios package:
+
+1. The canonical zstd source is at the repo root `zstd/`; the podspec syncs it into `ios/Classes/zstd/` at install/build time.
+2. The project uses a Podspec and CocoaPods to build the framework.
+3. FFI bindings are generated (e.g. with `ffigen`) from the zstd headers.
+
+See the package’s `ios/` directory and the repo’s [Building](../development/building.md) guide.
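+
+As a rough illustration of the FFI layer: because zstd is statically linked on iOS, symbols can be resolved from the running process instead of a separately opened library. This is a sketch, not the plugin’s actual bindings (those are generated by `ffigen`):
+
+```dart
+import 'dart:ffi';
+
+// zstd is linked into the app binary, so look up symbols in the process.
+final DynamicLibrary zstd = DynamicLibrary.process();
+
+// C signature: size_t ZSTD_compressBound(size_t srcSize);
+final int Function(int srcSize) zstdCompressBound = zstd.lookupFunction<
+    Size Function(Size srcSize),
+    int Function(int srcSize)>('ZSTD_compressBound');
+```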
+ +## Testing + +- **Unit tests**: From the package directory: `flutter test` +- **Integration tests**: Run the example app on an iOS device or simulator and run `integration_test` (e.g. `flutter test integration_test/` from the example). + +## Performance characteristics + +- **Compression/decompression**: Runs on a background isolate so the UI thread stays responsive. +- **Memory**: Peak usage scales with input and output size; high levels (19–22) use more memory. +- **Throughput**: Level 1–3 are fastest; level 22 is slowest. Assembly is disabled in the iOS build for compatibility. + +## Known limitations + +- Simulator and device use different architectures; ensure the correct slice is built for the target. +- Very large inputs may use significant memory; consider chunking (see [Advanced usage](../guides/advanced-usage.md)). +- Static linking only; no dynamic zstd loading. + +## Troubleshooting + +- **Library or symbol not found**: Ensure the native target is included in your app’s build and that you are building for the correct architecture (simulator vs device). +- **Crashes**: Verify inputs are valid and not null where required; see [Common Issues](../troubleshooting/common-issues.md). + +## See Also + +- [Architecture — FFI Implementation](../architecture/ffi-implementation.md) +- [Architecture — Isolate Pattern](../architecture/isolate-pattern.md) +- [API — Main](../api/main-api.md) diff --git a/docs/platforms/linux.md b/docs/platforms/linux.md new file mode 100644 index 0000000..f77b3f8 --- /dev/null +++ b/docs/platforms/linux.md @@ -0,0 +1,84 @@ +# Linux Platform Guide + +The **zstandard_linux** package provides the Linux implementation of the Zstandard Flutter plugin using FFI and the native zstd library. + +## Support + +| Architecture | Support | +|--------------|---------| +| x64 | Yes | +| arm64 | Yes | + +## Installation + +Add the main plugin to your app; the Linux implementation is included via the federated plugin: + +```yaml +dependencies: + zstandard: ^1.3.29 +``` + +No additional setup is required for normal use. The plugin registers the Linux implementation automatically when running on Linux. + +## Architecture + +- **Native layer**: The facebook/zstd C library is built with CMake (e.g. under `linux/` or `src/`) and produces a shared library `libzstandard_linux_plugin.so` that the Dart plugin loads via FFI. +- **Dart layer**: The package uses Dart FFI and generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`. +- **Isolates**: The implementation may use a helper isolate for async compression/decompression. + +## Usage + +Use the main package API; the Linux implementation is used automatically: + +```dart +import 'package:zstandard/zstandard.dart'; + +final zstandard = Zstandard(); +final compressed = await zstandard.compress(data, 3); +final decompressed = await zstandard.decompress(compressed!); +``` + +Or use the extensions: + +```dart +final compressed = await data.compress(compressionLevel: 3); +final decompressed = await compressed?.decompress(); +``` + +## Building the Native Library + +If you are developing the zstandard_linux package: + +1. The native zstd source is under the package’s `src/`; the Linux build is typically under `linux/` using CMake. +2. CMake builds the shared library (e.g. `libzstandard_linux_plugin.so`). +3. FFI bindings are generated (e.g. with `ffigen`) from the zstd headers. 
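+
+As an illustration of what those generated bindings boil down to, a hand-written sketch (the typedef and variable names here are invented; the generated code differs):
+
+```dart
+import 'dart:ffi';
+
+// C signature: size_t ZSTD_compress(void* dst, size_t dstCapacity,
+//                                   const void* src, size_t srcSize, int level);
+typedef ZstdCompressNative = Size Function(
+    Pointer<Void> dst, Size dstCapacity, Pointer<Void> src, Size srcSize, Int level);
+typedef ZstdCompressDart = int Function(
+    Pointer<Void> dst, int dstCapacity, Pointer<Void> src, int srcSize, int level);
+
+final DynamicLibrary lib = DynamicLibrary.open('libzstandard_linux_plugin.so');
+final ZstdCompressDart zstdCompress =
+    lib.lookupFunction<ZstdCompressNative, ZstdCompressDart>('ZSTD_compress');
+```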
+
+See the package’s `linux/` and `src/` directories and the repo’s [Building](../development/building.md) guide.
+
+## Testing
+
+- **Unit tests**: From the package directory: `flutter test`
+- **Integration tests**: Run the example Linux app and execute `integration_test` from the example.
+
+## Performance characteristics
+
+- **Compression/decompression**: Typically runs in a background isolate so the UI thread is not blocked.
+- **Memory**: Allocations scale with input and output size; high compression levels (19–22) use more memory.
+- **Throughput**: Similar to native zstd; level 1–3 are fastest, level 22 slowest. Depends on host CPU.
+
+## Known limitations
+
+- Only Linux is supported; for other platforms use the corresponding platform package.
+- Very large inputs may use significant memory; consider chunking (see [Advanced usage](../guides/advanced-usage.md)).
+- The shared library must be on the library path (e.g. next to the executable or `LD_LIBRARY_PATH`) when the app runs.
+
+## Troubleshooting
+
+- **libzstandard_linux_plugin.so not found**: Ensure the .so is built and available in the library path when the app runs (e.g. same directory as the executable or `LD_LIBRARY_PATH`). Build with `flutter build linux` or run from the IDE.
+- **Crashes**: Verify inputs and null safety; see [Common Issues](../troubleshooting/common-issues.md).
+
+## See Also
+
+- [Architecture — FFI Implementation](../architecture/ffi-implementation.md)
+- [Architecture — Isolate Pattern](../architecture/isolate-pattern.md)
+- [API — Main](../api/main-api.md)
diff --git a/docs/platforms/macos.md b/docs/platforms/macos.md
new file mode 100644
index 0000000..338c9bd
--- /dev/null
+++ b/docs/platforms/macos.md
@@ -0,0 +1,84 @@
+# macOS Platform Guide
+
+The **zstandard_macos** package provides the macOS implementation of the Zstandard Flutter plugin using FFI and the native zstd library.
+
+## Support
+
+| Architecture | Support |
+|--------------|---------|
+| x64 | Yes |
+| arm64 | Yes (Apple Silicon) |
+
+## Installation
+
+Add the main plugin to your app; the macOS implementation is included via the federated plugin:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+No additional setup is required for normal use. The plugin registers the macOS implementation automatically when running on macOS.
+
+## Architecture
+
+- **Native layer**: The facebook/zstd C library is synced from the repo root `zstd/` into the package’s `macos/Classes/zstd/` (via the podspec’s prepare_command and script phases) and built as part of the CocoaPods target, producing a framework that the Dart plugin loads via FFI.
+- **Dart layer**: The package uses Dart FFI and generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`.
+- **Isolates**: The implementation may use a helper isolate for async compression/decompression.
+
+## Usage
+
+Use the main package API; the macOS implementation is used automatically:
+
+```dart
+import 'package:zstandard/zstandard.dart';
+
+final zstandard = Zstandard();
+final compressed = await zstandard.compress(data, 3);
+final decompressed = await zstandard.decompress(compressed!);
+```
+
+Or use the extensions:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## Building the Native Library
+
+If you are developing the zstandard_macos package:
+
+1. 
The canonical zstd source is at the repo root `zstd/`; the podspec syncs it into `macos/Classes/zstd/` at install/build time.
+2. The macOS build (CocoaPods/Xcode) compiles zstd from `Classes/zstd/` and produces the framework.
+3. FFI bindings are generated (e.g. with `ffigen`) from the zstd headers.
+
+See the package’s build configuration and the repo’s [Building](../development/building.md) guide.
+
+## Testing
+
+- **Unit tests**: From the package directory: `flutter test`
+- **Integration tests**: Run the example macOS app and execute `integration_test` from the example.
+
+## Performance characteristics
+
+- **Compression/decompression**: Typically runs in a background isolate.
+- **Memory**: Proportional to input and output; lower levels use less memory.
+- **Throughput**: Level 1–3 fastest; level 22 slowest. Supports both Intel and Apple Silicon.
+
+## Known limitations
+
+- Only macOS is supported; for other platforms use the corresponding platform package.
+- Very large inputs may use significant memory; consider chunking (see [Advanced usage](../guides/advanced-usage.md)).
+- The native library must be built for the current architecture (x64 or arm64) or as a universal binary.
+
+## Troubleshooting
+
+- **Library not found**: Ensure the native library/framework is built and linked for the current architecture (x64 vs arm64).
+- **Crashes**: Verify inputs and null safety; see [Common Issues](../troubleshooting/common-issues.md).
+
+## See Also
+
+- [Architecture — FFI Implementation](../architecture/ffi-implementation.md)
+- [Architecture — Isolate Pattern](../architecture/isolate-pattern.md)
+- [API — Main](../api/main-api.md)
diff --git a/docs/platforms/web.md b/docs/platforms/web.md
new file mode 100644
index 0000000..cedc54f
--- /dev/null
+++ b/docs/platforms/web.md
@@ -0,0 +1,111 @@
+# Web Platform Guide
+
+The **zstandard_web** package provides the web implementation of the Zstandard Flutter plugin using JavaScript and WebAssembly (zstd compiled with Emscripten). It does not use Dart FFI.
+
+## Support
+
+| Environment | Support |
+|-------------|---------|
+| Browser (Chrome, Firefox, Safari, Edge) | Yes |
+| WebAssembly | Yes (zstd.wasm) |
+
+## Installation
+
+1. Add the main plugin to your app:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+2. **Copy web assets** into your Flutter web project:
+   - **zstd.js** — Emscripten-generated JS that loads and wraps the WASM module.
+   - **zstd.wasm** — Compiled Zstandard C library.
+
+   These files are provided by the zstandard_web package (e.g. under `blob/` or as documented in the package README). Copy them into your app’s `web/` directory (e.g. `web/zstd.js`, `web/zstd.wasm`).
+
+3. **Include the script** in your `web/index.html`:
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <script src="zstd.js"></script>
+</head>
+<body>
+  ...
+</body>
+</html>
+```
+
+The script must load before your Flutter app so that `compressData` and `decompressData` are available when the Dart code runs.
+
+## Architecture
+
+- **zstd.js** loads **zstd.wasm** and exposes global functions `compressData(inputData, compressionLevel)` and `decompressData(compressedData)`.
+- **Dart** uses `dart:js_interop` (and `package:web`) to call these functions and convert between `Uint8List` and JS typed arrays (see the sketch below).
+- There is no FFI and no background isolate; compression/decompression run on the main thread. For large data, consider chunking or moving work to a Web Worker if the implementation supports it.
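+
+As a sketch of that interop layer (the binding shapes here are assumptions; the real implementation lives in the zstandard_web package):
+
+```dart
+import 'dart:js_interop';
+import 'dart:typed_data';
+
+// Binds the global compressData(inputData, compressionLevel) from zstd.js.
+@JS('compressData')
+external JSUint8Array? _compressData(JSUint8Array input, int level);
+
+Uint8List? compressViaJs(Uint8List data, int level) =>
+    _compressData(data.toJS, level)?.toDart;
+```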
+ +## Usage + +Use the main package API; the web implementation is used automatically when running on web: + +```dart +import 'package:zstandard/zstandard.dart'; + +final zstandard = Zstandard(); +final compressed = await zstandard.compress(data, 3); +final decompressed = await zstandard.decompress(compressed!); +``` + +Or use the extensions: + +```dart +final compressed = await data.compress(compressionLevel: 3); +final decompressed = await compressed?.decompress(); +``` + +## Building zstd.js and zstd.wasm + +If you need to rebuild the WebAssembly artifacts: + +1. Install and activate the [Emscripten SDK](https://emscripten.org/). +2. Clone the [facebook/zstd](https://github.com/facebook/zstd) repository. +3. Run `emcc` on the zstd C sources with: + - WASM output + - Exported functions: `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, `ZSTD_getFrameContentSize`, `malloc`, `free` +4. Add the wrapper functions `compressData` and `decompressData` in `zstd.js` (or a separate script) that allocate buffers, call the C functions, and return the result or null. + +Detailed commands and wrapper code are in the [zstandard_web README](https://github.com/landamessenger/zstandard/tree/master/zstandard_web). + +## Small Data Behavior + +For very small inputs (e.g. less than 9 bytes), the implementation may return the data unchanged for compress or decompress, as zstd has a minimum frame size. Check the package source for exact behavior. + +## Testing + +- **Unit tests**: From the package directory: `flutter test` (some tests may require a browser or mock the JS API). +- **Integration tests**: The example app has web integration tests (e.g. `example/integration_test/zstandard_web_integration_test.dart`) that run in the browser. + +## Performance characteristics + +- **Single-threaded**: Compression and decompression run on the main thread (no isolates on web). Large payloads can block the UI. +- **Throughput**: Generally slower than native; level 1–3 are faster than high levels. +- **Memory**: WASM heap usage scales with input and output; consider smaller chunks for large data. + +## Known limitations + +- Requires `zstd.js` and `zstd.wasm` to be deployed with your app and loaded before use. +- No isolate-based offloading; heavy work runs on the main thread. For large data, consider chunking or a Web Worker (see [Advanced usage](../guides/advanced-usage.md)). +- Behavior may differ slightly from native (e.g. small-data handling, error codes). Decompress failure may throw instead of returning null. + +## Troubleshooting + +- **compressData / decompressData is not defined**: Ensure `zstd.js` is included in `index.html` and loads before the Flutter app. Check the browser console for script errors. +- **WASM load failed**: Ensure `zstd.wasm` is served from the same origin or with correct CORS and that the path in `zstd.js` is correct. +- **Null from compress/decompress**: Check that input is valid and that the JS functions return a typed array or null; see [Common Issues](../troubleshooting/common-issues.md). + +## See Also + +- [Architecture — Web Implementation](../architecture/web-implementation.md) +- [API — Main](../api/main-api.md) diff --git a/docs/platforms/windows.md b/docs/platforms/windows.md new file mode 100644 index 0000000..f5f02d6 --- /dev/null +++ b/docs/platforms/windows.md @@ -0,0 +1,84 @@ +# Windows Platform Guide + +The **zstandard_windows** package provides the Windows implementation of the Zstandard Flutter plugin using FFI and the native zstd library. 
+
+## Support
+
+| Architecture | Support |
+|--------------|---------|
+| x64 | Yes |
+| arm64 | Yes |
+
+## Installation
+
+Add the main plugin to your app; the Windows implementation is included via the federated plugin:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+No additional setup is required for normal use. The plugin registers the Windows implementation automatically when running on Windows.
+
+## Architecture
+
+- **Native layer**: The facebook/zstd C library is built with CMake under the package’s `windows/` (or `src/`) and produces a DLL (e.g. `zstandard_windows_plugin.dll`) that the Dart plugin loads via FFI.
+- **Dart layer**: The package uses Dart FFI and generated bindings to call `ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, and `ZSTD_getFrameContentSize`.
+- **Isolates**: The implementation may use a helper isolate for async compression/decompression.
+
+## Usage
+
+Use the main package API; the Windows implementation is used automatically:
+
+```dart
+import 'package:zstandard/zstandard.dart';
+
+final zstandard = Zstandard();
+final compressed = await zstandard.compress(data, 3);
+final decompressed = await zstandard.decompress(compressed!);
+```
+
+Or use the extensions:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## Building the Native Library
+
+If you are developing the zstandard_windows package:
+
+1. The native zstd source is under the package’s `src/` (or equivalent); the Windows build is typically under `windows/`.
+2. CMake is used to build the zstd library and the plugin DLL.
+3. FFI bindings are generated (e.g. with `ffigen`) from the zstd headers.
+
+See the package’s `windows/` and `src/` directories and the repo’s [Building](../development/building.md) guide.
+
+## Testing
+
+- **Unit tests**: From the package directory: `flutter test`
+- **Integration tests**: Run the example Windows app and execute `integration_test` from the example.
+
+## Performance characteristics
+
+- **Compression/decompression**: Typically runs in a background isolate.
+- **Memory**: Scales with input and output size; high levels use more memory.
+- **Throughput**: Level 1–3 fastest; level 22 slowest. Supports x64 and ARM64.
+
+## Known limitations
+
+- Only Windows is supported; for other platforms use the corresponding platform package.
+- Very large inputs may use significant memory; consider chunking (see [Advanced usage](../guides/advanced-usage.md)).
+- The DLL must be next to the executable or on the path when the app runs.
+
+## Troubleshooting
+
+- **DLL not found**: Ensure the DLL is built and placed where the plugin expects it (e.g. next to the executable or in a known path). Build the Windows app with `flutter build windows` or run from the IDE.
+- **Crashes**: Verify inputs and null safety; see [Common Issues](../troubleshooting/common-issues.md).
+
+## See Also
+
+- [Architecture — FFI Implementation](../architecture/ffi-implementation.md)
+- [Architecture — Isolate Pattern](../architecture/isolate-pattern.md)
+- [API — Main](../api/main-api.md)
diff --git a/docs/troubleshooting/common-issues.md b/docs/troubleshooting/common-issues.md
new file mode 100644
index 0000000..73f91e2
--- /dev/null
+++ b/docs/troubleshooting/common-issues.md
@@ -0,0 +1,68 @@
+# Common Issues
+
+Frequently encountered issues and how to resolve them.
+
+## compress or decompress returns null
+
+**Cause**: Compression or decompression failed. 
For decompress, the input is often not valid Zstandard data (corrupted, truncated, or never compressed with zstd).
+
+**What to do**:
+- Check that the input is non-null and, for decompress, that it is a complete zstd frame.
+- If decompressing data from a file or network, verify the source (e.g. file not truncated, correct format).
+- Log or handle the null in your code; see [Error handling](../guides/error-handling.md).
+
+## Extension on null returns null
+
+**Cause**: The extension is on `Uint8List?`. When the receiver is null, `compress()` and `decompress()` return null by design.
+
+**What to do**: Use null-aware code: `final c = await maybeData?.compress();` and check `c != null` before use.
+
+## Web: "compressData is not defined" or similar
+
+**Cause**: The web implementation requires `zstd.js` (and `zstd.wasm`) to be loaded before the app runs. The script was not included or failed to load.
+
+**What to do**:
+- Add `<script src="zstd.js"></script>` in `web/index.html` (before your app script).
+- Ensure `zstd.js` and `zstd.wasm` are in your `web/` directory and that the paths are correct.
+- Open the browser console and fix any script load errors (e.g. 404, CORS). See [Platforms — Web](../platforms/web.md).
+
+## Native library not found (Android, iOS, macOS, Linux, Windows)
+
+**Cause**: The plugin’s native library (e.g. .so, .dylib, .dll) was not built or is not in the path the plugin expects.
+
+**What to do**:
+- Build the app for the target platform (e.g. `flutter build apk`, `flutter run -d linux`). Do not assume copying a prebuilt binary is enough unless the plugin documents it.
+- Ensure you are on a supported architecture (e.g. arm64, x64). Simulator/emulator architecture must match (e.g. iOS simulator x86_64/arm64).
+- On Linux, set `LD_LIBRARY_PATH` if the .so is not next to the executable. On Windows, ensure the DLL is in the same directory as the executable or in PATH.
+
+## CLI: Library load error on desktop
+
+**Cause**: zstandard_cli uses precompiled native libraries. The library for your platform/architecture may be missing or incompatible.
+
+**What to do**:
+- Run on a supported platform: macOS, Windows, or Linux, and x64 or arm64.
+- Update the package: `dart pub upgrade zstandard_cli`.
+- If the problem persists, open an issue with your OS and architecture (e.g. `uname -m`, Windows version).
+
+## Slow compression on large data
+
+**Cause**: Higher compression levels and larger inputs take more CPU time.
+
+**What to do**:
+- Use a lower level (e.g. 1–3) for speed; see [Compression levels](../guides/compression-levels.md).
+- On native platforms, the plugin should use a background isolate; ensure you are not blocking the main isolate elsewhere.
+- Consider chunking very large data to limit peak memory and to show progress (see the example below).
+
+## Version mismatch or dependency resolution errors
+
+**Cause**: Different packages in the repo (or your app) depend on different versions of zstandard or platform packages.
+
+**What to do**:
+- Use a consistent version in your `pubspec.yaml` (e.g. `zstandard: ^1.3.29`). Run `flutter pub get` and check for resolution errors.
+- If you depend on multiple packages from this repo, align their versions (e.g. all ^1.3.29). See [Migration guide](../guides/migration-guide.md).
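+
+## Example: chunked compression
+
+A minimal sketch of the chunking advice above. The chunk size and error handling are illustrative, and each chunk becomes an independent zstd frame, so your container format must record chunk boundaries in order to decompress them individually.
+
+```dart
+import 'dart:typed_data';
+
+import 'package:zstandard/zstandard.dart';
+
+/// Compresses [data] in independent chunks to bound peak memory use.
+Future<List<Uint8List>> compressInChunks(
+  Uint8List data, {
+  int chunkSize = 4 << 20, // 4 MB per chunk (illustrative)
+  int level = 3,
+}) async {
+  final zstd = Zstandard();
+  final chunks = <Uint8List>[];
+  for (var offset = 0; offset < data.length; offset += chunkSize) {
+    final end =
+        (offset + chunkSize < data.length) ? offset + chunkSize : data.length;
+    final chunk = Uint8List.sublistView(data, offset, end);
+    final compressed = await zstd.compress(chunk, level);
+    if (compressed == null) {
+      throw StateError('Compression failed at offset $offset');
+    }
+    chunks.add(compressed);
+  }
+  return chunks;
+}
+```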
+ +## See also + +- [Platform issues](platform-issues.md) +- [Debugging](debugging.md) +- [Error handling](../guides/error-handling.md) diff --git a/docs/troubleshooting/debugging.md b/docs/troubleshooting/debugging.md new file mode 100644 index 0000000..99b4f75 --- /dev/null +++ b/docs/troubleshooting/debugging.md @@ -0,0 +1,55 @@ +# Debugging + +Tips for debugging issues with the Zstandard plugin and CLI. + +## Enable logging + +The plugin may log errors or debug info. Check whether the implementation uses `dart:developer` log or `print` and enable verbose logging if available. On Flutter, you can use `debugPrint` in your app to trace input/output sizes and null returns. + +## Verify input and output + +- **Compress**: Log the length of the input and the compressed result. If the result is null, the implementation failed (check platform-specific logs). +- **Decompress**: Ensure the input is exactly the bytes that were produced by `compress` (same buffer, no truncation). Try decompressing in a minimal test (e.g. roundtrip in main). + +## Minimal repro + +- Isolate the issue: one platform, one call (e.g. `Zstandard().compress(data, 3)` with a small `Uint8List`). If it works in isolation, the problem may be with data source, size, or concurrency. +- Test with the **example** app in the repo. If the example works but your app doesn’t, compare dependencies, Flutter version, and how you call the API. +- For native crashes, get a minimal Dart snippet and the exact device/OS/architecture. + +## Platform-specific debugging + +- **Android**: Use `adb logcat` and filter by your app or “zstd”/“zstandard”. Look for `UnsatisfiedLinkError` or native stack traces. +- **iOS/macOS**: Use Xcode’s debugger and console. Check that the correct scheme and architecture are selected. +- **Linux/Windows**: Run from the terminal and check stderr. Use a debug build if needed (`flutter run -d linux` or `windows`). +- **Web**: Use the browser’s Developer Tools (Console, Network). Confirm `zstd.js` and `zstd.wasm` load (200 status). Step through or log in the JS if you suspect the wrapper. +- **CLI**: Run `dart test` in the zstandard_cli package. If tests pass, the library loads; then narrow down your usage (e.g. file path, data size). + +## Analyzer and tests + +- Run `flutter analyze` (or `dart analyze`) and fix all errors. Warnings may point to null or type issues that only show at runtime in some paths. +- Run unit tests: `flutter test` in the main and platform packages. Fix any failures; they often reveal API misuse or platform assumptions. + +## Native build issues + +- **CMake (Linux/Windows)**: Run CMake with verbose output to see which compiler and paths are used. Ensure the zstd source path in the plugin’s CMake is correct. +- **CocoaPods (iOS/macOS)**: Run `pod install` with `--verbose`. Check that the plugin’s podspec is found and that the native target is built. +- **Gradle (Android)**: Run `flutter build apk --verbose` and check the native build step; ensure NDK is installed and the ABI is correct. + +## Reporting a bug + +When opening an issue, include: + +- Package and version (e.g. zstandard 1.3.29) +- Platform (Android, iOS, macOS, Windows, Linux, web, CLI) +- Flutter/Dart version +- Minimal code that reproduces the issue +- Expected vs actual behavior +- Any error messages or logs (full stack trace for crashes) +- For native issues: OS version and architecture (e.g. 
arm64, x64)
+
+## See also
+
+- [Common issues](common-issues.md)
+- [Platform issues](platform-issues.md)
+- [Testing](../development/testing.md)
diff --git a/docs/troubleshooting/platform-issues.md b/docs/troubleshooting/platform-issues.md
new file mode 100644
index 0000000..16fed31
--- /dev/null
+++ b/docs/troubleshooting/platform-issues.md
@@ -0,0 +1,43 @@
+# Platform-Specific Issues
+
+Issues that are specific to one platform or environment.
+
+## Android
+
+- **UnsatisfiedLinkError / native library not found**: Build the app with `flutter build apk` or run from Android Studio so the plugin’s .so is built and packaged. Ensure the ABI (arm64-v8a, armeabi-v7a, x86_64) matches your device or emulator.
+- **Crashes in compress/decompress**: Ensure you are not passing null where `Uint8List` is required and that the data is not corrupted (for decompress). Check logcat for native crashes.
+
+## iOS
+
+- **Symbol not found / dylib load**: Ensure the iOS target is built (e.g. run from Xcode or `flutter run -d ios`) and that you are targeting a supported architecture (arm64 device, x86_64/arm64 simulator).
+- **CocoaPods issues**: Run `pod install` in the example’s `ios/` directory. Clear DerivedData if the plugin’s native code still doesn’t link.
+
+## macOS
+
+- **Library not loaded**: The plugin loads a dynamic library (e.g. .dylib or framework). Ensure the app is built with `flutter build macos` or run from Xcode. For Apple Silicon vs x64, use the correct build target.
+
+## Windows
+
+- **DLL not found**: The plugin expects its DLL (e.g. `zstandard_windows_plugin.dll`) to be loadable. Build with `flutter build windows`; the DLL should be next to the executable. If you deploy manually, copy the DLL as well.
+- **Wrong architecture**: Build for the correct architecture (x64 or arm64). Mixing 32-bit and 64-bit can cause load failures.
+
+## Linux
+
+- **libzstandard_linux_plugin.so not found**: Build with `flutter build linux` or run with `flutter run -d linux`. The .so is produced by CMake. If you run the binary from another directory, set `LD_LIBRARY_PATH` to the directory containing the .so or place the .so next to the executable.
+
+## Web
+
+- **WASM or JS errors**: Ensure `zstd.js` and `zstd.wasm` are served from the same origin (or CORS is set correctly) and that the path in the script matches. Check the browser console and network tab.
+- **compressData/decompressData undefined**: The script must load before the Flutter app. Put `<script src="zstd.js"></script>` in `<head>` and ensure it loads without errors.
+- **Slow or blocking**: Web runs on the main thread. For large data, consider chunking or offloading to a Web Worker if you implement it.
+
+## CLI (macOS, Windows, Linux)
+
+- **Only desktop**: The CLI package does not run on mobile or web. Use the main Flutter plugin for those.
+- **Library load failure**: Ensure you are on a supported OS and architecture (x64 or arm64). Update the package; if the problem persists, report the OS and arch.
+ +## See also + +- [Common issues](common-issues.md) +- [Debugging](debugging.md) +- [Platforms](../platforms/) — Per-platform setup diff --git a/mutation_test_config.xml b/mutation_test_config.xml new file mode 100644 index 0000000..3d3f773 --- /dev/null +++ b/mutation_test_config.xml @@ -0,0 +1,24 @@ + + + + + + lib + + + flutter test + + + **/*_generated.dart + **/*.g.dart + **/generated/** + + + + + + + + + + diff --git a/quality_summary.md b/quality_summary.md new file mode 100644 index 0000000..684f1d3 --- /dev/null +++ b/quality_summary.md @@ -0,0 +1,19 @@ +# Quality Summary + +Generated: 2026-03-15T20:49:55.220946 + +## Coverage + +Run `./scripts/collect_all_coverage.sh` first. + +## Test Packages + +- zstandard: has test/ +- zstandard_platform_interface: has test/ +- zstandard_android: has test/ +- zstandard_ios: has test/ +- zstandard_macos: has test/ +- zstandard_linux: has test/ +- zstandard_windows: has test/ +- zstandard_web: has test/ +- zstandard_cli: has test/ diff --git a/scripts/build_android.sh b/scripts/build_android.sh new file mode 100755 index 0000000..67339c6 --- /dev/null +++ b/scripts/build_android.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Build the Android plugin native library (libzstandard_android.so). +# Used to verify the NDK/CMake build; normally the library is built when +# building an app that depends on the plugin. +# Usage: from repo root, run: ./scripts/build_android.sh +# Requires: Android SDK, NDK, and (optionally) Gradle. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +PLUGIN="$ROOT/zstandard_android" + +if [[ ! -d "$PLUGIN/android" ]]; then + echo "zstandard_android not found." + exit 1 +fi + +echo "Building zstandard_android native library..." +cd "$PLUGIN/android" +if command -v ./gradlew >/dev/null 2>&1; then + ./gradlew assembleRelease + echo "Build complete. Outputs in build/intermediates." +else + echo "No Gradle wrapper. Run from a Flutter project or install Android SDK/NDK and add gradle wrapper." + echo "To only verify CMake: cd $PLUGIN/android && externalNativeBuild is triggered by Gradle." + exit 1 +fi diff --git a/scripts/build_ios.sh b/scripts/build_ios.sh new file mode 100755 index 0000000..6269027 --- /dev/null +++ b/scripts/build_ios.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Build the iOS plugin (CocoaPods). Normally built when building an app. +# Usage: from repo root, run: ./scripts/build_ios.sh +# Requires: macOS, Xcode, CocoaPods. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +if [[ "$(uname)" != "Darwin" ]]; then + echo "iOS build requires macOS." + exit 1 +fi + +echo "Building zstandard_ios (pod install in example)..." +cd "$ROOT/zstandard_ios/example/ios" +pod install +echo "Done. Open Runner.xcworkspace in Xcode to build the app." diff --git a/scripts/build_linux.sh b/scripts/build_linux.sh new file mode 100755 index 0000000..3f50a4c --- /dev/null +++ b/scripts/build_linux.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Build Linux precompiled zstd libraries for zstandard_cli (x64 and optionally ARM64). +# Uses the canonical zstd source at repo root zstd/. Run ./scripts/update_zstd.sh if needed. +# Usage: from repo root, run: ./scripts/build_linux.sh +# Requires: CMake, gcc, git. For ARM64: aarch64-linux-gnu-gcc or native ARM host. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +CLI="$ROOT/zstandard_cli" +ZSTD="$ROOT/zstd" +BIN="$CLI/lib/src/bin" +mkdir -p "$BIN" + +if [[ ! -d "$ZSTD" || ! 
-f "$ZSTD/zstd.h" ]]; then
+  echo "Error: Canonical zstd source not found at $ZSTD"
+  echo "Run: ./scripts/update_zstd.sh"
+  exit 1
+fi
+echo "Using zstd from $ZSTD"
+
+echo "Building Linux x64..."
+cd "$CLI/builders/linux_x64"
+rm -rf build && mkdir build && cd build
+cmake ..
+cmake --build . --config Release
+mv libzstandard_linux.so "$BIN/libzstandard_linux_x64.so"
+cd .. && rm -rf build
+
+echo "Building Linux ARM64 (if toolchain available)..."
+cd "$CLI/builders/linux_arm"
+rm -rf build && mkdir build && cd build
+if cmake -DCMAKE_TOOLCHAIN_FILE=../arm64-toolchain.cmake .. 2>/dev/null; then
+  cmake --build . --config Release
+  mv libzstandard_linux.so "$BIN/libzstandard_linux_arm64.so"
+  echo "ARM64 built."
+else
+  echo "Skipping ARM64 (no toolchain or native ARM)."
+fi
+cd .. && rm -rf build
+
+echo "Done. Outputs in $BIN:"
+ls -la "$BIN"/*.so 2>/dev/null || true
diff --git a/scripts/build_macos.sh b/scripts/build_macos.sh
new file mode 100755
index 0000000..3699af4
--- /dev/null
+++ b/scripts/build_macos.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Build macOS precompiled zstd libraries for zstandard_cli (Intel + ARM, universal).
+# Uses the canonical zstd source at repo root zstd/ (run ./scripts/sync_zstd_ios_macos.sh first or copy upstream lib/ into zstd/).
+# Usage: from repo root, run: ./scripts/build_macos.sh
+# Requires: CMake, Xcode command line tools.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+CLI="$ROOT/zstandard_cli"
+ZSTD_SRC="$ROOT/zstd"
+BIN="$CLI/lib/src/bin"
+mkdir -p "$BIN"
+
+if [[ ! -d "$ZSTD_SRC" || ! -f "$ZSTD_SRC/zstd.h" ]]; then
+  echo "Error: Canonical zstd source not found at $ZSTD_SRC"
+  echo "Run: ./scripts/update_zstd.sh  # fetches from github.com/facebook/zstd"
+  exit 1
+fi
+echo "Using zstd from $ZSTD_SRC"
+
+echo "Building macOS Intel x64..."
+cd "$CLI/builders/macos_intel"
+rm -rf build && mkdir build && cd build
+cmake -DCMAKE_OSX_ARCHITECTURES=x86_64 ..
+cmake --build . --config Release
+mv libzstandard_macos.dylib "$BIN/libzstandard_macos_intel.dylib"
+cd .. && rm -rf build
+
+echo "Building macOS ARM64..."
+cd "$CLI/builders/macos_arm"
+rm -rf build && mkdir build && cd build
+cmake -DCMAKE_OSX_ARCHITECTURES=arm64 ..
+cmake --build . --config Release
+mv libzstandard_macos.dylib "$BIN/libzstandard_macos_arm.dylib"
+cd .. && rm -rf build
+
+echo "Creating universal binary or single-arch dylib..."
+cd "$BIN"
+# lipo -info ends each report with the architecture name, so take the last field.
+ARCH_INTEL=$(lipo -info libzstandard_macos_intel.dylib 2>/dev/null | awk '{print $NF}')
+ARCH_ARM=$(lipo -info libzstandard_macos_arm.dylib 2>/dev/null | awk '{print $NF}')
+if [[ -n "$ARCH_INTEL" && -n "$ARCH_ARM" && "$ARCH_INTEL" != "$ARCH_ARM" ]]; then
+  lipo -create -output libzstandard_macos.dylib libzstandard_macos_intel.dylib libzstandard_macos_arm.dylib
+  rm -f libzstandard_macos_intel.dylib libzstandard_macos_arm.dylib
+elif [[ -f libzstandard_macos_arm.dylib ]]; then
+  echo "Single architecture (arm64); using it as output."
+  mv libzstandard_macos_arm.dylib libzstandard_macos.dylib
+  rm -f libzstandard_macos_intel.dylib
+elif [[ -f libzstandard_macos_intel.dylib ]]; then
+  echo "Single architecture (x86_64); using it as output."
+  mv libzstandard_macos_intel.dylib libzstandard_macos.dylib
+  rm -f libzstandard_macos_arm.dylib
+else
+  echo "No dylib produced."; exit 1
+fi
+lipo -info libzstandard_macos.dylib
+echo "Done. 
Output: $BIN/libzstandard_macos.dylib" diff --git a/scripts/build_web_wasm.sh b/scripts/build_web_wasm.sh new file mode 100755 index 0000000..3e56da2 --- /dev/null +++ b/scripts/build_web_wasm.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash +# Build zstd.js and zstd.wasm from the repo's zstd/ using Emscripten (emsdk), +# then copy them to zstandard_web/blob/ and zstandard_web/example/web/, and add the compressData/decompressData +# wrappers expected by the web plugin. +# +# Usage: from repo root, run: ./scripts/build_web_wasm.sh +# +# Requires: git. Downloads emsdk into a temporary directory and removes it +# after the build. The single source for zstd C code is zstd/ at repo root +# (same as Android, iOS, macOS, Windows, Linux, and CLI). +# +# See zstandard_web/README.md for usage of the generated files. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +ZSTD_ROOT="$ROOT/zstd" +OUT_BLOB="$ROOT/zstandard_web/blob" +OUT_EXAMPLE_WEB="$ROOT/zstandard_web/example/web" + +if [[ ! -d "$ZSTD_ROOT" || ! -f "$ZSTD_ROOT/zstd.h" ]]; then + echo "Error: Canonical zstd source not found at $ZSTD_ROOT (expected zstd.h)." + echo "Run: ./scripts/update_zstd.sh" + exit 1 +fi + +BUILD_DIR=$(mktemp -d) +trap 'rm -rf "$BUILD_DIR"' EXIT + +echo "Cloning emsdk into $BUILD_DIR ..." +git clone --depth 1 https://github.com/emscripten-core/emsdk.git "$BUILD_DIR/emsdk" + +echo "Installing and activating Emscripten (latest) ..." +cd "$BUILD_DIR/emsdk" +./emsdk install latest +./emsdk activate latest +# shellcheck source=/dev/null +source ./emsdk_env.sh + +echo "Building zstd with emcc from $ZSTD_ROOT ..." +cd "$ZSTD_ROOT" + +# Same exports as documented in zstandard_web/README.md; only common/compress/decompress (no legacy/dictBuilder). +COMMON_SRC=$(find common -name "*.c" 2>/dev/null | tr '\n' ' ') +COMPRESS_SRC=$(find compress -name "*.c" 2>/dev/null | tr '\n' ' ') +DECOMPRESS_SRC=$(find decompress -name "*.c" 2>/dev/null | tr '\n' ' ') + +emcc -O3 \ + $COMMON_SRC $COMPRESS_SRC $DECOMPRESS_SRC \ + -I. -Icommon -Icompress -Idecompress \ + -s WASM=1 \ + -s EXPORT_NAME="zstdWasmModule" \ + -s EXPORTED_FUNCTIONS="['_ZSTD_compress','_ZSTD_decompress','_malloc','_free','_ZSTD_getFrameContentSize','_ZSTD_compressBound']" \ + -s EXPORTED_RUNTIME_METHODS="['HEAPU8']" \ + -o zstd_generated.js + +if [[ ! -f zstd_generated.js || ! -f zstd_generated.wasm ]]; then + echo "Error: emcc did not produce zstd_generated.js / zstd_generated.wasm" + exit 1 +fi + +# Append the compressData/decompressData wrappers required by the web plugin (see zstandard_web/README.md). 
+cat >> zstd_generated.js << 'WRAPPER_JS' + +function compressData(inputData, compressionLevel) { + let inputPtr = Module._malloc(inputData.length); + Module.HEAPU8.set(inputData, inputPtr); + + let outputBufferSize = Module._ZSTD_compressBound(inputData.length); + let outputPtr = Module._malloc(outputBufferSize); + + let compressedSize = Module._ZSTD_compress( + outputPtr, + outputBufferSize, + inputPtr, + inputData.length, + compressionLevel + ); + + if (compressedSize < 0) { + console.error('Compression error, error code: ', compressedSize); + Module._free(inputPtr); + Module._free(outputPtr); + return null; + } else { + let compressedData = new Uint8Array(Module.HEAPU8.buffer, outputPtr, compressedSize); + let out = compressedData.slice(0); + Module._free(inputPtr); + Module._free(outputPtr); + return out; + } +} + +function decompressData(compressedData) { + let compressedPtr = Module._malloc(compressedData.length); + Module.HEAPU8.set(compressedData, compressedPtr); + + let decompressedSize = Module._ZSTD_getFrameContentSize(compressedPtr, compressedData.length); + if (decompressedSize === -1 || decompressedSize === -2) { + console.error('Error in obtaining the original size of the data'); + Module._free(compressedPtr); + return null; + } + + let decompressedPtr = Module._malloc(decompressedSize); + + let resultSize = Module._ZSTD_decompress( + decompressedPtr, + decompressedSize, + compressedPtr, + compressedData.length + ); + + if (resultSize < 0) { + console.error('Decompression error, error code: ', resultSize); + Module._free(compressedPtr); + Module._free(decompressedPtr); + return null; + } else { + let decompressedData = new Uint8Array(Module.HEAPU8.buffer, decompressedPtr, resultSize); + let out = decompressedData.slice(0); + Module._free(compressedPtr); + Module._free(decompressedPtr); + return out; + } +} +WRAPPER_JS + +mkdir -p "$OUT_BLOB" "$OUT_EXAMPLE_WEB" +cp zstd_generated.wasm "$OUT_BLOB/zstd.wasm" +cp zstd_generated.wasm "$OUT_EXAMPLE_WEB/zstd.wasm" +cp zstd_generated.js "$OUT_BLOB/zstd.js" +cp zstd_generated.js "$OUT_EXAMPLE_WEB/zstd.js" +rm -f "$ZSTD_ROOT/zstd_generated.js" "$ZSTD_ROOT/zstd_generated.wasm" + +echo "Done. zstd.js and zstd.wasm have been written to:" +echo " - $OUT_BLOB/" +echo " - $OUT_EXAMPLE_WEB/" +echo "Built from the same zstd/ source used by Android, iOS, macOS, Windows, Linux, and CLI." diff --git a/scripts/build_windows.bat b/scripts/build_windows.bat new file mode 100644 index 0000000..ece7287 --- /dev/null +++ b/scripts/build_windows.bat @@ -0,0 +1,50 @@ +@echo off +REM Build Windows precompiled zstd libraries for zstandard_cli (x64 and ARM64). +REM Usage: from repo root, run: scripts\build_windows.bat +REM Requires: CMake, Visual Studio 2022, git. + +set ROOT=%~dp0.. +set CLI=%ROOT%\zstandard_cli +set BIN=%CLI%\lib\src\bin + +if not exist "%BIN%" mkdir "%BIN%" + +echo Fetching zstd sources into zstandard_cli/src... +cd /d "%CLI%" +if exist "src\zstd.h" ( + echo Using existing zstandard_cli/src. +) else ( + if exist zstd rmdir /S /Q zstd + if exist src rmdir /S /Q src + git clone --depth 1 https://github.com/facebook/zstd.git + mkdir src + xcopy zstd\lib src\ /E /I + rmdir /S /Q zstd +) + +echo Building Windows x64... +cd /d "%CLI%\builders\windows_x64" +if exist build rmdir /S /Q build +mkdir build +cd build +cmake -G "Visual Studio 17 2022" -A x64 .. +cmake --build . --config Release +cd Release +move zstandard_windows.dll "%BIN%\zstandard_windows_x64.dll" +cd ..\.. +rmdir /S /Q build + +echo Building Windows ARM64... 
+cd /d "%CLI%\builders\windows_arm"
+if exist build rmdir /S /Q build
+mkdir build
+cd build
+cmake -G "Visual Studio 17 2022" -A ARM64 ..
+cmake --build . --config Release
+cd Release
+move zstandard_windows.dll "%BIN%\zstandard_windows_arm64.dll"
+cd ..\..
+rmdir /S /Q build
+
+echo Done. Outputs in %BIN%
+dir "%BIN%\*.dll"
diff --git a/scripts/check_coverage.sh b/scripts/check_coverage.sh
new file mode 100644
index 0000000..07c57a7
--- /dev/null
+++ b/scripts/check_coverage.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+# Check that coverage meets the required threshold (default 95%) for each package.
+# Usage: ./scripts/check_coverage.sh [threshold_percent]
+# Run from repo root. Requires lcov (brew install lcov / apt-get install lcov).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+THRESHOLD="${1:-95}"
+
+check_package() {
+  local dir="$1"
+  local use_flutter="$2"
+  if [[ ! -d "$dir" ]]; then
+    echo "Skip $dir (not found)"
+    return 0
+  fi
+  echo "---- Checking coverage for $dir (threshold ${THRESHOLD}%) ----"
+  if [[ "$use_flutter" == "1" ]]; then
+    (cd "$dir" && flutter test --coverage) || return 1
+  else
+    (cd "$dir" && dart test --coverage=coverage) || return 1
+    (cd "$dir" && dart run coverage:format_coverage --lcov --in=coverage --out=coverage/lcov.info --report-on=lib) || return 1
+  fi
+  if ! command -v lcov &>/dev/null; then
+    echo "Warning: lcov not installed; cannot verify threshold. Install with: brew install lcov (macOS) or apt-get install lcov (Linux)"
+    return 0
+  fi
+  local summary
+  summary=$(lcov --summary "$dir/coverage/lcov.info" 2>&1)
+  local pct
+  pct=$(echo "$summary" | grep "lines" | awk '{print $2}' | sed 's/%//')
+  if [[ -z "$pct" ]]; then
+    echo "Could not parse coverage for $dir"
+    return 1
+  fi
+  echo "Coverage for $dir: ${pct}%"
+  if (( $(echo "$pct < $THRESHOLD" | bc -l 2>/dev/null || echo 0) )); then
+    echo "ERROR: Coverage ${pct}% is below threshold ${THRESHOLD}%"
+    return 1
+  fi
+  return 0
+}
+
+FAILED=0
+for pkg in zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web; do
+  check_package "$pkg" 1 || FAILED=1
+done
+check_package "zstandard_cli" 0 || FAILED=1
+
+if [[ $FAILED -eq 1 ]]; then
+  echo "One or more packages are below the coverage threshold."
+  exit 1
+fi
+echo "All packages meet the coverage threshold (${THRESHOLD}%)."
diff --git a/scripts/check_performance_regression.dart b/scripts/check_performance_regression.dart
new file mode 100644
index 0000000..8209245
--- /dev/null
+++ b/scripts/check_performance_regression.dart
@@ -0,0 +1,77 @@
+// ignore_for_file: avoid_print
+/// Checks benchmark results against a baseline. Fails if throughput regresses beyond threshold.
+/// Usage: dart run scripts/check_performance_regression.dart --baseline=path/baseline.json --current=path/current.json [--threshold=0.10]
+/// Threshold is the allowed fractional regression (default 0.10 = 10%).
+
+import 'dart:convert';
+import 'dart:io';
+
+void main(List<String> args) {
+  String? baselinePath;
+  String? currentPath;
+  double threshold = 0.10;
+
+  for (final arg in args) {
+    if (arg.startsWith('--baseline=')) {
+      baselinePath = arg.split('=').last;
+    } else if (arg.startsWith('--current=')) {
+      currentPath = arg.split('=').last;
+    } else if (arg.startsWith('--threshold=')) {
+      threshold = double.tryParse(arg.split('=').last) ?? 
0.10;
+    }
+  }
+
+  if (baselinePath == null || currentPath == null) {
+    print('Usage: dart run scripts/check_performance_regression.dart --baseline=BASELINE.json --current=CURRENT.json [--threshold=0.10]');
+    exit(1);
+  }
+
+  final baselineFile = File(baselinePath);
+  final currentFile = File(currentPath);
+  if (!baselineFile.existsSync()) {
+    print('Baseline file not found: $baselinePath');
+    exit(1);
+  }
+  if (!currentFile.existsSync()) {
+    print('Current file not found: $currentPath');
+    exit(1);
+  }
+
+  final baseline = jsonDecode(baselineFile.readAsStringSync()) as Map<String, dynamic>;
+  final current = jsonDecode(currentFile.readAsStringSync()) as Map<String, dynamic>;
+
+  final baselineResults = (baseline['results'] as List).cast<Map<String, dynamic>>();
+  final currentResults = (current['results'] as List).cast<Map<String, dynamic>>();
+
+  if (baselineResults.length != currentResults.length) {
+    print('Result count mismatch: baseline ${baselineResults.length}, current ${currentResults.length}');
+    exit(1);
+  }
+
+  var failed = false;
+  for (var i = 0; i < baselineResults.length; i++) {
+    final name = baselineResults[i]['name'] as String;
+    final baseCompress = (baselineResults[i]['compress_throughput_mbps'] as num).toDouble();
+    final baseDecompress = (baselineResults[i]['decompress_throughput_mbps'] as num).toDouble();
+    final currCompress = (currentResults[i]['compress_throughput_mbps'] as num).toDouble();
+    final currDecompress = (currentResults[i]['decompress_throughput_mbps'] as num).toDouble();
+
+    final compressRegress = baseCompress > 0 ? (baseCompress - currCompress) / baseCompress : 0.0;
+    final decompressRegress = baseDecompress > 0 ? (baseDecompress - currDecompress) / baseDecompress : 0.0;
+
+    if (compressRegress > threshold) {
+      print('REGRESSION $name: compress ${currCompress.toStringAsFixed(2)} MB/s (baseline $baseCompress, ${(compressRegress * 100).toStringAsFixed(1)}% regression)');
+      failed = true;
+    }
+    if (decompressRegress > threshold) {
+      print('REGRESSION $name: decompress ${currDecompress.toStringAsFixed(2)} MB/s (baseline $baseDecompress, ${(decompressRegress * 100).toStringAsFixed(1)}% regression)');
+      failed = true;
+    }
+  }
+
+  if (failed) {
+    print('Performance regression detected (threshold ${(threshold * 100).toInt()}%)');
+    exit(1);
+  }
+  print('No performance regression detected.');
+}
diff --git a/scripts/collect_all_coverage.sh b/scripts/collect_all_coverage.sh
new file mode 100644
index 0000000..660f3e6
--- /dev/null
+++ b/scripts/collect_all_coverage.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Collect coverage from all packages into a single directory for reporting.
+# Run from repo root. Creates coverage_all/ with an lcov.info from each package.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+mkdir -p coverage_all
+
+for pkg in zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web; do
+  if [ ! -d "$pkg" ]; then continue; fi
+  echo "Collecting coverage from $pkg..."
+ (cd "$pkg" && flutter test --coverage 2>/dev/null) || true + if [ -f "$pkg/coverage/lcov.info" ]; then + cp "$pkg/coverage/lcov.info" "coverage_all/${pkg}.lcov.info" 2>/dev/null || true + fi +done + +if [ -d "zstandard_cli" ]; then + (cd zstandard_cli && dart test --coverage=coverage 2>/dev/null) || true + (cd zstandard_cli && dart run coverage:format_coverage --lcov -i coverage -o coverage/lcov.info --packages=.dart_tool/package_config.json 2>/dev/null) || true + if [ -f "zstandard_cli/coverage/lcov.info" ]; then + cp zstandard_cli/coverage/lcov.info coverage_all/zstandard_cli.lcov.info 2>/dev/null || true + fi +fi + +echo "Coverage files in coverage_all/" +ls -la coverage_all/ 2>/dev/null || true diff --git a/scripts/coverage_report.bat b/scripts/coverage_report.bat new file mode 100644 index 0000000..3f6adbe --- /dev/null +++ b/scripts/coverage_report.bat @@ -0,0 +1,29 @@ +@echo off +REM Generate coverage reports for zstandard packages. +REM Usage: from repo root, run: scripts\coverage_report.bat +REM Output: each package's coverage/ folder; no merge on Windows unless lcov is installed. + +set ROOT=%~dp0.. +cd /d "%ROOT%" + +if not exist coverage mkdir coverage + +for %%p in (zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web) do ( + if exist "%%p" ( + echo ---- Coverage: %%p ---- + cd "%%p" + flutter test --coverage + cd "%ROOT%" + ) +) + +if exist zstandard_cli ( + echo ---- Coverage: zstandard_cli ---- + cd zstandard_cli + dart test --coverage=coverage + dart run coverage:format_coverage --lcov -i coverage -o coverage/lcov.info --packages=.dart_tool/package_config.json + cd "%ROOT%" +) + +echo Coverage files are in each package's coverage\ directory. +echo For merged report, use WSL or install lcov and run scripts/coverage_report.sh diff --git a/scripts/coverage_report.sh b/scripts/coverage_report.sh new file mode 100755 index 0000000..f383fa0 --- /dev/null +++ b/scripts/coverage_report.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Generate combined coverage report for zstandard packages. +# Usage: from repo root, run: ./scripts/coverage_report.sh +# Output: coverage/ directory with lcov.info and optionally HTML report. +# Requires: Flutter SDK, Dart SDK, and (for HTML) lcov or genhtml. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" +mkdir -p coverage +COVERAGE_DIR="$ROOT/coverage" +LCOV_ARGS=() + +# Run tests with coverage per package and collect lcov +for pkg in zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web; do + if [[ ! 
+  if [[ ! -d "$pkg" ]]; then continue; fi
+  echo "---- Coverage: $pkg ----"
+  (cd "$pkg" && flutter test --coverage 2>/dev/null) || true
+  if [[ -f "$pkg/coverage/lcov.info" ]]; then
+    LCOV_ARGS+=("--add-tracefile" "$pkg/coverage/lcov.info")
+  fi
+done
+
+# CLI: dart test --coverage then format to lcov
+if [[ -d "zstandard_cli" ]]; then
+  echo "---- Coverage: zstandard_cli ----"
+  (cd zstandard_cli && dart test --coverage=coverage 2>/dev/null && dart run coverage:format_coverage --lcov -i coverage -o coverage/lcov.info --packages=.dart_tool/package_config.json 2>/dev/null) || true
+  if [[ -f "zstandard_cli/coverage/lcov.info" ]]; then
+    LCOV_ARGS+=("--add-tracefile" "zstandard_cli/coverage/lcov.info")
+  fi
+fi
+
+# Merge all lcov files if we have any
+if [[ ${#LCOV_ARGS[@]} -gt 0 ]]; then
+  if command -v lcov >/dev/null 2>&1; then
+    lcov "${LCOV_ARGS[@]}" --output-file "$COVERAGE_DIR/lcov.info" --ignore-errors source,gcov
+    echo "Merged lcov written to $COVERAGE_DIR/lcov.info"
+    if command -v genhtml >/dev/null 2>&1; then
+      genhtml "$COVERAGE_DIR/lcov.info" -o "$COVERAGE_DIR/html" --ignore-errors source
+      echo "HTML report: $COVERAGE_DIR/html/index.html"
+    else
+      echo "Install lcov (genhtml) for HTML report."
+    fi
+  else
+    echo "Install lcov to merge coverage. Per-package coverage is in each package's coverage/ directory."
+  fi
+else
+  echo "No coverage files generated. Run tests with --coverage in each package first."
+fi
diff --git a/scripts/ensure_macos_framework.sh b/scripts/ensure_macos_framework.sh
new file mode 100755
index 0000000..5a4be58
--- /dev/null
+++ b/scripts/ensure_macos_framework.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Ensure the macOS native framework is built so tests can load it.
+# Runs flutter build macos from the plugin example if the framework is missing.
+# Usage: from repo root, ./scripts/ensure_macos_framework.sh
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+EXAMPLE="$ROOT/zstandard_macos/example"
+PLUGIN="$ROOT/zstandard_macos"
+FRAMEWORK_NAME="zstandard_macos.framework"
+
+# Check common locations for the built framework. Globs are not expanded inside
+# [[ -d ... ]], so the flutter_build path is checked with compgen -G instead.
+framework_found() {
+  [[ -d "$PLUGIN/macos/$FRAMEWORK_NAME" ]] || \
+  [[ -d "$EXAMPLE/build/macos/Build/Products/Debug/$FRAMEWORK_NAME" ]] || \
+  compgen -G "$EXAMPLE/.dart_tool/flutter_build/*/macos/$FRAMEWORK_NAME" >/dev/null || \
+  [[ -d "$EXAMPLE/macos/Flutter/ephemeral/.symlinks/plugins/zstandard_macos/macos/$FRAMEWORK_NAME" ]]
+}
+
+if framework_found; then
+  echo "macOS framework already present."
+  exit 0
+fi
+
+echo "Building macOS framework (flutter build macos --debug)..."
+cd "$EXAMPLE"
+flutter pub get
+flutter build macos --debug
+cd "$ROOT"
+
+if framework_found; then
+  echo "macOS framework built successfully."
+else
+  echo "Warning: Framework may be in a different path; integration tests will load it from the app bundle." >&2
+fi
diff --git a/scripts/generate_quality_report.dart b/scripts/generate_quality_report.dart
new file mode 100644
index 0000000..7862a21
--- /dev/null
+++ b/scripts/generate_quality_report.dart
@@ -0,0 +1,50 @@
+// ignore_for_file: avoid_print
+/// Generates a simple quality summary from coverage and test results.
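+/// Output (illustrative sketch of quality_summary.md):
+///   # Quality Summary
+///   ## Coverage
+///   - zstandard: lcov collected
+///   ## Test Packages
+///   - zstandard: has test/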
+/// Usage: dart run scripts/generate_quality_report.dart
+/// Reads coverage_all/*.lcov.info if present and writes quality_summary.md
+
+import 'dart:io';
+
+void main() {
+  final root = Directory.current.path;
+  final coverageDir = Directory('$root/coverage_all');
+  final buffer = StringBuffer();
+  buffer.writeln('# Quality Summary');
+  buffer.writeln('');
+  buffer.writeln('Generated: ${DateTime.now().toIso8601String()}');
+  buffer.writeln('');
+
+  if (coverageDir.existsSync()) {
+    buffer.writeln('## Coverage');
+    buffer.writeln('');
+    for (final f in coverageDir.listSync()) {
+      if (f is File && f.path.endsWith('.lcov.info')) {
+        final name = f.uri.pathSegments.last.replaceAll('.lcov.info', '');
+        buffer.writeln('- $name: lcov collected');
+      }
+    }
+  } else {
+    buffer.writeln('## Coverage');
+    buffer.writeln('');
+    buffer.writeln('Run `./scripts/collect_all_coverage.sh` first.');
+  }
+
+  buffer.writeln('');
+  buffer.writeln('## Test Packages');
+  buffer.writeln('');
+  final packages = [
+    'zstandard', 'zstandard_platform_interface', 'zstandard_android',
+    'zstandard_ios', 'zstandard_macos', 'zstandard_linux', 'zstandard_windows',
+    'zstandard_web', 'zstandard_cli'
+  ];
+  for (final p in packages) {
+    final testDir = Directory('$root/$p/test');
+    final pubspec = File('$root/$p/pubspec.yaml');
+    if (pubspec.existsSync()) {
+      buffer.writeln('- $p: ${testDir.existsSync() ? "has test/" : "no test/"}');
+    }
+  }
+
+  File('$root/quality_summary.md').writeAsStringSync(buffer.toString());
+  print('Wrote quality_summary.md');
+}
diff --git a/scripts/manage_android_emulator.sh b/scripts/manage_android_emulator.sh
new file mode 100755
index 0000000..5b3f7bb
--- /dev/null
+++ b/scripts/manage_android_emulator.sh
@@ -0,0 +1,244 @@
+#!/usr/bin/env bash
+# Manage the Android emulator for integration tests.
+# Usage: ./scripts/manage_android_emulator.sh <command>
+# Commands: create | start | stop | status | device-id
+#
+# Requires: Android SDK with platform-tools and emulator. Set ANDROID_HOME or ANDROID_SDK_ROOT.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+# Architecture: on Apple Silicon (arm64) use the arm64-v8a system image; on Intel (x86_64) use x86_64.
+# Using x86_64 on M1/M2 often causes the emulator process to exit immediately.
+ARCH=$(uname -m)
+if [[ "$ARCH" == "arm64" ]]; then
+  SYSIMG_ABI="arm64-v8a"
+  DEFAULT_AVD_SUFFIX="arm64"
+else
+  SYSIMG_ABI="x86_64"
+  DEFAULT_AVD_SUFFIX="x86_64"
+fi
+
+AVD_NAME="${ZSTANDARD_AVD_NAME:-zstandard_test_${DEFAULT_AVD_SUFFIX}}"
+PID_FILE="${ROOT}/.android_emulator.pid"
+EMULATOR_LOG="${ROOT}/.android_emulator.log"
+API_LEVEL="${ZSTANDARD_AVD_API_LEVEL:-30}"
+BOOT_TIMEOUT="${ZSTANDARD_AVD_BOOT_TIMEOUT:-240}"
+DEVICE_READY_TIMEOUT="${ZSTANDARD_AVD_DEVICE_READY_TIMEOUT:-60}"
+SYSIMG_PKG="system-images;android-${API_LEVEL};google_apis;${SYSIMG_ABI}"
+
+# Log to stderr so that "device-id" and "start" stdout contain only the device id for capture.
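+# Example (as used by scripts/test_android_integration.sh):
+#   DEVICE_ID="$(./scripts/manage_android_emulator.sh start)"
+#   flutter test integration_test/ -d "$DEVICE_ID"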
+log() { echo "[android-emulator] $*" >&2; } +log_step() { echo "[android-emulator] [step] $*" >&2; } +log_wait() { echo "[android-emulator] [wait ${1}s] $2" >&2; } + +# Resolve SDK path +log "Using command: ${1:-unknown}" +if [[ -n "$ANDROID_HOME" ]]; then + SDK="$ANDROID_HOME" + log "SDK from ANDROID_HOME: $SDK" +elif [[ -n "$ANDROID_SDK_ROOT" ]]; then + SDK="$ANDROID_SDK_ROOT" + log "SDK from ANDROID_SDK_ROOT: $SDK" +else + echo "Error: Set ANDROID_HOME or ANDROID_SDK_ROOT" >&2 + exit 1 +fi + +EMULATOR="${SDK}/emulator/emulator" +ADB="${SDK}/platform-tools/adb" +AVDMANAGER="${SDK}/cmdline-tools/latest/bin/avdmanager" +SDKMANAGER="${SDK}/cmdline-tools/latest/bin/sdkmanager" + +log "arch=$ARCH abi=$SYSIMG_ABI AVD=$AVD_NAME system_image=$SYSIMG_PKG" +log "adb=$ADB emulator=$EMULATOR" + +if [[ ! -x "$ADB" ]]; then + echo "Error: adb not found at $ADB. Install platform-tools." >&2 + exit 1 +fi + +if [[ ! -x "$EMULATOR" ]]; then + echo "Error: emulator not found at $EMULATOR. Install emulator package." >&2 + exit 1 +fi + +cmd_create() { + log_step "Checking if AVD $AVD_NAME exists..." + if "$AVDMANAGER" list avd 2>/dev/null | grep -q "Name: $AVD_NAME"; then + log "AVD $AVD_NAME already exists." + return 0 + fi + log_step "Creating AVD $AVD_NAME (API $API_LEVEL, $SYSIMG_ABI)..." + if [[ -x "$SDKMANAGER" ]]; then + log "Installing system image: $SYSIMG_PKG" + "$SDKMANAGER" --install "$SYSIMG_PKG" 2>/dev/null || true + fi + log "Running avdmanager create avd (device pixel_4)..." + echo no | "$AVDMANAGER" create avd --force -n "$AVD_NAME" \ + -k "$SYSIMG_PKG" \ + -d "pixel_4" 2>/dev/null || { + log "Create failed. Install the image manually: $SDKMANAGER --install \"$SYSIMG_PKG\"" + log "List available: $SDKMANAGER --list | grep system-images" + exit 1 + } + log "AVD $AVD_NAME created." +} + +cmd_stop() { + log_step "Stopping emulator..." + if [[ -f "$PID_FILE" ]]; then + local pid + pid=$(cat "$PID_FILE") + if kill -0 "$pid" 2>/dev/null; then + log "Killing emulator process PID $pid" + kill "$pid" 2>/dev/null || true + sleep 2 + kill -9 "$pid" 2>/dev/null || true + fi + rm -f "$PID_FILE" + log "Removed PID file." + fi + "$ADB" emu kill 2>/dev/null || true + log "Emulator stopped." +} + +cmd_status() { + if [[ -f "$PID_FILE" ]]; then + local pid + pid=$(cat "$PID_FILE") + if kill -0 "$pid" 2>/dev/null; then + log "Emulator running (PID $pid)." + "$ADB" devices 2>/dev/null | head -5 + return 0 + fi + log "PID file present but process $pid not running; removing PID file." + rm -f "$PID_FILE" + fi + log "Emulator not running." + return 1 +} + +cmd_device_id() { + local dev + dev=$("$ADB" devices -l 2>/dev/null | grep -E "emulator-[0-9]+|device " | head -1 | awk '{print $1}') + if [[ -n "$dev" ]]; then + echo "$dev" + return 0 + fi + echo "Error: No device/emulator found. Start emulator first." >&2 + return 1 +} + +wait_for_boot() { + log_step "Phase 1: waiting for emulator to appear in 'adb devices' (timeout ${DEVICE_READY_TIMEOUT}s)" + local elapsed=0 + while [[ $elapsed -lt $DEVICE_READY_TIMEOUT ]]; do + local dev_out + dev_out=$("$ADB" devices -l 2>/dev/null || true) + if echo "$dev_out" | grep -qE "emulator-[0-9]+.*device"; then + log "Emulator visible in adb devices at ${elapsed}s." 
+ echo "$dev_out" | head -5 | while read -r line; do log " $line"; done + break + fi + if [[ -f "$PID_FILE" ]]; then + local pid + pid=$(cat "$PID_FILE") + if kill -0 "$pid" 2>/dev/null; then + log_wait "$elapsed" "emulator process alive (PID $pid), adb devices: $(echo "$dev_out" | head -3 | tr '\n' ' ')" + else + log "Emulator process (PID $pid) has exited. Last adb devices: $dev_out" + if [[ -f "$EMULATOR_LOG" ]]; then + log "Last 40 lines of emulator log ($EMULATOR_LOG):" + tail -40 "$EMULATOR_LOG" | while read -r line; do echo "[android-emulator] $line"; done + fi + echo "Error: Emulator process died before device appeared. See $EMULATOR_LOG for emulator output." >&2 + return 1 + fi + else + log_wait "$elapsed" "no PID file yet; adb devices: $(echo "$dev_out" | head -3 | tr '\n' ' ')" + fi + sleep 3 + elapsed=$((elapsed + 3)) + done + if ! "$ADB" devices 2>/dev/null | grep -qE "emulator-[0-9]+.*device"; then + log "Final adb devices:" + "$ADB" devices -l 2>/dev/null | while read -r line; do log " $line"; done + echo "Error: Emulator did not appear within ${DEVICE_READY_TIMEOUT}s." >&2 + return 1 + fi + + log_step "Phase 2: waiting for sys.boot_completed=1 (timeout ${BOOT_TIMEOUT}s)" + elapsed=0 + while [[ $elapsed -lt $BOOT_TIMEOUT ]]; do + local completed + completed=$("$ADB" shell getprop sys.boot_completed 2>/dev/null | tr -d '\r\n ' || echo "?") + if [[ "$completed" == "1" ]]; then + log "Boot completed at ${elapsed}s." + sleep 3 + return 0 + fi + if [[ $((elapsed % 15)) -eq 0 && $elapsed -gt 0 ]]; then + log_wait "$elapsed" "sys.boot_completed='$completed' (waiting for '1')" + fi + sleep 5 + elapsed=$((elapsed + 5)) + done + log "Last sys.boot_completed: $("$ADB" shell getprop sys.boot_completed 2>/dev/null || echo "?")" + echo "Error: Emulator did not boot within ${BOOT_TIMEOUT}s." >&2 + return 1 +} + +cmd_start() { + log_step "Checking current status..." + if cmd_status >/dev/null 2>&1; then + log "Emulator already running." + cmd_device_id + return 0 + fi + log_step "Stopping any previous emulator and starting adb server..." + cmd_stop 2>/dev/null || true + "$ADB" start-server 2>/dev/null || true + log "adb server started." + + log_step "Checking AVD list..." + if ! "$AVDMANAGER" list avd 2>/dev/null | grep -q "Name: $AVD_NAME"; then + log "AVD $AVD_NAME not found, creating..." + cmd_create + else + log "AVD $AVD_NAME exists." + fi + + log_step "Launching emulator: $EMULATOR" + log " AVD=$AVD_NAME | ABI=$SYSIMG_ABI | log=$EMULATOR_LOG" + : > "$EMULATOR_LOG" + # Use only flags supported by current emulator (36.x); -no-accelerometer/-no-gyroscope/-no-sensors were removed. + "$EMULATOR" -avd "$AVD_NAME" -no-window -gpu swiftshader_indirect -no-snapshot -no-audio -no-boot-anim \ + >> "$EMULATOR_LOG" 2>&1 & + local pid=$! + echo $pid > "$PID_FILE" + log "Emulator started with PID $pid (stderr/stdout -> $EMULATOR_LOG)" + log_step "Sleeping 8s before polling adb..." + sleep 8 + wait_for_boot + log_step "Getting device id for flutter..." 
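+  # Only the device id goes to stdout below; all progress logging above went to
+  # stderr, so callers can capture it safely with DEVICE_ID=$(... start).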
+  cmd_device_id
+}
+
+case "${1:-}" in
+  create) cmd_create ;;
+  start) cmd_start ;;
+  stop) cmd_stop ;;
+  status) cmd_status ;;
+  device-id) cmd_device_id ;;
+  *)
+    echo "Usage: $0 {create|start|stop|status|device-id}" >&2
+    echo "  create    - Create AVD if not present" >&2
+    echo "  start     - Start emulator (create if needed), wait for boot" >&2
+    echo "  stop      - Stop emulator" >&2
+    echo "  status    - Print running status" >&2
+    echo "  device-id - Print device ID for flutter test -d" >&2
+    exit 1
+    ;;
+esac
diff --git a/scripts/manage_ios_simulator.sh b/scripts/manage_ios_simulator.sh
new file mode 100755
index 0000000..ce1781f
--- /dev/null
+++ b/scripts/manage_ios_simulator.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+# Manage the iOS Simulator for integration tests.
+# Usage: ./scripts/manage_ios_simulator.sh <command>
+# Commands: start | stop | list | status | device-id
+#
+# Requires: Xcode and xcrun (macOS only).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+# Prefer iPhone 16 or similar; fall back to the first available iPhone.
+DEVICE_NAME="${ZSTANDARD_IOS_DEVICE:-iPhone 16}"
+BOOT_TIMEOUT="${ZSTANDARD_IOS_BOOT_TIMEOUT:-60}"
+
+cmd_list() {
+  echo "Available simulators:"
+  xcrun simctl list devices available 2>/dev/null | grep -E "iPhone|iPad" | head -20
+}
+
+cmd_status() {
+  local booted
+  booted=$(xcrun simctl list devices | grep -E "Booted" | head -1)
+  if [[ -n "$booted" ]]; then
+    echo "Simulator running: $booted"
+    return 0
+  fi
+  echo "No simulator booted."
+  return 1
+}
+
+cmd_device_id() {
+  # Flutter accepts the device id from `flutter devices`; iOS simulators show as "iPhone XX (mobile)".
+  # When running with -d, the UUID from simctl can be used directly.
+  local uuid
+  uuid=$(xcrun simctl list devices | grep -E "Booted" | head -1 | sed -n 's/.*(\([A-F0-9-]*\)) (Booted).*/\1/p')
+  if [[ -n "$uuid" ]]; then
+    echo "$uuid"
+    return 0
+  fi
+  # Return the device name (e.g. "iPhone 16") as a fallback for flutter.
+  echo "$DEVICE_NAME"
+  return 0
+}
+
+wait_for_boot() {
+  echo "Waiting for simulator to boot (timeout ${BOOT_TIMEOUT}s)..."
+  local elapsed=0
+  while [[ $elapsed -lt $BOOT_TIMEOUT ]]; do
+    if xcrun simctl list devices | grep -q "Booted"; then
+      echo "Simulator booted."
+      return 0
+    fi
+    sleep 3
+    elapsed=$((elapsed + 3))
+  done
+  echo "Error: Simulator did not boot within ${BOOT_TIMEOUT}s." >&2
+  return 1
+}
+
+cmd_start() {
+  if xcrun simctl list devices | grep -q "Booted"; then
+    echo "Simulator already booted."
+    cmd_device_id
+    return 0
+  fi
+  # Find the device by name (e.g. "iPhone 16") and boot it by UUID.
+  local uuid
+  uuid=$(xcrun simctl list devices available | grep "$DEVICE_NAME" | head -1 | sed -n 's/.*(\([A-F0-9-]*\)).*/\1/p')
+  if [[ -z "$uuid" ]]; then
+    # Fallback: any available iPhone
+    uuid=$(xcrun simctl list devices available | grep "iPhone" | head -1 | sed -n 's/.*(\([A-F0-9-]*\)).*/\1/p')
+  fi
+  if [[ -z "$uuid" ]]; then
+    echo "Error: No available iPhone simulator found. Run 'xcrun simctl list devices available'." >&2
+    exit 1
+  fi
+  echo "Booting simulator $DEVICE_NAME ($uuid)..."
+  xcrun simctl boot "$uuid" 2>/dev/null || true
+  wait_for_boot
+  cmd_device_id
+}
+
+cmd_stop() {
+  local uuid
+  uuid=$(xcrun simctl list devices | grep -E "Booted" | head -1 | sed -n 's/.*(\([A-F0-9-]*\)) (Booted).*/\1/p')
+  if [[ -n "$uuid" ]]; then
+    echo "Shutting down simulator $uuid..."
+    xcrun simctl shutdown "$uuid"
+    echo "Simulator stopped."
+  else
+    echo "No simulator booted."
+  fi
+}
+
+case "${1:-}" in
+  start) cmd_start ;;
+  stop) cmd_stop ;;
+  list) cmd_list ;;
+  status) cmd_status ;;
+  device-id) cmd_device_id ;;
+  *)
+    echo "Usage: $0 {start|stop|list|status|device-id}" >&2
+    echo "  start     - Boot iOS simulator (default: $DEVICE_NAME)" >&2
+    echo "  stop      - Shutdown booted simulator" >&2
+    echo "  list      - List available simulators" >&2
+    echo "  status    - Print boot status" >&2
+    echo "  device-id - Print device ID for flutter test -d" >&2
+    exit 1
+    ;;
+esac
diff --git a/scripts/regenerate_bindings.sh b/scripts/regenerate_bindings.sh
new file mode 100755
index 0000000..b2bbbc2
--- /dev/null
+++ b/scripts/regenerate_bindings.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Regenerate FFI bindings from the C headers in each platform package.
+# Run this after updating the zstd C source (e.g. after sync_zstd_ios_macos.sh).
+#
+# Usage: from repo root, run: ./scripts/regenerate_bindings.sh
+#
+# Requires: dart run ffigen (and LLVM/clang for ffigen). Do not modify the
+# native zstd C code by hand; update the source, then sync, then run this script.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+PACKAGES=(
+  "zstandard_android"
+  "zstandard_ios"
+  "zstandard_macos"
+  "zstandard_linux"
+  "zstandard_windows"
+  "zstandard_cli"
+)
+
+echo "Regenerating FFI bindings (ffigen) for all platform packages..."
+for pkg in "${PACKAGES[@]}"; do
+  dir="$ROOT/$pkg"
+  if [[ -f "$dir/ffigen.yaml" ]]; then
+    echo "  $pkg"
+    (cd "$dir" && dart run ffigen --config ffigen.yaml)
+  else
+    echo "  $pkg (no ffigen.yaml, skip)"
+  fi
+done
+echo "Done. Commit any changed *_bindings_generated.dart files."
diff --git a/scripts/run_all_macos.sh b/scripts/run_all_macos.sh
new file mode 100755
index 0000000..ce64b54
--- /dev/null
+++ b/scripts/run_all_macos.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+# Run all preparation, build and test steps that are runnable on macOS, in sequence.
+# Usage: from repo root, run: ./scripts/run_all_macos.sh
+#
+# Steps:
+#  1. Sync/update zstd library (iOS + macOS)
+#  2. Regenerate FFI bindings
+#  3. Build Android (example app APK; builds plugin native lib via Gradle)
+#  4. Build CLI (macOS dylibs for zstandard_cli)
+#  5. Build iOS (example app)
+#  6. Build web (example app)
+#  7. Build macOS (example app)
+#  8. Test Android (zstandard_android)
+#  9. Test CLI (zstandard_cli)
+# 10. Test iOS (zstandard_ios)
+# 11. Test web (zstandard_web)
+# 12. Test macOS (zstandard_macos)
+#
+# Requires: macOS, Flutter SDK, Dart SDK, Xcode, CocoaPods, Android SDK/NDK (for Android),
+# CMake (for the CLI build). Stops on first failure (set -e).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+if [[ "$(uname)" != "Darwin" ]]; then
+  echo "This script is intended for macOS."
+  exit 1
+fi
+
+EXAMPLE="$ROOT/zstandard/example"
+
+step() {
+  echo ""
+  echo "========== $1 =========="
+}
+
+# 1. Prepare/update the zstd library
+step "1/12 — Sync zstd (iOS + macOS)"
+./scripts/sync_zstd_ios_macos.sh
+
+# 2. Update the bindings
+step "2/12 — Regenerate bindings"
+./scripts/regenerate_bindings.sh
+
+# 3. Build Android (example app; the plugin has no gradlew, it is built with the app)
+step "3/12 — Build Android"
+cd "$EXAMPLE" && flutter build apk && cd "$ROOT"
+
+# 4. Build CLI (macOS dylibs for zstandard_cli)
+step "4/12 — Build CLI (macOS native libs)"
+./scripts/build_macos.sh
+
+# 5. Build iOS (example app)
+step "5/12 — Build iOS"
+cd "$EXAMPLE/ios" && pod install && cd "$ROOT"
+cd "$EXAMPLE" && flutter build ios --simulator --no-codesign && cd "$ROOT"
+
+# 6. Build web
+step "6/12 — Build web"
+cd "$EXAMPLE" && flutter build web && cd "$ROOT"
+
+# 7. Build macOS (example app)
+step "7/12 — Build macOS"
+cd "$EXAMPLE" && flutter build macos && cd "$ROOT"
+
+# 8. Test Android
+step "8/12 — Test Android"
+cd "$ROOT/zstandard_android" && flutter test && cd "$ROOT"
+
+# 9. Test CLI
+step "9/12 — Test CLI"
+cd "$ROOT/zstandard_cli" && dart test && cd "$ROOT"
+
+# 10. Test iOS
+step "10/12 — Test iOS"
+cd "$ROOT/zstandard_ios" && flutter test && cd "$ROOT"
+
+# 11. Test web
+step "11/12 — Test web"
+cd "$ROOT/zstandard_web" && flutter test && cd "$ROOT"
+
+# 12. Test macOS
+step "12/12 — Test macOS"
+cd "$ROOT/zstandard_macos" && flutter test && cd "$ROOT"
+
+echo ""
+echo "========== All steps completed successfully =========="
diff --git a/scripts/run_benchmark.sh b/scripts/run_benchmark.sh
new file mode 100755
index 0000000..be27e02
--- /dev/null
+++ b/scripts/run_benchmark.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Run the zstandard_cli compression benchmark.
+# Usage: from repo root, run: ./scripts/run_benchmark.sh
+# Use the output as the baseline for regression detection.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT/zstandard_cli"
+dart run benchmark/compression_benchmark.dart
diff --git a/scripts/run_mutation_test.sh b/scripts/run_mutation_test.sh
new file mode 100644
index 0000000..d479c55
--- /dev/null
+++ b/scripts/run_mutation_test.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Run mutation testing on a single package or on all packages.
+# Usage: ./scripts/run_mutation_test.sh [package_name]
+# If package_name is omitted, runs on zstandard only (the main package).
+# Use "all" to run on all packages (takes a long time).
+# Requires: the mutation_test dev_dependency in each package.
+# Config: mutation_test_config.xml at repo root (Flutter) or in zstandard_cli (dart test).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+run_mutation() {
+  local dir="$1"
+  local config="$2"
+  if [[ ! -d "$dir" ]]; then
+    echo "Skip $dir (not found)"
+    return 0
+  fi
+  echo "---- Mutation testing $dir ----"
+  if (cd "$dir" && dart run mutation_test "$config"); then
+    echo "OK $dir"
+    return 0
+  else
+    echo "FAILED $dir (mutation score below threshold or error)"
+    return 1
+  fi
+}
+
+PKG="${1:-zstandard}"
+
+if [[ "$PKG" == "all" ]]; then
+  FAILED=0
+  for pkg in zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web; do
+    run_mutation "$pkg" "../mutation_test_config.xml" || FAILED=1
+  done
+  run_mutation "zstandard_cli" "mutation_test_config.xml" || FAILED=1
+  exit $FAILED
+else
+  if [[ "$PKG" == "zstandard_cli" ]]; then
+    run_mutation "zstandard_cli" "mutation_test_config.xml"
+  else
+    run_mutation "$PKG" "../mutation_test_config.xml"
+  fi
+fi
diff --git a/scripts/sync_zstd_ios_macos.sh b/scripts/sync_zstd_ios_macos.sh
new file mode 100755
index 0000000..ac20a30
--- /dev/null
+++ b/scripts/sync_zstd_ios_macos.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+# Sync the canonical zstd C source from the repo root zstd/ into the iOS and/or
+# macOS plugin Class trees so CocoaPods can see the sources (it only globs
+# inside the pod directory). The single source of truth remains zstd/ at repo root.
+#
+# Usage:
+#   ./scripts/sync_zstd_ios_macos.sh ios    # sync only to zstandard_ios/ios/Classes/zstd/
+#   ./scripts/sync_zstd_ios_macos.sh macos  # sync only to zstandard_macos/macos/Classes/zstd/
+#   ./scripts/sync_zstd_ios_macos.sh        # sync both (e.g. when run manually from repo root)
+#
+# Each pod runs this with its platform argument in before_compile/before_headers and
+# removes its copy in a script phase with execution_position :any (as late as possible).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+SRC="$ROOT/zstd"
+IOS_ZSTD="$ROOT/zstandard_ios/ios/Classes/zstd"
+MACOS_ZSTD="$ROOT/zstandard_macos/macos/Classes/zstd"
+
+if [[ ! -d "$SRC" || ! -f "$SRC/zstd.h" ]]; then
+  echo "Error: Canonical zstd source not found at $SRC (expected zstd.h and subdirs)."
+  echo "Run: ./scripts/update_zstd.sh   # fetches from github.com/facebook/zstd"
+  exit 1
+fi
+
+sync_ios() {
+  echo "Syncing zstd from $SRC -> iOS $IOS_ZSTD"
+  rm -rf "$IOS_ZSTD"
+  mkdir -p "$IOS_ZSTD"
+  rsync -a "$SRC/" "$IOS_ZSTD/"
+  if [[ -f "$IOS_ZSTD/module.modulemap" ]]; then
+    rm -f "$IOS_ZSTD/module.modulemap"
+    echo "  Removed module.modulemap from iOS copy (legacy ZSTD_parameters conflict)."
+  fi
+}
+
+sync_macos() {
+  echo "Syncing zstd from $SRC -> macOS $MACOS_ZSTD"
+  rm -rf "$MACOS_ZSTD"
+  mkdir -p "$MACOS_ZSTD"
+  rsync -a "$SRC/" "$MACOS_ZSTD/"
+  if [[ -f "$MACOS_ZSTD/module.modulemap" ]]; then
+    rm -f "$MACOS_ZSTD/module.modulemap"
+    echo "  Removed module.modulemap from macOS copy (not used by the pod)."
+  fi
+}
+
+case "${1:-}" in
+  ios)
+    sync_ios
+    ;;
+  macos)
+    sync_macos
+    ;;
+  *)
+    sync_ios
+    sync_macos
+    echo "Done. iOS and macOS plugin Class trees are in sync with zstd/."
+    ;;
+esac
diff --git a/scripts/test_all.bat b/scripts/test_all.bat
new file mode 100644
index 0000000..081715b
--- /dev/null
+++ b/scripts/test_all.bat
@@ -0,0 +1,32 @@
+@echo off
+REM Run all unit tests across zstandard packages.
+REM Usage: from repo root, run: scripts\test_all.bat
+REM Requires: Flutter SDK (for Flutter packages), Dart SDK (for CLI).
+
+set ROOT=%~dp0..
+cd /d "%ROOT%"
+set FAILED=0
+
+for %%p in (zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web) do (
+  if exist "%%p" (
+    echo ---- Testing %%p ----
+    cd "%%p"
+    REM "call" is required so the flutter batch script returns control to this loop.
+    call flutter test
+    if errorlevel 1 set FAILED=1
+    cd "%ROOT%"
+  )
+)
+
+if exist zstandard_cli (
+  echo ---- Testing zstandard_cli ----
+  cd zstandard_cli
+  call dart test
+  if errorlevel 1 set FAILED=1
+  cd "%ROOT%"
+)
+
+if %FAILED% neq 0 (
+  echo One or more packages had test failures.
+  exit /b 1
+)
+echo All tests passed.
diff --git a/scripts/test_all.sh b/scripts/test_all.sh
new file mode 100755
index 0000000..aa1a278
--- /dev/null
+++ b/scripts/test_all.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# Run all unit tests across zstandard packages.
+# Usage: from repo root, run: ./scripts/test_all.sh
+# Requires: Flutter SDK (for Flutter packages), Dart SDK (for CLI).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+FAILED=0
+
+run_test() {
+  local dir="$1"
+  local runner="$2"
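+  # "$runner" is the test command executed inside "$dir",
+  # e.g. "flutter test" or "dart test" (see the calls below).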
+  if [[ ! -d "$dir" ]]; then
+    echo "Skip $dir (not found)"
+    return 0
+  fi
+  echo "---- Testing $dir ----"
+  if (cd "$dir" && $runner); then
+    echo "OK $dir"
+    return 0
+  else
+    echo "FAILED $dir"
+    FAILED=1
+    return 1
+  fi
+}
+
+# Flutter packages
+for pkg in zstandard zstandard_platform_interface zstandard_android zstandard_ios zstandard_macos zstandard_linux zstandard_windows zstandard_web; do
+  run_test "$pkg" "flutter test" || true
+done
+
+# CLI is pure Dart
+run_test "zstandard_cli" "dart test" || true
+
+if [[ $FAILED -eq 1 ]]; then
+  echo "One or more packages had test failures."
+  exit 1
+fi
+echo "All tests passed."
diff --git a/scripts/test_all_integration.sh b/scripts/test_all_integration.sh
new file mode 100755
index 0000000..4ead1d0
--- /dev/null
+++ b/scripts/test_all_integration.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+# Run all tests with full coverage (no skips). Uses integration tests and Chrome for web.
+# Usage: from repo root, ./scripts/test_all_integration.sh
+# Requires: Flutter SDK, Dart SDK. On macOS: Xcode (iOS/macOS), Android SDK (Android), Chrome (web).
+# Optional: ZSTANDARD_SKIP_ANDROID=1 to skip Android (e.g. if the emulator is slow or unavailable).
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+SCRIPT_DIR="$ROOT/scripts"
+FAILED=0
+
+run() {
+  if "$@"; then
+    echo "OK: $*"
+    return 0
+  else
+    echo "FAILED: $*"
+    FAILED=1
+    return 1
+  fi
+}
+
+echo "=== Running integration tests (no skips) ==="
+
+# 1. Pure Dart / VM tests
+echo ""
+echo "1/8 Testing platform interface..."
+run bash -c "cd $ROOT/zstandard_platform_interface && flutter test"
+
+echo ""
+echo "2/8 Testing main package..."
+run bash -c "cd $ROOT/zstandard && flutter test"
+
+echo ""
+echo "3/8 Testing CLI..."
+run bash -c "cd $ROOT/zstandard_cli && dart test"
+
+# 2. Android (emulator; skip with ZSTANDARD_SKIP_ANDROID=1 or increase ZSTANDARD_AVD_BOOT_TIMEOUT)
+echo ""
+echo "4/8 Testing Android (integration tests on emulator)..."
+if [[ -n "$ZSTANDARD_SKIP_ANDROID" ]]; then
+  echo "Skipped (ZSTANDARD_SKIP_ANDROID=1)."
+else
+  run "$SCRIPT_DIR/test_android_integration.sh" || true
+fi
+
+# 3. iOS (simulator)
+echo ""
+echo "5/8 Testing iOS (integration tests on simulator)..."
+if [[ "$(uname)" == "Darwin" ]]; then
+  run "$SCRIPT_DIR/test_ios_integration.sh" || true
+else
+  echo "Skipped (iOS requires macOS)."
+fi
+
+# 4. macOS (framework + integration tests)
+echo ""
+echo "6/8 Testing macOS (build framework + integration tests)..."
+run "$SCRIPT_DIR/test_macos_integration.sh" || true
+
+# 5. Web (Chrome)
+echo ""
+echo "7/8 Testing Web (Chrome)..."
+run "$SCRIPT_DIR/test_web_integration.sh" || true
+
+# 6. Linux (when on Linux)
+echo ""
+echo "8/8 Testing Linux / Windows..."
+if [[ "$(uname -s)" == "Linux" ]]; then
+  run "$SCRIPT_DIR/test_linux_integration.sh" || true
+elif [[ "$(uname -s)" == *"MINGW"* ]] || [[ "$(uname -s)" == *"MSYS"* ]]; then
+  run bash -c "cd \"$ROOT\" && cmd //c 'scripts\\\\test_windows_integration.bat'" || true
+else
+  echo "Linux/Windows: run on native OS: ./scripts/test_linux_integration.sh or scripts\\test_windows_integration.bat"
+fi
+
+echo ""
+if [[ $FAILED -eq 1 ]]; then
+  echo "=== One or more test suites failed. ==="
+  exit 1
+fi
+echo "=== All tests completed successfully. ==="
diff --git a/scripts/test_android_integration.sh b/scripts/test_android_integration.sh
new file mode 100755
index 0000000..65ef736
--- /dev/null
+++ b/scripts/test_android_integration.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Run Android integration tests.
Starts emulator if needed, runs tests. +# Usage: from repo root, ./scripts/test_android_integration.sh +# Requires: ANDROID_HOME or ANDROID_SDK_ROOT, Flutter SDK. +# Skip: ZSTANDARD_SKIP_ANDROID=1 or unset ANDROID_HOME/ANDROID_SDK_ROOT. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" + +EXIT_CODE=0 +SCRIPT_DIR="$ROOT/scripts" + +if [[ -n "$ZSTANDARD_SKIP_ANDROID" ]]; then + echo "Android integration tests skipped (ZSTANDARD_SKIP_ANDROID=1)." + exit 0 +fi + +if [[ -z "$ANDROID_HOME" && -z "$ANDROID_SDK_ROOT" ]]; then + echo "Android integration tests skipped (ANDROID_HOME/ANDROID_SDK_ROOT not set)." + exit 0 +fi + +# Start emulator if not already running +if ! "$SCRIPT_DIR/manage_android_emulator.sh" status 2>/dev/null; then + echo "Starting Android emulator (this can take 2–4 minutes on first boot)..." + if ! DEVICE_ID=$("$SCRIPT_DIR/manage_android_emulator.sh" start); then + echo "Android emulator failed to start. To skip Android: ZSTANDARD_SKIP_ANDROID=1 ./scripts/test_all_integration.sh" + echo "To increase boot timeout: ZSTANDARD_AVD_BOOT_TIMEOUT=300 ./scripts/test_android_integration.sh" + exit 1 + fi +else + DEVICE_ID=$("$SCRIPT_DIR/manage_android_emulator.sh" device-id) +fi + +echo "Running integration tests on device: $DEVICE_ID" +if (cd "$ROOT/zstandard_android/example" && flutter test integration_test/ -d "$DEVICE_ID"); then + echo "Android integration tests passed." +else + EXIT_CODE=1 + echo "Android integration tests failed." +fi + +exit $EXIT_CODE diff --git a/scripts/test_ios_integration.sh b/scripts/test_ios_integration.sh new file mode 100755 index 0000000..46528d1 --- /dev/null +++ b/scripts/test_ios_integration.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Run iOS integration tests. Boots simulator if needed, runs tests. +# Usage: from repo root, ./scripts/test_ios_integration.sh +# Requires: Xcode, Flutter SDK (macOS only). + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" + +EXIT_CODE=0 +SCRIPT_DIR="$ROOT/scripts" + +# Start simulator if not already booted +if ! "$SCRIPT_DIR/manage_ios_simulator.sh" status 2>/dev/null; then + echo "Booting iOS simulator..." + "$SCRIPT_DIR/manage_ios_simulator.sh" start +fi + +# Flutter accepts device id from flutter devices (e.g. UUID or "iPhone 16") +DEVICE_ID=$("$SCRIPT_DIR/manage_ios_simulator.sh" device-id) +echo "Running integration tests on device: $DEVICE_ID" + +if (cd "$ROOT/zstandard_ios/example" && flutter test integration_test/ -d "$DEVICE_ID"); then + echo "iOS integration tests passed." +else + EXIT_CODE=1 + echo "iOS integration tests failed." +fi + +exit $EXIT_CODE diff --git a/scripts/test_linux_integration.sh b/scripts/test_linux_integration.sh new file mode 100755 index 0000000..704b1ec --- /dev/null +++ b/scripts/test_linux_integration.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Run Linux integration tests on the current machine. +# Usage: from repo root, ./scripts/test_linux_integration.sh +# Requires: Linux, Flutter SDK with Linux desktop support. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" + +if [[ "$(uname -s)" != "Linux" ]]; then + echo "Linux integration tests require Linux. Skipped." + exit 0 +fi + +EXIT_CODE=0 + +echo "Running Linux integration tests..." +if (cd "$ROOT/zstandard_linux/example" && flutter test integration_test/ -d linux); then + echo "Linux integration tests passed." +else + EXIT_CODE=1 + echo "Linux integration tests failed." 
+fi + +exit $EXIT_CODE diff --git a/scripts/test_macos_integration.sh b/scripts/test_macos_integration.sh new file mode 100755 index 0000000..25a07b1 --- /dev/null +++ b/scripts/test_macos_integration.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Run macOS integration tests. Ensures native framework is built, then runs tests. +# Usage: from repo root, ./scripts/test_macos_integration.sh +# Requires: macOS (Darwin), Xcode, Flutter SDK. + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" +SCRIPT_DIR="$ROOT/scripts" + +if [[ "$(uname -s)" != "Darwin" ]]; then + echo "macOS integration tests require Darwin. Skipped." + exit 0 +fi + +EXIT_CODE=0 + +"$SCRIPT_DIR/ensure_macos_framework.sh" +echo "Running macOS integration tests..." +if (cd "$ROOT/zstandard_macos/example" && flutter test integration_test/ -d macos); then + echo "macOS integration tests passed." +else + EXIT_CODE=1 + echo "macOS integration tests failed." +fi + +exit $EXIT_CODE diff --git a/scripts/test_web_integration.sh b/scripts/test_web_integration.sh new file mode 100755 index 0000000..07d03ab --- /dev/null +++ b/scripts/test_web_integration.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +# Run web tests in Chrome: unit tests (flutter test -d chrome) and integration +# tests (flutter drive with ChromeDriver + web-server). +# +# Usage: from repo root, ./scripts/test_web_integration.sh +# +# Requires: +# - Flutter SDK +# - Chrome browser +# - ChromeDriver on PATH (port 4444 for integration tests). +# Install: e.g. brew install chromedriver, or +# npx @puppeteer/browsers install chromedriver@stable +# See: https://docs.flutter.dev/testing/integration-tests#web + +set -e +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT" + +EXIT_CODE=0 +CHROMEDRIVER_PID="" +CHROMEDRIVER_PORT=4444 + +# If we start ChromeDriver, stop it on exit. +cleanup_chromedriver() { + if [[ -n "$CHROMEDRIVER_PID" ]] && kill -0 "$CHROMEDRIVER_PID" 2>/dev/null; then + kill "$CHROMEDRIVER_PID" 2>/dev/null || true + wait "$CHROMEDRIVER_PID" 2>/dev/null || true + fi +} +trap cleanup_chromedriver EXIT + +# --- Unit tests (Chrome), if the package has any --- +WEB_TEST_COUNT=$(find "$ROOT/zstandard_web/test" -name "*_test.dart" 2>/dev/null | wc -l | tr -d ' ') +if [[ "$WEB_TEST_COUNT" -gt 0 ]]; then + echo "Running zstandard_web unit tests in Chrome..." + if (cd "$ROOT/zstandard_web" && flutter test -d chrome --coverage 2>/dev/null || flutter test -d chrome); then + echo "Web unit tests passed." + else + EXIT_CODE=1 + echo "Web unit tests failed." + fi +else + echo "No zstandard_web unit tests (coverage in example/integration_test); see zstandard_web/test/README.md." +fi + +# --- Integration tests (flutter drive + ChromeDriver + web-server) --- +if [[ -d "$ROOT/zstandard_web/example/integration_test" ]] && [[ -d "$ROOT/zstandard_web/example/test_driver" ]]; then + if ! command -v chromedriver &>/dev/null; then + echo "ChromeDriver not found on PATH. Skipping web integration tests." 
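+    # Note: the chromedriver major version must match the installed Chrome
+    # version, or the WebDriver session will fail to start.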
+    echo "Install with: brew install chromedriver (or see https://docs.flutter.dev/testing/integration-tests#web)"
+  else
+    # On macOS, remove quarantine so Gatekeeper doesn't block chromedriver (avoids the security popup)
+    if [[ "$(uname -s)" = Darwin ]]; then
+      CHROMEDRIVER_BIN=$(command -v chromedriver)
+      while [[ -L "$CHROMEDRIVER_BIN" ]]; do
+        NEXT=$(readlink "$CHROMEDRIVER_BIN")
+        [[ "$NEXT" != /* ]] && NEXT="$(dirname "$CHROMEDRIVER_BIN")/$NEXT"
+        CHROMEDRIVER_BIN=$NEXT
+      done
+      if [[ -f "$CHROMEDRIVER_BIN" ]] && xattr "$CHROMEDRIVER_BIN" 2>/dev/null | grep -q com.apple.quarantine; then
+        xattr -d com.apple.quarantine "$CHROMEDRIVER_BIN" 2>/dev/null || true
+      fi
+    fi
+
+    # Start ChromeDriver if nothing is listening on 4444
+    if ! lsof -i:"$CHROMEDRIVER_PORT" &>/dev/null; then
+      chromedriver --port="$CHROMEDRIVER_PORT" &
+      CHROMEDRIVER_PID=$!
+      # Wait until the port is listening (up to 10s)
+      for _ in 1 2 3 4 5 6 7 8 9 10; do
+        sleep 1
+        lsof -i:"$CHROMEDRIVER_PORT" &>/dev/null && break
+      done
+      if ! lsof -i:"$CHROMEDRIVER_PORT" &>/dev/null; then
+        echo "ChromeDriver did not bind to port $CHROMEDRIVER_PORT. Skipping integration tests."
+        echo "Start it manually in another terminal: chromedriver --port=4444"
+      fi
+    else
+      echo "Using existing ChromeDriver on port $CHROMEDRIVER_PORT."
+    fi
+
+    if lsof -i:"$CHROMEDRIVER_PORT" &>/dev/null; then
+      # Brief wait so ChromeDriver is fully ready before flutter drive starts the server and tests
+      sleep 2
+      echo "Running zstandard_web example integration tests (flutter drive -d web-server)..."
+      DRIVE_OUTPUT=$(mktemp -t flutter_drive_XXXXXX.txt)
+      # "|| DRIVE_EXIT=$?" keeps set -e from aborting before the output is inspected.
+      DRIVE_EXIT=0
+      (cd "$ROOT/zstandard_web/example" && flutter drive \
+        --driver=test_driver/integration_test.dart \
+        --target=integration_test/zstandard_web_integration_test.dart \
+        -d web-server \
+        --web-port=8080) > "$DRIVE_OUTPUT" 2>&1 || DRIVE_EXIT=$?
+      # Flutter drive can exit 0 even when compilation fails; detect known failure output
+      if [[ $DRIVE_EXIT -ne 0 ]] || grep -qE "Failed to compile|Dart compiler exited unexpectedly|SessionNotCreatedException|Unable to start a WebDriver session" "$DRIVE_OUTPUT"; then
+        EXIT_CODE=1
+        echo "Web integration tests failed."
+        cat "$DRIVE_OUTPUT"
+      else
+        echo "Web integration tests passed."
+        cat "$DRIVE_OUTPUT"
+      fi
+      rm -f "$DRIVE_OUTPUT"
+    fi
+  fi
+fi
+
+exit $EXIT_CODE
diff --git a/scripts/test_windows_integration.bat b/scripts/test_windows_integration.bat
new file mode 100644
index 0000000..ecc3bae
--- /dev/null
+++ b/scripts/test_windows_integration.bat
@@ -0,0 +1,27 @@
+@echo off
+REM Run Windows integration tests on the current machine.
+REM Usage: from repo root, scripts\test_windows_integration.bat
+REM Requires: Windows, Flutter SDK with Windows desktop support.
+
+setlocal
+set ROOT=%~dp0..
+cd /d "%ROOT%"
+
+where flutter >nul 2>&1
+if errorlevel 1 (
+  echo Flutter not found on PATH.
+  exit /b 1
+)
+
+echo Running Windows integration tests...
+cd zstandard_windows\example
+REM "call" is required so the flutter batch script returns and %errorlevel% is captured.
+call flutter test integration_test/ -d windows
+set EXIT_CODE=%errorlevel%
+cd /d "%ROOT%"
+
+if %EXIT_CODE% equ 0 (
+  echo Windows integration tests passed.
+) else (
+  echo Windows integration tests failed.
+)
+exit /b %EXIT_CODE%
diff --git a/scripts/update_zstd.sh b/scripts/update_zstd.sh
new file mode 100755
index 0000000..eea62d1
--- /dev/null
+++ b/scripts/update_zstd.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Update the canonical zstd source at repo root (zstd/) from the official repo.
+# Usage: from repo root, run: ./scripts/update_zstd.sh
+# Optional: ./scripts/update_zstd.sh v1.5.6 (tag or branch; default: dev)
+#
+# Requires: git. After this, run ./scripts/sync_zstd_ios_macos.sh and optionally
+# ./scripts/regenerate_bindings.sh.
+
+set -e
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+ZSTD_DIR="$ROOT/zstd"
+REF="${1:-dev}"
+
+echo "Fetching zstd from https://github.com/facebook/zstd.git (ref: $REF)..."
+TMP="$ROOT/.zstd_upstream"
+rm -rf "$TMP"
+git clone --depth 1 --branch "$REF" https://github.com/facebook/zstd.git "$TMP"
+
+mkdir -p "$ZSTD_DIR"
+echo "Copying lib/ into $ZSTD_DIR ..."
+rsync -a --delete "$TMP/lib/" "$ZSTD_DIR/"
+rm -rf "$TMP"
+
+if [[ ! -f "$ZSTD_DIR/zstd.h" ]]; then
+  echo "Error: zstd.h not found after copy."
+  exit 1
+fi
+echo "Done. zstd/ is now in sync with facebook/zstd @ $REF."
+echo "Next: run ./scripts/sync_zstd_ios_macos.sh"
diff --git a/zstandard/example/README.md b/zstandard/example/README.md
index ebc202b..7c6e41f 100644
--- a/zstandard/example/README.md
+++ b/zstandard/example/README.md
@@ -1,16 +1,43 @@
-# zstandard_example
+# Zstandard Example
 
-Demonstrates how to use the zstandard plugin.
+This example demonstrates how to use the [zstandard](https://pub.dev/packages/zstandard) Flutter plugin for compression and decompression on all supported platforms.
 
-## Getting Started
+## What it demonstrates
 
-This project is a starting point for a Flutter application.
+- **Compression**: Compress raw bytes using the Zstandard algorithm with a configurable compression level (1–22).
+- **Decompression**: Decompress previously compressed data back to the original bytes.
+- **Extension methods**: Use the `compress()` and `decompress()` extension methods on `Uint8List?` for a concise API.
+- **Platform detection**: The plugin automatically uses the correct implementation (Android, iOS, macOS, Windows, Linux, or web) based on the current platform.
 
-A few resources to get you started if this is your first Flutter project:
+## Running the example
 
-- [Lab: Write your first Flutter app](https://docs.flutter.dev/get-started/codelab)
-- [Cookbook: Useful Flutter samples](https://docs.flutter.dev/cookbook)
+From this directory (`zstandard/example`):
 
-For help getting started with Flutter development, view the
-[online documentation](https://docs.flutter.dev/), which offers tutorials,
-samples, guidance on mobile development, and a full API reference.
+```bash
+flutter pub get
+flutter run
+```
+
+Then select your target platform (e.g. Android, iOS, macOS, Windows, Linux, or Chrome for web).
+
+For web, ensure `zstd.js` and `zstd.wasm` are in the app's `web/` directory and that `web/index.html` includes `<script src="zstd.js"></script>`. See the [web platform guide](../../docs/platforms/web.md) for details.
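+
+A minimal roundtrip sketch using the extension API described above (input data
+is illustrative; error handling omitted):
+
+```dart
+import 'dart:typed_data';
+import 'package:zstandard/zstandard.dart';
+
+Future<void> roundtrip() async {
+  // Compressible sample data.
+  final data = Uint8List.fromList(List.generate(1024, (i) => i % 256));
+  final compressed = await data.compress(compressionLevel: 5);
+  // decompress() returns null for null or invalid input.
+  final restored = await compressed?.decompress();
+  assert(restored != null && restored.length == data.length);
+}
+```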
+ +## Integration tests + +To run integration tests (requires a device or emulator for the target platform): + +```bash +flutter test integration_test/ +``` + +## Documentation + +- [Getting started](../../docs/guides/getting-started.md) +- [API reference](../../docs/api/main-api.md) +- [Platform guides](../../docs/platforms/) +- [Troubleshooting](../../docs/troubleshooting/common-issues.md) + +## Resources + +- [Flutter documentation](https://docs.flutter.dev/) +- [Zstandard (zstd) algorithm](https://github.com/facebook/zstd) diff --git a/zstandard/example/integration_test/plugin_integration_test.dart b/zstandard/example/integration_test/plugin_integration_test.dart index 449fdc9..ddd359f 100644 --- a/zstandard/example/integration_test/plugin_integration_test.dart +++ b/zstandard/example/integration_test/plugin_integration_test.dart @@ -6,20 +6,106 @@ // For more information about Flutter integration tests, please see // https://flutter.dev/to/integration-testing +import 'dart:typed_data'; import 'package:flutter_test/flutter_test.dart'; import 'package:integration_test/integration_test.dart'; - import 'package:zstandard/zstandard.dart'; void main() { IntegrationTestWidgetsFlutterBinding.ensureInitialized(); + late Zstandard plugin; + + setUp(() { + plugin = Zstandard(); + }); + testWidgets('getPlatformVersion test', (WidgetTester tester) async { - final Zstandard plugin = Zstandard(); final String? version = await plugin.getPlatformVersion(); - // The version string depends on the host platform running the test, so - // just assert that some non-empty string is returned. expect(version?.isNotEmpty, true); }); + + group('Compression roundtrip', () { + test('roundtrip small data level 3', () async { + final data = Uint8List.fromList(List.generate(100, (i) => i % 256)); + final compressed = await plugin.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await plugin.decompress(compressed!); + expect(decompressed, isNotNull); + expect(decompressed, data); + }); + + test('roundtrip with level 1', () async { + final data = Uint8List.fromList(List.generate(500, (i) => i % 256)); + final compressed = await plugin.compress(data, 1); + expect(compressed, isNotNull); + final decompressed = await plugin.decompress(compressed!); + expect(decompressed, data); + }); + + test('roundtrip with level 22', () async { + final data = Uint8List.fromList(List.generate(500, (i) => i % 256)); + final compressed = await plugin.compress(data, 22); + expect(compressed, isNotNull); + final decompressed = await plugin.decompress(compressed!); + expect(decompressed, data); + }); + + test('roundtrip empty data', () async { + final data = Uint8List(0); + final compressed = await plugin.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await plugin.decompress(compressed!); + expect(decompressed, isNotNull); + expect(decompressed!.length, 0); + }); + + test('roundtrip medium data (10KB)', () async { + final data = Uint8List.fromList(List.generate(10000, (i) => i % 256)); + final compressed = await plugin.compress(data, 3); + expect(compressed, isNotNull); + expect(compressed!.length, lessThanOrEqualTo(data.length + 256)); + final decompressed = await plugin.decompress(compressed); + expect(decompressed, data); + }); + + test('roundtrip repeated pattern', () async { + final data = Uint8List.fromList(List.filled(1000, 0x42)); + final compressed = await plugin.compress(data, 10); + expect(compressed, isNotNull); + final decompressed = await plugin.decompress(compressed!); + 
expect(decompressed, data); + }); + }); + + group('Decompress invalid input', () { + test('corrupted data returns null', () async { + final corrupted = Uint8List.fromList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + final result = await plugin.decompress(corrupted); + expect(result, isNull); + }); + + test('random bytes return null', () async { + final random = Uint8List.fromList(List.generate(50, (i) => (i * 7) % 256)); + final result = await plugin.decompress(random); + expect(result, isNull); + }); + }); + + group('Extension methods', () { + test('compress extension roundtrip', () async { + final data = Uint8List.fromList(List.generate(200, (i) => i % 256)); + final compressed = await data.compress(compressionLevel: 3); + expect(compressed, isNotNull); + final decompressed = await compressed!.decompress(); + expect(decompressed, data); + }); + + test('null extension returns null', () async { + Uint8List? nullData; + final compressed = await nullData.compress(); + expect(compressed, isNull); + }); + }); } diff --git a/zstandard/example/ios/Flutter/AppFrameworkInfo.plist b/zstandard/example/ios/Flutter/AppFrameworkInfo.plist index 1dc6cf7..391a902 100644 --- a/zstandard/example/ios/Flutter/AppFrameworkInfo.plist +++ b/zstandard/example/ios/Flutter/AppFrameworkInfo.plist @@ -20,7 +20,5 @@ ???? CFBundleVersion 1.0 - MinimumOSVersion - 13.0 diff --git a/zstandard/example/ios/Runner/AppDelegate.swift b/zstandard/example/ios/Runner/AppDelegate.swift index 6266644..c30b367 100644 --- a/zstandard/example/ios/Runner/AppDelegate.swift +++ b/zstandard/example/ios/Runner/AppDelegate.swift @@ -2,12 +2,15 @@ import Flutter import UIKit @main -@objc class AppDelegate: FlutterAppDelegate { +@objc class AppDelegate: FlutterAppDelegate, FlutterImplicitEngineDelegate { override func application( _ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]? 
) -> Bool {
-    GeneratedPluginRegistrant.register(with: self)
     return super.application(application, didFinishLaunchingWithOptions: launchOptions)
   }
+
+  func didInitializeImplicitFlutterEngine(_ engineBridge: FlutterImplicitEngineBridge) {
+    GeneratedPluginRegistrant.register(with: engineBridge.pluginRegistry)
+  }
 }
diff --git a/zstandard/example/ios/Runner/Info.plist b/zstandard/example/ios/Runner/Info.plist
index 2c46e17..663b538 100644
--- a/zstandard/example/ios/Runner/Info.plist
+++ b/zstandard/example/ios/Runner/Info.plist
@@ -2,6 +2,8 @@
 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
 <dict>
+	<key>CADisableMinimumFrameDurationOnPhone</key>
+	<true/>
 	<key>CFBundleDevelopmentRegion</key>
 	<string>$(DEVELOPMENT_LANGUAGE)</string>
 	<key>CFBundleDisplayName</key>
@@ -24,6 +26,29 @@
 	<string>$(FLUTTER_BUILD_NUMBER)</string>
 	<key>LSRequiresIPhoneOS</key>
 	<true/>
+	<key>UIApplicationSceneManifest</key>
+	<dict>
+		<key>UIApplicationSupportsMultipleScenes</key>
+		<false/>
+		<key>UISceneConfigurations</key>
+		<dict>
+			<key>UIWindowSceneSessionRoleApplication</key>
+			<array>
+				<dict>
+					<key>UISceneClassName</key>
+					<string>UIWindowScene</string>
+					<key>UISceneConfigurationName</key>
+					<string>flutter</string>
+					<key>UISceneDelegateClassName</key>
+					<string>FlutterSceneDelegate</string>
+					<key>UISceneStoryboardFile</key>
+					<string>Main</string>
+				</dict>
+			</array>
+		</dict>
+	</dict>
+	<key>UIApplicationSupportsIndirectInputEvents</key>
+	<true/>
 	<key>UILaunchStoryboardName</key>
 	<string>LaunchScreen</string>
 	<key>UIMainStoryboardFile</key>
@@ -41,9 +66,5 @@
 		<string>UIInterfaceOrientationLandscapeLeft</string>
 		<string>UIInterfaceOrientationLandscapeRight</string>
 	</array>
-	<key>CADisableMinimumFrameDurationOnPhone</key>
-	<true/>
-	<key>UIApplicationSupportsIndirectInputEvents</key>
-	<true/>
 </dict>
 </plist>
diff --git a/zstandard/example/macos/Runner.xcodeproj/project.pbxproj b/zstandard/example/macos/Runner.xcodeproj/project.pbxproj
index 5405416..c61e801 100644
--- a/zstandard/example/macos/Runner.xcodeproj/project.pbxproj
+++ b/zstandard/example/macos/Runner.xcodeproj/project.pbxproj
@@ -240,7 +240,7 @@
 				33CC10EB2044A3C60003C045 /* Resources */,
 				33CC110E2044A8840003C045 /* Bundle Framework */,
 				3399D490228B24CF009A79C7 /* ShellScript */,
-				D01C54AD78991D6BF3249F99 /* [CP] Embed Pods Frameworks */,
+				0DC47FEBBDA2E92A1D16A5DE /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -345,6 +345,23 @@
 			shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock.
Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; + 0DC47FEBBDA2E92A1D16A5DE /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Embed Pods Frameworks"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; 1FF5FAFBF391D963D257EED2 /* [CP] Check Pods Manifest.lock */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -405,23 +422,6 @@ shellPath = /bin/sh; shellScript = "\"$FLUTTER_ROOT\"/packages/flutter_tools/bin/macos_assemble.sh && touch Flutter/ephemeral/tripwire"; }; - D01C54AD78991D6BF3249F99 /* [CP] Embed Pods Frameworks */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputFileListPaths = ( - "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-input-files.xcfilelist", - ); - name = "[CP] Embed Pods Frameworks"; - outputFileListPaths = ( - "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-output-files.xcfilelist", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\"\n"; - showEnvVarsInLog = 0; - }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ diff --git a/zstandard/example/pubspec.yaml b/zstandard/example/pubspec.yaml index 5b34838..00b31c0 100644 --- a/zstandard/example/pubspec.yaml +++ b/zstandard/example/pubspec.yaml @@ -13,6 +13,21 @@ environment: # dependencies can be manually updated by changing the version numbers below to # the latest version available on pub.dev. To see which dependencies have newer # versions available, run `flutter pub outdated`. +dependency_overrides: + # Use local plugins from repo when developing (e.g. macOS zstd symbol fix). + zstandard_android: + path: ../../zstandard_android + zstandard_macos: + path: ../../zstandard_macos + zstandard_ios: + path: ../../zstandard_ios + zstandard_web: + path: ../../zstandard_web + zstandard_windows: + path: ../../zstandard_windows + zstandard_linux: + path: ../../zstandard_linux + dependencies: flutter: sdk: flutter diff --git a/zstandard/lib/src/zstandard_ext.dart b/zstandard/lib/src/zstandard_ext.dart index db18d7c..e287c10 100644 --- a/zstandard/lib/src/zstandard_ext.dart +++ b/zstandard/lib/src/zstandard_ext.dart @@ -2,13 +2,30 @@ import 'dart:typed_data'; import 'package:zstandard/zstandard.dart'; +/// Extension methods on [Uint8List?] for Zstandard compression and decompression. +/// +/// When the receiver is null, both [compress] and [decompress] return null +/// without calling the platform. +/// +/// Example: +/// ```dart +/// final compressed = await data.compress(compressionLevel: 5); +/// final decompressed = await compressed?.decompress(); +/// ``` extension ZstandardExt on Uint8List? 
{
+  /// Compresses this byte list using the given [compressionLevel].
+  ///
+  /// Default [compressionLevel] is 3. Range is 1–22.
+  /// Returns null if the receiver is null or compression failed.
   Future<Uint8List?> compress({int compressionLevel = 3}) async {
     var data = this;
     if (data == null) return null;
     return Zstandard().compress(data, compressionLevel);
   }
 
+  /// Decompresses this byte list (must be Zstandard-compressed data).
+  ///
+  /// Returns null if the receiver is null or decompression failed.
   Future<Uint8List?> decompress() async {
     var data = this;
     if (data == null) return null;
diff --git a/zstandard/lib/zstandard.dart b/zstandard/lib/zstandard.dart
index cecca87..12ec302 100644
--- a/zstandard/lib/zstandard.dart
+++ b/zstandard/lib/zstandard.dart
@@ -6,22 +6,54 @@
 import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';
 
 export 'src/zstandard_ext.dart';
 
+/// Main entry point for Zstandard compression and decompression in Flutter.
+///
+/// Use [Zstandard] to compress and decompress [Uint8List] data on all supported
+/// platforms (Android, iOS, macOS, Windows, Linux, web). The implementation
+/// is selected automatically based on the current platform.
+///
+/// Example:
+/// ```dart
+/// final zstandard = Zstandard();
+/// final compressed = await zstandard.compress(data, 3);
+/// final decompressed = await zstandard.decompress(compressed!);
+/// ```
+///
+/// See also [ZstandardExt] for extension methods on [Uint8List?].
 class Zstandard {
   static Zstandard? _instance;
 
   Zstandard._internal();
 
+  /// Creates or returns the singleton [Zstandard] instance.
   factory Zstandard() {
     _instance ??= Zstandard._internal();
     return _instance!;
   }
 
+  /// The currently registered platform implementation.
+  ///
+  /// Typically used only for testing (e.g. to set a mock) or to call
+  /// [getPlatformVersion]. Prefer [compress] and [decompress] for normal use.
   ZstandardPlatform get instance => ZstandardImpl().instance;
 
+  /// Returns a platform-specific version or identifier string.
+  ///
+  /// May be null if the platform does not provide one. Useful for debugging
+  /// or display (e.g. "Android 14", "macOS 14.0").
   Future<String?> getPlatformVersion() => instance.getPlatformVersion();
 
+  /// Compresses [data] using Zstandard with the given [compressionLevel].
+  ///
+  /// [compressionLevel] must be between 1 (fastest) and 22 (best ratio).
+  /// Returns the compressed bytes, or null if compression failed.
   Future<Uint8List?> compress(Uint8List data, int compressionLevel) =>
       instance.compress(data, compressionLevel);
 
-  Future<Uint8List?> decompress(Uint8List data) => instance.decompress(data);
+  /// Decompresses Zstandard-compressed [data].
+  ///
+  /// Returns the decompressed bytes, or null if decompression failed
+  /// (e.g. invalid or corrupted input).
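+  ///
+  /// Example (inputs are illustrative):
+  /// ```dart
+  /// final original = await Zstandard().decompress(compressed);
+  /// if (original == null) {
+  ///   // input was not valid Zstandard data
+  /// }
+  /// ```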
+  Future<Uint8List?> decompress(Uint8List data) =>
+      instance.decompress(data);
 }
diff --git a/zstandard/pubspec.yaml b/zstandard/pubspec.yaml
index 69b2b16..c324d5b 100644
--- a/zstandard/pubspec.yaml
+++ b/zstandard/pubspec.yaml
@@ -39,6 +39,8 @@ dev_dependencies:
   flutter_lints: ^5.0.0
   flutter_test:
     sdk: flutter
+  kiri_check: ^1.3.1
   mockito: ^5.4.4
+  mutation_test: ^1.8.0
   plugin_platform_interface: ^2.1.8
   test: ^1.16.3
diff --git a/zstandard/test/error_handling_test.dart b/zstandard/test/error_handling_test.dart
new file mode 100644
index 0000000..bfa1fb3
--- /dev/null
+++ b/zstandard/test/error_handling_test.dart
@@ -0,0 +1,115 @@
+import 'dart:typed_data';
+
+import 'package:flutter_test/flutter_test.dart';
+import 'package:plugin_platform_interface/plugin_platform_interface.dart';
+import 'package:zstandard/zstandard.dart';
+import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';
+
+/// Mock that returns null for decompress (simulates corrupted/invalid input).
+class MockDecompressFails with MockPlatformInterfaceMixin implements ZstandardPlatform {
+  @override
+  Future<String?> getPlatformVersion() => Future.value('Mock 1.0');
+
+  @override
+  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async {
+    return Uint8List.fromList([0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x01, 0x00, 0x00]);
+  }
+
+  @override
+  Future<Uint8List?> decompress(Uint8List data) async => null;
+}
+
+/// Mock that returns null for compress (simulates compression failure).
+class MockCompressFails with MockPlatformInterfaceMixin implements ZstandardPlatform {
+  @override
+  Future<String?> getPlatformVersion() => Future.value('Mock 1.0');
+
+  @override
+  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async => null;
+
+  @override
+  Future<Uint8List?> decompress(Uint8List data) async =>
+      Future.value(Uint8List.fromList([1, 2, 3]));
+}
+
+void main() {
+  TestWidgetsFlutterBinding.ensureInitialized();
+
+  group('Error handling — decompress returns null', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = MockDecompressFails();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('decompress invalid data returns null', () async {
+      final z = Zstandard();
+      final corrupted = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final result = await z.decompress(corrupted);
+      expect(result, isNull);
+    });
+
+    test('extension decompress on invalid data returns null', () async {
+      final corrupted = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final result = await corrupted.decompress();
+      expect(result, isNull);
+    });
+  });
+
+  group('Error handling — compress returns null', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = MockCompressFails();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('compress failure returns null', () async {
+      final z = Zstandard();
+      final data = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final result = await z.compress(data, 3);
+      expect(result, isNull);
+    });
+
+    test('extension compress failure returns null', () async {
+      final data = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final result = await data.compress(compressionLevel: 3);
+      expect(result, isNull);
+    });
+  });
+
+  group('Edge cases — null and empty', () {
+    ZstandardPlatform?
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = MockDecompressFails();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('extension compress on null returns null', () async {
+      const Uint8List? data = null;
+      expect(await data.compress(), isNull);
+    });
+
+    test('extension decompress on null returns null', () async {
+      const Uint8List? data = null;
+      expect(await data.decompress(), isNull);
+    });
+  });
+}
diff --git a/zstandard/test/properties_test.dart b/zstandard/test/properties_test.dart
new file mode 100644
index 0000000..ccf7192
--- /dev/null
+++ b/zstandard/test/properties_test.dart
@@ -0,0 +1,78 @@
+import 'dart:typed_data';
+
+import 'package:flutter_test/flutter_test.dart';
+import 'package:kiri_check/kiri_check.dart';
+import 'package:plugin_platform_interface/plugin_platform_interface.dart';
+import 'package:zstandard/zstandard.dart';
+import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';
+
+/// Mock platform that implements identity roundtrip for property tests.
+class RoundtripMockPlatform with MockPlatformInterfaceMixin implements ZstandardPlatform {
+  @override
+  Future<String?> getPlatformVersion() => Future.value('mock');
+
+  @override
+  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async {
+    return Uint8List.fromList(List<int>.from(data));
+  }
+
+  @override
+  Future<Uint8List?> decompress(Uint8List data) async {
+    return Uint8List.fromList(List<int>.from(data));
+  }
+}
+
+void main() {
+  TestWidgetsFlutterBinding.ensureInitialized();
+
+  group('Property-based tests', () {
+    property('roundtrip with mock: decompress(compress(x)) == x', () {
+      final saved = ZstandardPlatform.instance;
+      Zstandard().instance;
+      ZstandardPlatform.instance = RoundtripMockPlatform();
+      addTearDown(() {
+        ZstandardPlatform.instance = saved;
+      });
+      forAll(
+        binary(minLength: 0, maxLength: 2000),
+        (List<int> data) async {
+          final input = Uint8List.fromList(data);
+          final compressed = await Zstandard().compress(input, 3);
+          if (compressed == null) return;
+          final decompressed = await Zstandard().decompress(compressed);
+          expect(decompressed, isNotNull);
+          expect(decompressed!.length, input.length);
+          expect(List<int>.from(decompressed), data);
+        },
+        maxExamples: 200,
+      );
+    });
+
+    property('determinism with mock: same input and level produce same output', () {
+      final saved = ZstandardPlatform.instance;
+      Zstandard().instance;
+      ZstandardPlatform.instance = RoundtripMockPlatform();
+      addTearDown(() {
+        ZstandardPlatform.instance = saved;
+      });
+      forAll(
+        combine2(
+          binary(minLength: 1, maxLength: 500),
+          integer(min: 1, max: 22),
+        ),
+        (tuple) async {
+          final data = tuple.$1;
+          final level = tuple.$2;
+          final input = Uint8List.fromList(data);
+          final compressed1 = await Zstandard().compress(input, level);
+          final compressed2 = await Zstandard().compress(input, level);
+          expect(compressed1, isNotNull);
+          expect(compressed2, isNotNull);
+          expect(compressed1!.length, compressed2!.length);
+          expect(List<int>.from(compressed1), List<int>.from(compressed2));
+        },
+        maxExamples: 100,
+      );
+    });
+  });
+}
diff --git a/zstandard/test/zstandard_advanced_test.dart b/zstandard/test/zstandard_advanced_test.dart
new file mode 100644
index 0000000..d898021
--- /dev/null
+++ b/zstandard/test/zstandard_advanced_test.dart
@@ -0,0 +1,176 @@
+import 'dart:typed_data';
+
+import 'package:flutter_test/flutter_test.dart';
+import 'package:plugin_platform_interface/plugin_platform_interface.dart';
+import 'package:zstandard/zstandard.dart';
+import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';
+
+/// Mock that echoes data for roundtrip-style tests.
+class EchoMockPlatform with MockPlatformInterfaceMixin implements ZstandardPlatform {
+  @override
+  Future<String?> getPlatformVersion() => Future.value('EchoMock 1.0');
+
+  @override
+  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async {
+    return Uint8List.fromList(List<int>.from(data));
+  }
+
+  @override
+  Future<Uint8List?> decompress(Uint8List data) async {
+    return Uint8List.fromList(List<int>.from(data));
+  }
+}
+
+void main() {
+  TestWidgetsFlutterBinding.ensureInitialized();
+
+  group('Compression levels boundary tests', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = EchoMockPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('level 1 forwards correctly', () async {
+      final data = Uint8List.fromList([1, 2, 3]);
+      final result = await Zstandard().compress(data, 1);
+      expect(result, isNotNull);
+      expect(result!.length, 3);
+    });
+
+    test('level 3 forwards correctly', () async {
+      final data = Uint8List.fromList([1, 2, 3]);
+      final result = await Zstandard().compress(data, 3);
+      expect(result, isNotNull);
+    });
+
+    test('level 22 forwards correctly', () async {
+      final data = Uint8List.fromList([1, 2, 3]);
+      final result = await Zstandard().compress(data, 22);
+      expect(result, isNotNull);
+    });
+  });
+
+  group('Data size edge cases', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = EchoMockPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('empty data roundtrip', () async {
+      final data = Uint8List(0);
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+      expect(compressed!.length, 0);
+      final decompressed = await Zstandard().decompress(compressed);
+      expect(decompressed, isNotNull);
+      expect(decompressed!.length, 0);
+    });
+
+    test('1 byte data roundtrip', () async {
+      final data = Uint8List.fromList([42]);
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+      final decompressed = await Zstandard().decompress(compressed!);
+      expect(decompressed, equals(data));
+    });
+
+    test('small data roundtrip', () async {
+      final data = Uint8List.fromList([1, 2, 3, 4, 5, 6, 7, 8]);
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+      final decompressed = await Zstandard().decompress(compressed!);
+      expect(decompressed, equals(data));
+    });
+
+    test('medium data (10KB) roundtrip', () async {
+      final data = Uint8List.fromList(List.generate(10 * 1024, (i) => i % 256));
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+      final decompressed = await Zstandard().decompress(compressed!);
+      expect(decompressed, equals(data));
+    });
+  });
+
+  group('Data pattern tests', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = EchoMockPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('highly compressible pattern (repeated bytes)', () async {
+      final data = Uint8List.fromList(List.filled(1000, 42));
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+      expect(compressed!.length, 1000);
+    });
+
+    test('sequential data', () async {
+      final data = Uint8List.fromList(List.generate(500, (i) => i % 256));
+      final compressed = await Zstandard().compress(data, 3);
+      expect(compressed, isNotNull);
+    });
+  });
+
+  group('Concurrent operations', () {
+    ZstandardPlatform? saved;
+
+    setUp(() {
+      Zstandard().instance;
+      saved = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = EchoMockPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = saved!;
+    });
+
+    test('multiple compressions in parallel', () async {
+      final futures = List.generate(10, (i) {
+        final data = Uint8List.fromList(List.generate(1000, (j) => (i + j) % 256));
+        return data.compress(compressionLevel: 3);
+      });
+      final results = await Future.wait(futures);
+      expect(results.every((r) => r != null), isTrue);
+      expect(results.length, 10);
+    });
+
+    test('compression and decompression interleaved', () async {
+      final z = Zstandard();
+      final data = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final f1 = z.compress(data, 1);
+      final f2 = z.compress(data, 3);
+      final f3 = z.compress(data, 22);
+      final c1 = await f1;
+      final c2 = await f2;
+      final c3 = await f3;
+      expect(c1, isNotNull);
+      expect(c2, isNotNull);
+      expect(c3, isNotNull);
+      final d1 = await z.decompress(c1!);
+      final d2 = await z.decompress(c2!);
+      expect(d1, equals(data));
+      expect(d2, equals(data));
+    });
+  });
+}
diff --git a/zstandard/test/zstandard_test.dart b/zstandard/test/zstandard_test.dart
new file mode 100644
index 0000000..0f1d794
--- /dev/null
+++ b/zstandard/test/zstandard_test.dart
@@ -0,0 +1,140 @@
+import 'dart:typed_data';
+
+import 'package:flutter_test/flutter_test.dart';
+import 'package:plugin_platform_interface/plugin_platform_interface.dart';
+import 'package:zstandard/zstandard.dart';
+import 'package:zstandard_platform_interface/zstandard_platform_interface.dart';
+import 'package:zstandard/src/platform_manager.dart';
+
+class MockZstandardPlatform with MockPlatformInterfaceMixin implements ZstandardPlatform {
+  @override
+  Future<String?> getPlatformVersion() => Future.value('MockPlatform 1.0');
+
+  @override
+  Future<Uint8List?> compress(Uint8List data, int compressionLevel) async {
+    return Uint8List.fromList([0x7f, 0x7f, 0x7f]); // fake compressed
+  }
+
+  @override
+  Future<Uint8List?> decompress(Uint8List data) async {
+    return Uint8List.fromList([1, 2, 3, 4, 5]); // fake decompressed
+  }
+}
+
+void main() {
+  TestWidgetsFlutterBinding.ensureInitialized();
+
+  group('Zstandard singleton', () {
+    test('Zstandard() returns the same instance', () {
+      final a = Zstandard();
+      final b = Zstandard();
+      expect(identical(a, b), isTrue);
+    });
+  });
+
+  group('Zstandard with mock platform', () {
+    ZstandardPlatform? savedInstance;
+
+    setUp(() {
+      // Trigger registration so ZstandardImpl caches; then override with mock.
+      Zstandard().instance;
+      savedInstance = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = MockZstandardPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = savedInstance!;
+    });
+
+    test('getPlatformVersion returns mock value', () async {
+      final z = Zstandard();
+      final version = await z.getPlatformVersion();
+      expect(version, 'MockPlatform 1.0');
+    });
+
+    test('compress forwards to platform and returns result', () async {
+      final z = Zstandard();
+      final data = Uint8List.fromList([1, 2, 3, 4, 5]);
+      final compressed = await z.compress(data, 3);
+      expect(compressed, isNotNull);
+      expect(compressed, Uint8List.fromList([0x7f, 0x7f, 0x7f]));
+    });
+
+    test('decompress forwards to platform and returns result', () async {
+      final z = Zstandard();
+      final data = Uint8List.fromList([0x7f, 0x7f, 0x7f]);
+      final decompressed = await z.decompress(data);
+      expect(decompressed, isNotNull);
+      expect(decompressed, Uint8List.fromList([1, 2, 3, 4, 5]));
+    });
+
+    test('instance returns registered platform', () {
+      final z = Zstandard();
+      expect(z.instance, isA<ZstandardPlatform>());
+    });
+  });
+
+  group('ZstandardExt extension', () {
+    ZstandardPlatform? savedInstance;
+
+    setUp(() {
+      Zstandard().instance; // trigger registration
+      savedInstance = ZstandardPlatform.instance;
+      ZstandardPlatform.instance = MockZstandardPlatform();
+    });
+
+    tearDown(() {
+      ZstandardPlatform.instance = savedInstance!;
+    });
+
+    test('compress on null returns null', () async {
+      const Uint8List? data = null;
+      final compressed = await data.compress();
+      expect(compressed, isNull);
+    });
+
+    test('decompress on null returns null', () async {
+      const Uint8List? data = null;
+      final decompressed = await data.decompress();
+      expect(decompressed, isNull);
+    });
+
+    test('compress on non-null forwards to platform', () async {
+      final data = Uint8List.fromList([1, 2, 3]);
+      final compressed = await data.compress(compressionLevel: 5);
+      expect(compressed, isNotNull);
+      expect(compressed, Uint8List.fromList([0x7f, 0x7f, 0x7f]));
+    });
+
+    test('decompress on non-null forwards to platform', () async {
+      final data = Uint8List.fromList([0x7f, 0x7f, 0x7f]);
+      final decompressed = await data.decompress();
+      expect(decompressed, isNotNull);
+      expect(decompressed, Uint8List.fromList([1, 2, 3, 4, 5]));
+    });
+
+    test('compress uses default compression level 3', () async {
+      final data = Uint8List.fromList([10, 20, 30]);
+      final compressed = await data.compress();
+      expect(compressed, isNotNull);
+    });
+  });
+
+  group('PlatformManager', () {
+    test('singleton returns same instance', () {
+      final a = PlatformManager();
+      final b = PlatformManager();
+      expect(identical(a, b), isTrue);
+    });
+
+    test('isDesktop is true when any desktop platform', () {
+      final pm = PlatformManager();
+      expect(pm.isDesktop, pm.isWindows || pm.isLinux || pm.isMacOS);
+    });
+
+    test('isWeb is a boolean', () {
+      final pm = PlatformManager();
+      expect(pm.isWeb, isA<bool>());
+    });
+  });
+}
diff --git a/zstandard_android/README.md b/zstandard_android/README.md
index dc25209..a558a9a 100644
--- a/zstandard_android/README.md
+++ b/zstandard_android/README.md
@@ -2,20 +2,68 @@
 
 # zstandard_android
 
-The Android implementation of [`zstandard`](https://pub.dev/packages/zstandard).
+The Android implementation of the [zstandard](https://pub.dev/packages/zstandard) Flutter plugin. Uses FFI and the native Zstandard C library.
+
+## Installation
+
+Add the main plugin to your app; this package is included automatically via the federated plugin:
+
+```yaml
+dependencies:
+  zstandard: ^1.3.29
+```
+
+No extra Gradle or native setup is required for normal use.
 
 ## Usage
 
+Use the main [zstandard](https://pub.dev/packages/zstandard) API; the Android implementation is selected automatically on Android:
+
 ```dart
-void act() async {
-  final zstandard = ZstandardAndroid();
+import 'package:zstandard/zstandard.dart';
 
-  Uint8List original = Uint8List.fromList([...]);
+void main() async {
+  final zstandard = Zstandard();
+  final data = Uint8List.fromList([1, 2, 3, 4, 5]);
 
-  Uint8List? compressed = await zstandard.compress(original);
-
-  Uint8List? decompressed = await zstandard.decompress(compressed ?? Uint8List(0));
+  final compressed = await zstandard.compress(data, 3);
+  final decompressed = await zstandard.decompress(compressed ?? Uint8List(0));
 }
 ```
 
+Or use the extension methods:
+
+```dart
+final compressed = await data.compress(compressionLevel: 3);
+final decompressed = await compressed?.decompress();
+```
+
+## API
+
+- **ZstandardAndroid()** — Creates the Android platform implementation.
+- **compress(Uint8List data, int compressionLevel)** — Compresses `data` (level 1–22). Returns compressed bytes or `null`.
+- **decompress(Uint8List data)** — Decompresses zstd-compressed data. Returns decompressed bytes or `null`.
+- **getPlatformVersion()** — Returns a platform identifier string.
+
+## Architecture
+
+This package uses Dart FFI to load `libzstandard_android.so` and call the Zstandard C API (`ZSTD_compress`, `ZSTD_decompress`, `ZSTD_compressBound`, `ZSTD_getFrameContentSize`). Heavy work may run in a background isolate to keep the UI responsive.
+
+## Testing
+
+From the package directory:
+
+```bash
+flutter test
+```
+
+Unit tests run only on Android (they are skipped on other platforms). For full integration tests, run the main [zstandard](https://pub.dev/packages/zstandard) example app on an Android device or emulator.
+
+## Troubleshooting
+
+- **Library not found**: Ensure you build and run the app for Android (e.g. `flutter run` or `flutter build apk`) so the native library is compiled and packaged.
+- **Crashes**: Verify inputs are non-null and valid; for decompress, ensure the data is a valid zstd frame.
+
+See the [documentation](https://github.com/landamessenger/zstandard/tree/master/docs) for more.
+
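+## Example: detecting a zstd frame (illustrative)
+
+The mock data used in this project's unit tests begins with the zstd frame magic number `0xFD2FB528`, which appears little-endian in the byte stream as `0x28 0xB5 0x2F 0xFD`. A cheap pre-check like the sketch below can avoid calling `decompress` on data that cannot be a regular zstd frame. The helper name is illustrative and not part of this package's API, and skippable frames use a different magic range, so treat it only as a heuristic:
+
+```dart
+import 'dart:typed_data';
+
+/// Illustrative helper (not part of zstandard_android): true if [data]
+/// starts with the standard zstd frame magic number, stored little-endian
+/// as the bytes 0x28 0xB5 0x2F 0xFD.
+bool looksLikeZstdFrame(Uint8List data) {
+  if (data.length < 4) return false;
+  return data[0] == 0x28 && data[1] == 0xb5 && data[2] == 0x2f && data[3] == 0xfd;
+}
+```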

diff --git a/zstandard_android/android/build.gradle b/zstandard_android/android/build.gradle index 24b8619..4212247 100644 --- a/zstandard_android/android/build.gradle +++ b/zstandard_android/android/build.gradle @@ -12,7 +12,7 @@ buildscript { dependencies { // The Android Gradle Plugin knows how to build native code with the NDK. - classpath("com.android.tools.build:gradle:8.1.0") + classpath("com.android.tools.build:gradle:8.2.2") classpath("org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version") } } @@ -45,7 +45,7 @@ android { // Invoke the shared CMake build with the Android Gradle Plugin. externalNativeBuild { cmake { - path = "../src/CMakeLists.txt" + path = "../zstd_build/CMakeLists.txt" // The default CMake version for the Android Gradle Plugin is 3.10.2. // https://developer.android.com/studio/projects/install-ndk#vanilla_cmake @@ -72,6 +72,7 @@ android { defaultConfig { minSdk = 21 + testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner" // Support 16KB page size for Android 15+ externalNativeBuild { @@ -82,3 +83,9 @@ android { } } } + +dependencies { + androidTestImplementation "androidx.test:runner:1.5.2" + androidTestImplementation "androidx.test.ext:junit:1.1.5" + androidTestImplementation "androidx.test:rules:1.5.0" +} diff --git a/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidComprehensiveTest.kt b/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidComprehensiveTest.kt new file mode 100644 index 0000000..4bfea31 --- /dev/null +++ b/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidComprehensiveTest.kt @@ -0,0 +1,45 @@ +package com.landamessenger.zstandard_android + +import androidx.test.platform.app.InstrumentationRegistry +import androidx.test.ext.junit.runners.AndroidJUnit4 +import io.flutter.plugin.common.MethodChannel +import org.junit.Assert.assertNotNull +import org.junit.Assert.assertTrue +import org.junit.Test +import org.junit.runner.RunWith + +/** + * Additional instrumented tests for the zstandard_android plugin. + * Verifies plugin type, instantiation, and that the native library can be loaded + * in an Android context (actual compression is tested from Dart/FFI). 
+ */ +@RunWith(AndroidJUnit4::class) +class ZstandardAndroidComprehensiveTest { + + @Test + fun pluginImplementsFlutterPlugin() { + val plugin = ZstandardAndroidPlugin() + assertTrue(plugin is io.flutter.embedding.engine.plugins.FlutterPlugin) + } + + @Test + fun pluginImplementsMethodCallHandler() { + val plugin = ZstandardAndroidPlugin() + assertTrue(plugin is MethodChannel.MethodCallHandler) + } + + @Test + fun instrumentationContextIsValid() { + val context = InstrumentationRegistry.getInstrumentation().targetContext + assertNotNull(context) + assertNotNull(context.packageName) + } + + @Test + fun multiplePluginInstancesCanBeCreated() { + val plugin1 = ZstandardAndroidPlugin() + val plugin2 = ZstandardAndroidPlugin() + assertNotNull(plugin1) + assertNotNull(plugin2) + } +} diff --git a/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidInstrumentedTest.kt b/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidInstrumentedTest.kt new file mode 100644 index 0000000..7b40c21 --- /dev/null +++ b/zstandard_android/android/src/androidTest/kotlin/com/landamessenger/zstandard_android/ZstandardAndroidInstrumentedTest.kt @@ -0,0 +1,28 @@ +package com.landamessenger.zstandard_android + +import androidx.test.platform.app.InstrumentationRegistry +import androidx.test.ext.junit.runners.AndroidJUnit4 +import org.junit.Assert.assertNotNull +import org.junit.Test +import org.junit.runner.RunWith + +/** + * Instrumented tests for the zstandard_android plugin. + * Compression and decompression are exercised by Dart code via FFI; these tests + * verify the native library and plugin are loadable in an Android context. + */ +@RunWith(AndroidJUnit4::class) +class ZstandardAndroidInstrumentedTest { + + @Test + fun contextIsNotNull() { + val context = InstrumentationRegistry.getInstrumentation().targetContext + assertNotNull(context) + } + + @Test + fun pluginClassCanBeInstantiated() { + val plugin = ZstandardAndroidPlugin() + assertNotNull(plugin) + } +} diff --git a/zstandard_android/example/android/settings.gradle b/zstandard_android/example/android/settings.gradle index b9e43bd..e07c1cb 100644 --- a/zstandard_android/example/android/settings.gradle +++ b/zstandard_android/example/android/settings.gradle @@ -18,7 +18,7 @@ pluginManagement { plugins { id "dev.flutter.flutter-plugin-loader" version "1.0.0" - id "com.android.application" version "8.1.0" apply false + id "com.android.application" version "8.2.2" apply false id "org.jetbrains.kotlin.android" version "1.8.22" apply false } diff --git a/zstandard_android/example/integration_test/android_compression_integration_test.dart b/zstandard_android/example/integration_test/android_compression_integration_test.dart new file mode 100644 index 0000000..7d25c52 --- /dev/null +++ b/zstandard_android/example/integration_test/android_compression_integration_test.dart @@ -0,0 +1,78 @@ +import 'dart:typed_data'; + +import 'package:flutter_test/flutter_test.dart'; +import 'package:integration_test/integration_test.dart'; +import 'package:leak_tracker/leak_tracker.dart'; +import 'package:leak_tracker_testing/leak_tracker_testing.dart'; +import 'package:zstandard_android/zstandard_android.dart'; + +/// Android integration tests: run on device/emulator. No platform skips. 
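+///
+/// Illustrative note (editor's addition, not in the original file): with the
+/// integration_test package, a file like this is typically executed on a
+/// connected device or emulator with
+/// `flutter test integration_test/android_compression_integration_test.dart`
+/// from the example directory.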
+void main() { + IntegrationTestWidgetsFlutterBinding.ensureInitialized(); + + group('ZstandardAndroid', () { + late ZstandardAndroid zstandard; + + setUp(() { + zstandard = ZstandardAndroid(); + }); + + test('compress and decompress small data', () async { + final data = Uint8List.fromList([1, 2, 3, 4, 5]); + final compressed = await zstandard.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await zstandard.decompress(compressed!); + expect(decompressed, equals(data)); + }); + + test('compress and decompress large data', () async { + final data = Uint8List.fromList(List.generate(100000, (i) => i % 256)); + final compressed = await zstandard.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await zstandard.decompress(compressed!); + expect(decompressed, equals(data)); + }); + + test('compress and decompress empty data', () async { + final data = Uint8List(0); + final compressed = await zstandard.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await zstandard.decompress(compressed!); + expect(decompressed, equals(data)); + }); + + test('compress with levels 1, 3, 10, 22', () async { + final data = Uint8List.fromList(List.filled(1000, 42)); + for (final level in [1, 3, 10, 22]) { + final compressed = await zstandard.compress(data, level); + expect(compressed, isNotNull); + final decompressed = await zstandard.decompress(compressed!); + expect(decompressed, equals(data)); + } + }); + + test('decompress corrupted data returns null', () async { + final corrupted = Uint8List.fromList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + final result = await zstandard.decompress(corrupted); + expect(result, isNull); + }); + + test('decompress random bytes returns null', () async { + final random = Uint8List.fromList(List.generate(64, (i) => (i * 31) % 256)); + final result = await zstandard.decompress(random); + expect(result, isNull); + }); + + test('compress and decompress do not leak', () async { + final data = Uint8List.fromList([1, 2, 3, 4, 5]); + final compressed = await zstandard.compress(data, 3); + expect(compressed, isNotNull); + final decompressed = await zstandard.decompress(compressed!); + expect(decompressed, equals(data)); + if (LeakTracking.isStarted) { + final leaks = await LeakTracking.collectLeaks(); + expect(leaks, isLeakFree); + } + }); + }); +} diff --git a/zstandard_android/example/integration_test/android_properties_integration_test.dart b/zstandard_android/example/integration_test/android_properties_integration_test.dart new file mode 100644 index 0000000..dc7abf1 --- /dev/null +++ b/zstandard_android/example/integration_test/android_properties_integration_test.dart @@ -0,0 +1,32 @@ +import 'dart:typed_data'; + +import 'package:flutter_test/flutter_test.dart'; +import 'package:integration_test/integration_test.dart'; +import 'package:kiri_check/kiri_check.dart'; +import 'package:zstandard_android/zstandard_android.dart'; + +/// Property-based integration tests for Android. Run on device/emulator. No skips. 
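+///
+/// Illustrative note (editor's addition, not in the original file): kiri_check's
+/// `forAll` draws up to `maxExamples` inputs from the `binary(...)` generator
+/// and reports any failing input; see the kiri_check documentation for details
+/// on how failing cases are minimized.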
+void main() { + IntegrationTestWidgetsFlutterBinding.ensureInitialized(); + + group('Property-based tests', () { + property( + 'roundtrip: decompress(compress(x)) == x', + () { + forAll( + binary(minLength: 0, maxLength: 1000), + (List data) async { + final input = Uint8List.fromList(data); + final z = ZstandardAndroid(); + final compressed = await z.compress(input, 3); + if (compressed == null) return; + final decompressed = await z.decompress(compressed); + expect(decompressed, isNotNull); + expect(List.from(decompressed!), data); + }, + maxExamples: 100, + ); + }, + ); + }); +} diff --git a/zstandard_android/example/pubspec.yaml b/zstandard_android/example/pubspec.yaml index 3bed8a6..32ed44d 100644 --- a/zstandard_android/example/pubspec.yaml +++ b/zstandard_android/example/pubspec.yaml @@ -17,6 +17,11 @@ dev_dependencies: flutter_test: sdk: flutter flutter_lints: ^4.0.0 + integration_test: + sdk: flutter + kiri_check: ^1.3.1 + leak_tracker: ^11.0.0 + leak_tracker_testing: ^3.0.2 flutter: uses-material-design: true diff --git a/zstandard_android/ffigen.yaml b/zstandard_android/ffigen.yaml index de758ae..64dcd88 100644 --- a/zstandard_android/ffigen.yaml +++ b/zstandard_android/ffigen.yaml @@ -1,15 +1,15 @@ # Run with `dart run ffigen --config ffigen.yaml`. name: ZstandardAndroidBindings description: | - Bindings for `src/zstandard_android.h`. + Bindings for zstd.h from repo root zstd/. Regenerate bindings with `dart run ffigen --config ffigen.yaml`. output: 'lib/zstandard_android_bindings_generated.dart' headers: entry-points: - - 'src/zstd.h' + - '../zstd/zstd.h' include-directives: - - 'src/zstd.h' + - '../zstd/zstd.h' preamble: | // ignore_for_file: always_specify_types // ignore_for_file: camel_case_types diff --git a/zstandard_android/lib/zstandard_android.dart b/zstandard_android/lib/zstandard_android.dart index 993f87e..a2f1cc2 100644 --- a/zstandard_android/lib/zstandard_android.dart +++ b/zstandard_android/lib/zstandard_android.dart @@ -22,13 +22,20 @@ final DynamicLibrary _dylib = () { final ZstandardAndroidBindings _bindings = ZstandardAndroidBindings(_dylib); +/// Android implementation of [ZstandardPlatform] using FFI and the native zstd library. +/// +/// Loads libzstandard_android.so and uses ZSTD_compress, ZSTD_decompress, +/// ZSTD_compressBound, and ZSTD_getFrameContentSize. The main [zstandard] +/// plugin registers this implementation automatically on Android. class ZstandardAndroid extends ZstandardPlatform { - /// A constructor that allows tests to override the window object used by the plugin. + /// Creates the Android platform implementation. ZstandardAndroid(); final methodChannel = const MethodChannel('plugins.flutter.io/zstandard'); /// Registers this class as the default instance of [ZstandardPlatform]. + /// + /// Called by the main plugin when running on Android. static void registerWith() { ZstandardPlatform.instance = ZstandardAndroid(); } @@ -71,15 +78,21 @@ class ZstandardAndroid extends ZstandardPlatform { @override Future decompress(Uint8List data) async { + const int contentSizeUnknown = -1; + const int contentSizeError = -2; + final int compressedSize = data.lengthInBytes; final Pointer src = malloc.allocate(compressedSize); src.asTypedList(compressedSize).setAll(0, data); final int decompressedSizeExpected = - _bindings.ZSTD_getDecompressedSize(src.cast(), compressedSize); - final int dstCapacity = decompressedSizeExpected > 0 - ? 
decompressedSizeExpected - : compressedSize * 20; + _bindings.ZSTD_getFrameContentSize(src.cast(), compressedSize); + final int dstCapacity = + (decompressedSizeExpected != contentSizeUnknown && + decompressedSizeExpected != contentSizeError && + decompressedSizeExpected > 0) + ? decompressedSizeExpected + : compressedSize * 20; final Pointer dst = malloc.allocate(dstCapacity); try { @@ -90,11 +103,10 @@ class ZstandardAndroid extends ZstandardPlatform { compressedSize, ); - if (decompressedSize > 0) { - return Uint8List.fromList(dst.asTypedList(decompressedSize)); - } else { + if (decompressedSize < 0) { return null; } + return Uint8List.fromList(dst.asTypedList(decompressedSize)); } finally { malloc.free(src); malloc.free(dst); diff --git a/zstandard_android/lib/zstandard_android_bindings_generated.dart b/zstandard_android/lib/zstandard_android_bindings_generated.dart index d053d0d..fa0744d 100644 --- a/zstandard_android/lib/zstandard_android_bindings_generated.dart +++ b/zstandard_android/lib/zstandard_android_bindings_generated.dart @@ -8,14 +8,14 @@ // ignore_for_file: type=lint import 'dart:ffi' as ffi; -/// Bindings for `src/zstandard_android.h`. +/// Bindings for zstd.h from repo root zstd/. /// /// Regenerate bindings with `dart run ffigen --config ffigen.yaml`. /// class ZstandardAndroidBindings { /// Holds the symbol lookup function. final ffi.Pointer Function(String symbolName) - _lookup; + _lookup; /// The symbols are looked up in [dynamicLibrary]. ZstandardAndroidBindings(ffi.DynamicLibrary dynamicLibrary) @@ -24,7 +24,7 @@ class ZstandardAndroidBindings { /// The symbols are looked up with [lookup]. ZstandardAndroidBindings.fromLookup( ffi.Pointer Function(String symbolName) - lookup) + lookup) : _lookup = lookup; /// ! ZSTD_versionNumber() : @@ -34,10 +34,10 @@ class ZstandardAndroidBindings { } late final _ZSTD_versionNumberPtr = - _lookup>( - 'ZSTD_versionNumber'); + _lookup>( + 'ZSTD_versionNumber'); late final _ZSTD_versionNumber = - _ZSTD_versionNumberPtr.asFunction(); + _ZSTD_versionNumberPtr.asFunction(); /// ! ZSTD_versionString() : /// Return runtime library version, like "1.4.5". Requires v1.3.0+. @@ -46,12 +46,12 @@ class ZstandardAndroidBindings { } late final _ZSTD_versionStringPtr = - _lookup Function()>>( - 'ZSTD_versionString'); + _lookup Function()>>( + 'ZSTD_versionString'); late final _ZSTD_versionString = - _ZSTD_versionStringPtr.asFunction Function()>(); + _ZSTD_versionStringPtr.asFunction Function()>(); - /// Simple API + /// Simple Core API /// / /// /*! ZSTD_compress() : /// Compresses `src` content as a single zstd compressed frame into already allocated `dst`. @@ -60,12 +60,12 @@ class ZstandardAndroidBindings { /// @return : compressed size written into `dst` (<= `dstCapacity), /// or an error code if it fails (which can be tested using ZSTD_isError()). int ZSTD_compress( - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - int compressionLevel, - ) { + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + int compressionLevel, + ) { return _ZSTD_compress( dst, dstCapacity, @@ -93,11 +93,11 @@ class ZstandardAndroidBindings { /// @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), /// or an errorCode if it fails (which can be tested using ZSTD_isError()). 
int ZSTD_decompress( - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int compressedSize, - ) { + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int compressedSize, + ) { return _ZSTD_decompress( dst, dstCapacity, @@ -114,9 +114,9 @@ class ZstandardAndroidBindings { int Function(ffi.Pointer, int, ffi.Pointer, int)>(); int ZSTD_getFrameContentSize( - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_getFrameContentSize( src, srcSize, @@ -130,16 +130,16 @@ class ZstandardAndroidBindings { late final _ZSTD_getFrameContentSize = _ZSTD_getFrameContentSizePtr .asFunction, int)>(); - /// ! ZSTD_getDecompressedSize() : - /// NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). + /// ! ZSTD_getDecompressedSize() (obsolete): + /// This function is now obsolete, in favor of ZSTD_getFrameContentSize(). /// Both functions work the same way, but ZSTD_getDecompressedSize() blends /// "empty", "unknown" and "error" results to the same return value (0), /// while ZSTD_getFrameContentSize() gives them separate return values. /// @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. int ZSTD_getDecompressedSize( - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_getDecompressedSize( src, srcSize, @@ -159,10 +159,15 @@ class ZstandardAndroidBindings { /// @return : the compressed size of the first frame starting at `src`, /// suitable to pass as `srcSize` to `ZSTD_decompress` or similar, /// or an error code if input is invalid + /// Note 1: this method is called _find*() because it's not enough to read the header, + /// it may have to scan through the frame's content, to reach its end. + /// Note 2: this method also works with Skippable Frames. In which case, + /// it returns the size of the complete skippable frame, + /// which is always equal to its content size + 8 bytes for headers. int ZSTD_findFrameCompressedSize( - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_findFrameCompressedSize( src, srcSize, @@ -170,63 +175,78 @@ class ZstandardAndroidBindings { } late final _ZSTD_findFrameCompressedSizePtr = _lookup< - ffi + ffi .NativeFunction, ffi.Size)>>( 'ZSTD_findFrameCompressedSize'); late final _ZSTD_findFrameCompressedSize = _ZSTD_findFrameCompressedSizePtr .asFunction, int)>(); int ZSTD_compressBound( - int srcSize, - ) { + int srcSize, + ) { return _ZSTD_compressBound( srcSize, ); } late final _ZSTD_compressBoundPtr = - _lookup>( - 'ZSTD_compressBound'); + _lookup>( + 'ZSTD_compressBound'); late final _ZSTD_compressBound = - _ZSTD_compressBoundPtr.asFunction(); + _ZSTD_compressBoundPtr.asFunction(); - /// ZSTD_isError() : + /// ====== Error helper functions ======*/ + /// /* ZSTD_isError() : /// Most ZSTD_* functions returning a size_t value can be tested for error, /// using ZSTD_isError(). 
/// @return 1 if error, 0 otherwise int ZSTD_isError( - int code, - ) { + int result, + ) { return _ZSTD_isError( - code, + result, ); } late final _ZSTD_isErrorPtr = - _lookup>( - 'ZSTD_isError'); + _lookup>( + 'ZSTD_isError'); late final _ZSTD_isError = _ZSTD_isErrorPtr.asFunction(); + int ZSTD_getErrorCode( + int functionResult, + ) { + return _ZSTD_getErrorCode( + functionResult, + ); + } + + late final _ZSTD_getErrorCodePtr = + _lookup>( + 'ZSTD_getErrorCode'); + late final _ZSTD_getErrorCode = + _ZSTD_getErrorCodePtr.asFunction(); + ffi.Pointer ZSTD_getErrorName( - int code, - ) { + int result, + ) { return _ZSTD_getErrorName( - code, + result, ); } late final _ZSTD_getErrorNamePtr = - _lookup Function(ffi.Size)>>( - 'ZSTD_getErrorName'); + _lookup Function(ffi.Size)>>( + 'ZSTD_getErrorName'); late final _ZSTD_getErrorName = - _ZSTD_getErrorNamePtr.asFunction Function(int)>(); + _ZSTD_getErrorNamePtr.asFunction Function(int)>(); int ZSTD_minCLevel() { return _ZSTD_minCLevel(); } late final _ZSTD_minCLevelPtr = - _lookup>('ZSTD_minCLevel'); + _lookup>('ZSTD_minCLevel'); late final _ZSTD_minCLevel = _ZSTD_minCLevelPtr.asFunction(); int ZSTD_maxCLevel() { @@ -234,7 +254,7 @@ class ZstandardAndroidBindings { } late final _ZSTD_maxCLevelPtr = - _lookup>('ZSTD_maxCLevel'); + _lookup>('ZSTD_maxCLevel'); late final _ZSTD_maxCLevel = _ZSTD_maxCLevelPtr.asFunction(); int ZSTD_defaultCLevel() { @@ -242,33 +262,33 @@ class ZstandardAndroidBindings { } late final _ZSTD_defaultCLevelPtr = - _lookup>('ZSTD_defaultCLevel'); + _lookup>('ZSTD_defaultCLevel'); late final _ZSTD_defaultCLevel = - _ZSTD_defaultCLevelPtr.asFunction(); + _ZSTD_defaultCLevelPtr.asFunction(); ffi.Pointer ZSTD_createCCtx() { return _ZSTD_createCCtx(); } late final _ZSTD_createCCtxPtr = - _lookup Function()>>( - 'ZSTD_createCCtx'); + _lookup Function()>>( + 'ZSTD_createCCtx'); late final _ZSTD_createCCtx = - _ZSTD_createCCtxPtr.asFunction Function()>(); + _ZSTD_createCCtxPtr.asFunction Function()>(); int ZSTD_freeCCtx( - ffi.Pointer cctx, - ) { + ffi.Pointer cctx, + ) { return _ZSTD_freeCCtx( cctx, ); } late final _ZSTD_freeCCtxPtr = - _lookup)>>( - 'ZSTD_freeCCtx'); + _lookup)>>( + 'ZSTD_freeCCtx'); late final _ZSTD_freeCCtx = - _ZSTD_freeCCtxPtr.asFunction)>(); + _ZSTD_freeCCtxPtr.asFunction)>(); /// ! ZSTD_compressCCtx() : /// Same as ZSTD_compress(), using an explicit ZSTD_CCtx. @@ -276,15 +296,15 @@ class ZstandardAndroidBindings { /// this function compresses at the requested compression level, /// __ignoring any other advanced parameter__ . /// If any advanced parameter was set using the advanced API, - /// they will all be reset. Only `compressionLevel` remains. + /// they will all be reset. Only @compressionLevel remains. 
int ZSTD_compressCCtx( - ffi.Pointer cctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - int compressionLevel, - ) { + ffi.Pointer cctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + int compressionLevel, + ) { return _ZSTD_compressCCtx( cctx, dst, @@ -313,36 +333,36 @@ class ZstandardAndroidBindings { } late final _ZSTD_createDCtxPtr = - _lookup Function()>>( - 'ZSTD_createDCtx'); + _lookup Function()>>( + 'ZSTD_createDCtx'); late final _ZSTD_createDCtx = - _ZSTD_createDCtxPtr.asFunction Function()>(); + _ZSTD_createDCtxPtr.asFunction Function()>(); int ZSTD_freeDCtx( - ffi.Pointer dctx, - ) { + ffi.Pointer dctx, + ) { return _ZSTD_freeDCtx( dctx, ); } late final _ZSTD_freeDCtxPtr = - _lookup)>>( - 'ZSTD_freeDCtx'); + _lookup)>>( + 'ZSTD_freeDCtx'); late final _ZSTD_freeDCtx = - _ZSTD_freeDCtxPtr.asFunction)>(); + _ZSTD_freeDCtxPtr.asFunction)>(); /// ! ZSTD_decompressDCtx() : /// Same as ZSTD_decompress(), /// requires an allocated ZSTD_DCtx. /// Compatible with sticky parameters (see below). int ZSTD_decompressDCtx( - ffi.Pointer dctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer dctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_decompressDCtx( dctx, dst, @@ -371,18 +391,18 @@ class ZstandardAndroidBindings { /// - an error status field, which must be tested using ZSTD_isError() /// - lower and upper bounds, both inclusive ZSTD_bounds ZSTD_cParam_getBounds( - int cParam, - ) { + int cParam, + ) { return _ZSTD_cParam_getBounds( cParam, ); } late final _ZSTD_cParam_getBoundsPtr = - _lookup>( - 'ZSTD_cParam_getBounds'); + _lookup>( + 'ZSTD_cParam_getBounds'); late final _ZSTD_cParam_getBounds = - _ZSTD_cParam_getBoundsPtr.asFunction(); + _ZSTD_cParam_getBoundsPtr.asFunction(); /// ! ZSTD_CCtx_setParameter() : /// Set one compression parameter, selected by enum ZSTD_cParameter. @@ -395,10 +415,10 @@ class ZstandardAndroidBindings { /// new parameters will be active for next job only (after a flush()). /// @return : an error code (which can be tested using ZSTD_isError()). int ZSTD_CCtx_setParameter( - ffi.Pointer cctx, - int param, - int value, - ) { + ffi.Pointer cctx, + int param, + int value, + ) { return _ZSTD_CCtx_setParameter( cctx, param, @@ -428,9 +448,9 @@ class ZstandardAndroidBindings { /// or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), /// this value is automatically overridden by srcSize instead. int ZSTD_CCtx_setPledgedSrcSize( - ffi.Pointer cctx, - int pledgedSrcSize, - ) { + ffi.Pointer cctx, + int pledgedSrcSize, + ) { return _ZSTD_CCtx_setPledgedSrcSize( cctx, pledgedSrcSize, @@ -458,9 +478,9 @@ class ZstandardAndroidBindings { /// otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) /// - Both : similar to resetting the session, followed by resetting parameters. int ZSTD_CCtx_reset( - ffi.Pointer cctx, - int reset, - ) { + ffi.Pointer cctx, + int reset, + ) { return _ZSTD_CCtx_reset( cctx, reset, @@ -486,12 +506,12 @@ class ZstandardAndroidBindings { /// @return : compressed size written into `dst` (<= `dstCapacity), /// or an error code if it fails (which can be tested using ZSTD_isError()). 
int ZSTD_compress2( - ffi.Pointer cctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer cctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_compress2( cctx, dst, @@ -516,18 +536,18 @@ class ZstandardAndroidBindings { /// - an error status field, which must be tested using ZSTD_isError() /// - both lower and upper bounds, inclusive ZSTD_bounds ZSTD_dParam_getBounds( - int dParam, - ) { + int dParam, + ) { return _ZSTD_dParam_getBounds( dParam, ); } late final _ZSTD_dParam_getBoundsPtr = - _lookup>( - 'ZSTD_dParam_getBounds'); + _lookup>( + 'ZSTD_dParam_getBounds'); late final _ZSTD_dParam_getBounds = - _ZSTD_dParam_getBoundsPtr.asFunction(); + _ZSTD_dParam_getBoundsPtr.asFunction(); /// ! ZSTD_DCtx_setParameter() : /// Set one compression parameter, selected by enum ZSTD_dParameter. @@ -536,10 +556,10 @@ class ZstandardAndroidBindings { /// Setting a parameter is only possible during frame initialization (before starting decompression). /// @return : 0, or an error code (which can be tested using ZSTD_isError()). int ZSTD_DCtx_setParameter( - ffi.Pointer dctx, - int param, - int value, - ) { + ffi.Pointer dctx, + int param, + int value, + ) { return _ZSTD_DCtx_setParameter( dctx, param, @@ -560,9 +580,9 @@ class ZstandardAndroidBindings { /// Parameters can only be reset when no active frame is being decompressed. /// @return : 0, or an error code, which can be tested with ZSTD_isError() int ZSTD_DCtx_reset( - ffi.Pointer dctx, - int reset, - ) { + ffi.Pointer dctx, + int reset, + ) { return _ZSTD_DCtx_reset( dctx, reset, @@ -582,22 +602,22 @@ class ZstandardAndroidBindings { } late final _ZSTD_createCStreamPtr = - _lookup Function()>>( - 'ZSTD_createCStream'); + _lookup Function()>>( + 'ZSTD_createCStream'); late final _ZSTD_createCStream = - _ZSTD_createCStreamPtr.asFunction Function()>(); + _ZSTD_createCStreamPtr.asFunction Function()>(); int ZSTD_freeCStream( - ffi.Pointer zcs, - ) { + ffi.Pointer zcs, + ) { return _ZSTD_freeCStream( zcs, ); } late final _ZSTD_freeCStreamPtr = - _lookup)>>( - 'ZSTD_freeCStream'); + _lookup)>>( + 'ZSTD_freeCStream'); late final _ZSTD_freeCStream = _ZSTD_freeCStreamPtr.asFunction< int Function(ffi.Pointer)>(); @@ -628,11 +648,11 @@ class ZstandardAndroidBindings { /// which can be done explicitly (ZSTD_CCtx_reset()), /// or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx()) int ZSTD_compressStream2( - ffi.Pointer cctx, - ffi.Pointer output, - ffi.Pointer input, - int endOp, - ) { + ffi.Pointer cctx, + ffi.Pointer output, + ffi.Pointer input, + int endOp, + ) { return _ZSTD_compressStream2( cctx, output, @@ -666,18 +686,18 @@ class ZstandardAndroidBindings { } late final _ZSTD_CStreamInSizePtr = - _lookup>('ZSTD_CStreamInSize'); + _lookup>('ZSTD_CStreamInSize'); late final _ZSTD_CStreamInSize = - _ZSTD_CStreamInSizePtr.asFunction(); + _ZSTD_CStreamInSizePtr.asFunction(); int ZSTD_CStreamOutSize() { return _ZSTD_CStreamOutSize(); } late final _ZSTD_CStreamOutSizePtr = - _lookup>('ZSTD_CStreamOutSize'); + _lookup>('ZSTD_CStreamOutSize'); late final _ZSTD_CStreamOutSize = - _ZSTD_CStreamOutSizePtr.asFunction(); + _ZSTD_CStreamOutSizePtr.asFunction(); /// ! /// Equivalent to: @@ -689,9 +709,9 @@ class ZstandardAndroidBindings { /// Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API /// to compress with a dictionary. 
int ZSTD_initCStream( - ffi.Pointer zcs, - int compressionLevel, - ) { + ffi.Pointer zcs, + int compressionLevel, + ) { return _ZSTD_initCStream( zcs, compressionLevel, @@ -711,10 +731,10 @@ class ZstandardAndroidBindings { /// the next read size (if non-zero and not an error). ZSTD_compressStream2() /// returns the minimum nb of bytes left to flush (if non-zero and not an error). int ZSTD_compressStream( - ffi.Pointer zcs, - ffi.Pointer output, - ffi.Pointer input, - ) { + ffi.Pointer zcs, + ffi.Pointer output, + ffi.Pointer input, + ) { return _ZSTD_compressStream( zcs, output, @@ -734,9 +754,9 @@ class ZstandardAndroidBindings { /// ! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). int ZSTD_flushStream( - ffi.Pointer zcs, - ffi.Pointer output, - ) { + ffi.Pointer zcs, + ffi.Pointer output, + ) { return _ZSTD_flushStream( zcs, output, @@ -752,9 +772,9 @@ class ZstandardAndroidBindings { /// ! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). int ZSTD_endStream( - ffi.Pointer zcs, - ffi.Pointer output, - ) { + ffi.Pointer zcs, + ffi.Pointer output, + ) { return _ZSTD_endStream( zcs, output, @@ -774,22 +794,22 @@ class ZstandardAndroidBindings { } late final _ZSTD_createDStreamPtr = - _lookup Function()>>( - 'ZSTD_createDStream'); + _lookup Function()>>( + 'ZSTD_createDStream'); late final _ZSTD_createDStream = - _ZSTD_createDStreamPtr.asFunction Function()>(); + _ZSTD_createDStreamPtr.asFunction Function()>(); int ZSTD_freeDStream( - ffi.Pointer zds, - ) { + ffi.Pointer zds, + ) { return _ZSTD_freeDStream( zds, ); } late final _ZSTD_freeDStreamPtr = - _lookup)>>( - 'ZSTD_freeDStream'); + _lookup)>>( + 'ZSTD_freeDStream'); late final _ZSTD_freeDStream = _ZSTD_freeDStreamPtr.asFunction< int Function(ffi.Pointer)>(); @@ -801,16 +821,16 @@ class ZstandardAndroidBindings { /// ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); /// ZSTD_DCtx_refDDict(zds, NULL); int ZSTD_initDStream( - ffi.Pointer zds, - ) { + ffi.Pointer zds, + ) { return _ZSTD_initDStream( zds, ); } late final _ZSTD_initDStreamPtr = - _lookup)>>( - 'ZSTD_initDStream'); + _lookup)>>( + 'ZSTD_initDStream'); late final _ZSTD_initDStream = _ZSTD_initDStreamPtr.asFunction< int Function(ffi.Pointer)>(); @@ -836,10 +856,10 @@ class ZstandardAndroidBindings { /// which can be done explicitly (`ZSTD_DCtx_reset()`), /// or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) int ZSTD_decompressStream( - ffi.Pointer zds, - ffi.Pointer output, - ffi.Pointer input, - ) { + ffi.Pointer zds, + ffi.Pointer output, + ffi.Pointer input, + ) { return _ZSTD_decompressStream( zds, output, @@ -862,18 +882,18 @@ class ZstandardAndroidBindings { } late final _ZSTD_DStreamInSizePtr = - _lookup>('ZSTD_DStreamInSize'); + _lookup>('ZSTD_DStreamInSize'); late final _ZSTD_DStreamInSize = - _ZSTD_DStreamInSizePtr.asFunction(); + _ZSTD_DStreamInSizePtr.asFunction(); int ZSTD_DStreamOutSize() { return _ZSTD_DStreamOutSize(); } late final _ZSTD_DStreamOutSizePtr = - _lookup>('ZSTD_DStreamOutSize'); + _lookup>('ZSTD_DStreamOutSize'); late final _ZSTD_DStreamOutSize = - _ZSTD_DStreamOutSizePtr.asFunction(); + _ZSTD_DStreamOutSizePtr.asFunction(); /// Simple dictionary API /// / @@ -885,15 +905,15 @@ class ZstandardAndroidBindings { /// It's intended for a dictionary used only once. /// Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. 
int ZSTD_compress_usingDict( - ffi.Pointer ctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ffi.Pointer dict, - int dictSize, - int compressionLevel, - ) { + ffi.Pointer ctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ffi.Pointer dict, + int dictSize, + int compressionLevel, + ) { return _ZSTD_compress_usingDict( ctx, dst, @@ -928,14 +948,14 @@ class ZstandardAndroidBindings { /// It's intended for a dictionary used only once. /// Note : When `dict == NULL || dictSize < 8` no dictionary is used. int ZSTD_decompress_usingDict( - ffi.Pointer dctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ffi.Pointer dict, - int dictSize, - ) { + ffi.Pointer dctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ffi.Pointer dict, + int dictSize, + ) { return _ZSTD_decompress_usingDict( dctx, dst, @@ -958,9 +978,9 @@ class ZstandardAndroidBindings { ffi.Pointer, ffi.Size)>>('ZSTD_decompress_usingDict'); late final _ZSTD_decompress_usingDict = - _ZSTD_decompress_usingDictPtr.asFunction< - int Function(ffi.Pointer, ffi.Pointer, int, - ffi.Pointer, int, ffi.Pointer, int)>(); + _ZSTD_decompress_usingDictPtr.asFunction< + int Function(ffi.Pointer, ffi.Pointer, int, + ffi.Pointer, int, ffi.Pointer, int)>(); /// ! ZSTD_createCDict() : /// When compressing multiple messages or blocks using the same dictionary, @@ -975,10 +995,10 @@ class ZstandardAndroidBindings { /// This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, /// expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. ffi.Pointer ZSTD_createCDict( - ffi.Pointer dictBuffer, - int dictSize, - int compressionLevel, - ) { + ffi.Pointer dictBuffer, + int dictSize, + int compressionLevel, + ) { return _ZSTD_createCDict( dictBuffer, dictSize, @@ -997,18 +1017,18 @@ class ZstandardAndroidBindings { /// Function frees memory allocated by ZSTD_createCDict(). /// If a NULL pointer is passed, no operation is performed. int ZSTD_freeCDict( - ffi.Pointer CDict, - ) { + ffi.Pointer CDict, + ) { return _ZSTD_freeCDict( CDict, ); } late final _ZSTD_freeCDictPtr = - _lookup)>>( - 'ZSTD_freeCDict'); + _lookup)>>( + 'ZSTD_freeCDict'); late final _ZSTD_freeCDict = - _ZSTD_freeCDictPtr.asFunction)>(); + _ZSTD_freeCDictPtr.asFunction)>(); /// ! ZSTD_compress_usingCDict() : /// Compression using a digested Dictionary. @@ -1016,13 +1036,13 @@ class ZstandardAndroidBindings { /// Note : compression level is _decided at dictionary creation time_, /// and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) int ZSTD_compress_usingCDict( - ffi.Pointer cctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ffi.Pointer cdict, - ) { + ffi.Pointer cctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ffi.Pointer cdict, + ) { return _ZSTD_compress_usingCDict( cctx, dst, @@ -1043,17 +1063,17 @@ class ZstandardAndroidBindings { ffi.Size, ffi.Pointer)>>('ZSTD_compress_usingCDict'); late final _ZSTD_compress_usingCDict = - _ZSTD_compress_usingCDictPtr.asFunction< - int Function(ffi.Pointer, ffi.Pointer, int, - ffi.Pointer, int, ffi.Pointer)>(); + _ZSTD_compress_usingCDictPtr.asFunction< + int Function(ffi.Pointer, ffi.Pointer, int, + ffi.Pointer, int, ffi.Pointer)>(); /// ! ZSTD_createDDict() : /// Create a digested dictionary, ready to start decompression operation without startup delay. 
/// dictBuffer can be released after DDict creation, as its content is copied inside DDict. ffi.Pointer ZSTD_createDDict( - ffi.Pointer dictBuffer, - int dictSize, - ) { + ffi.Pointer dictBuffer, + int dictSize, + ) { return _ZSTD_createDDict( dictBuffer, dictSize, @@ -1071,30 +1091,30 @@ class ZstandardAndroidBindings { /// Function frees memory allocated with ZSTD_createDDict() /// If a NULL pointer is passed, no operation is performed. int ZSTD_freeDDict( - ffi.Pointer ddict, - ) { + ffi.Pointer ddict, + ) { return _ZSTD_freeDDict( ddict, ); } late final _ZSTD_freeDDictPtr = - _lookup)>>( - 'ZSTD_freeDDict'); + _lookup)>>( + 'ZSTD_freeDDict'); late final _ZSTD_freeDDict = - _ZSTD_freeDDictPtr.asFunction)>(); + _ZSTD_freeDDictPtr.asFunction)>(); /// ! ZSTD_decompress_usingDDict() : /// Decompression using a digested Dictionary. /// Recommended when same dictionary is used multiple times. int ZSTD_decompress_usingDDict( - ffi.Pointer dctx, - ffi.Pointer dst, - int dstCapacity, - ffi.Pointer src, - int srcSize, - ffi.Pointer ddict, - ) { + ffi.Pointer dctx, + ffi.Pointer dst, + int dstCapacity, + ffi.Pointer src, + int srcSize, + ffi.Pointer ddict, + ) { return _ZSTD_decompress_usingDDict( dctx, dst, @@ -1115,18 +1135,18 @@ class ZstandardAndroidBindings { ffi.Size, ffi.Pointer)>>('ZSTD_decompress_usingDDict'); late final _ZSTD_decompress_usingDDict = - _ZSTD_decompress_usingDDictPtr.asFunction< - int Function(ffi.Pointer, ffi.Pointer, int, - ffi.Pointer, int, ffi.Pointer)>(); + _ZSTD_decompress_usingDDictPtr.asFunction< + int Function(ffi.Pointer, ffi.Pointer, int, + ffi.Pointer, int, ffi.Pointer)>(); /// ! ZSTD_getDictID_fromDict() : Requires v1.4.0+ /// Provides the dictID stored within dictionary. /// if @return == 0, the dictionary is not conformant with Zstandard specification. /// It can still be loaded, but as a content-only dictionary. int ZSTD_getDictID_fromDict( - ffi.Pointer dict, - int dictSize, - ) { + ffi.Pointer dict, + int dictSize, + ) { return _ZSTD_getDictID_fromDict( dict, dictSize, @@ -1145,15 +1165,15 @@ class ZstandardAndroidBindings { /// If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. /// Non-conformant dictionaries can still be loaded, but as content-only dictionaries. int ZSTD_getDictID_fromCDict( - ffi.Pointer cdict, - ) { + ffi.Pointer cdict, + ) { return _ZSTD_getDictID_fromCDict( cdict, ); } late final _ZSTD_getDictID_fromCDictPtr = _lookup< - ffi + ffi .NativeFunction)>>( 'ZSTD_getDictID_fromCDict'); late final _ZSTD_getDictID_fromCDict = _ZSTD_getDictID_fromCDictPtr @@ -1164,15 +1184,15 @@ class ZstandardAndroidBindings { /// If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. /// Non-conformant dictionaries can still be loaded, but as content-only dictionaries. int ZSTD_getDictID_fromDDict( - ffi.Pointer ddict, - ) { + ffi.Pointer ddict, + ) { return _ZSTD_getDictID_fromDDict( ddict, ); } late final _ZSTD_getDictID_fromDDictPtr = _lookup< - ffi + ffi .NativeFunction)>>( 'ZSTD_getDictID_fromDDict'); late final _ZSTD_getDictID_fromDDict = _ZSTD_getDictID_fromDDictPtr @@ -1189,9 +1209,9 @@ class ZstandardAndroidBindings { /// - This is not a Zstandard frame. /// When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. 
int ZSTD_getDictID_fromFrame( - ffi.Pointer src, - int srcSize, - ) { + ffi.Pointer src, + int srcSize, + ) { return _ZSTD_getDictID_fromFrame( src, srcSize, @@ -1227,10 +1247,10 @@ class ZstandardAndroidBindings { /// If you want to employ LDM on some large dictionary content, /// prefer employing ZSTD_CCtx_refPrefix() described below. int ZSTD_CCtx_loadDictionary( - ffi.Pointer cctx, - ffi.Pointer dict, - int dictSize, - ) { + ffi.Pointer cctx, + ffi.Pointer dict, + int dictSize, + ) { return _ZSTD_CCtx_loadDictionary( cctx, dict, @@ -1243,8 +1263,8 @@ class ZstandardAndroidBindings { ffi.Size Function(ffi.Pointer, ffi.Pointer, ffi.Size)>>('ZSTD_CCtx_loadDictionary'); late final _ZSTD_CCtx_loadDictionary = - _ZSTD_CCtx_loadDictionaryPtr.asFunction< - int Function(ffi.Pointer, ffi.Pointer, int)>(); + _ZSTD_CCtx_loadDictionaryPtr.asFunction< + int Function(ffi.Pointer, ffi.Pointer, int)>(); /// ! ZSTD_CCtx_refCDict() : Requires v1.4.0+ /// Reference a prepared dictionary, to be used for all future compressed frames. @@ -1259,9 +1279,9 @@ class ZstandardAndroidBindings { /// Referencing a new dictionary effectively "discards" any previous one. /// Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. int ZSTD_CCtx_refCDict( - ffi.Pointer cctx, - ffi.Pointer cdict, - ) { + ffi.Pointer cctx, + ffi.Pointer cdict, + ) { return _ZSTD_CCtx_refCDict( cctx, cdict, @@ -1295,10 +1315,10 @@ class ZstandardAndroidBindings { /// Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). /// Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. int ZSTD_CCtx_refPrefix( - ffi.Pointer cctx, - ffi.Pointer prefix, - int prefixSize, - ) { + ffi.Pointer cctx, + ffi.Pointer prefix, + int prefixSize, + ) { return _ZSTD_CCtx_refPrefix( cctx, prefix, @@ -1328,10 +1348,10 @@ class ZstandardAndroidBindings { /// Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of /// how dictionary content is loaded and interpreted. int ZSTD_DCtx_loadDictionary( - ffi.Pointer dctx, - ffi.Pointer dict, - int dictSize, - ) { + ffi.Pointer dctx, + ffi.Pointer dict, + int dictSize, + ) { return _ZSTD_DCtx_loadDictionary( dctx, dict, @@ -1344,8 +1364,8 @@ class ZstandardAndroidBindings { ffi.Size Function(ffi.Pointer, ffi.Pointer, ffi.Size)>>('ZSTD_DCtx_loadDictionary'); late final _ZSTD_DCtx_loadDictionary = - _ZSTD_DCtx_loadDictionaryPtr.asFunction< - int Function(ffi.Pointer, ffi.Pointer, int)>(); + _ZSTD_DCtx_loadDictionaryPtr.asFunction< + int Function(ffi.Pointer, ffi.Pointer, int)>(); /// ! ZSTD_DCtx_refDDict() : Requires v1.4.0+ /// Reference a prepared dictionary, to be used to decompress next frames. @@ -1364,9 +1384,9 @@ class ZstandardAndroidBindings { /// Special: referencing a NULL DDict means "return to no-dictionary mode". /// Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. int ZSTD_DCtx_refDDict( - ffi.Pointer dctx, - ffi.Pointer ddict, - ) { + ffi.Pointer dctx, + ffi.Pointer ddict, + ) { return _ZSTD_DCtx_refDDict( dctx, ddict, @@ -1396,10 +1416,10 @@ class ZstandardAndroidBindings { /// Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. /// A full dictionary is more costly, as it requires building tables. 
int ZSTD_DCtx_refPrefix( - ffi.Pointer dctx, - ffi.Pointer prefix, - int prefixSize, - ) { + ffi.Pointer dctx, + ffi.Pointer prefix, + int prefixSize, + ) { return _ZSTD_DCtx_refPrefix( dctx, prefix, @@ -1418,88 +1438,144 @@ class ZstandardAndroidBindings { /// These functions give the _current_ memory usage of selected object. /// Note that object memory usage can evolve (increase or decrease) over time. int ZSTD_sizeof_CCtx( - ffi.Pointer cctx, - ) { + ffi.Pointer cctx, + ) { return _ZSTD_sizeof_CCtx( cctx, ); } late final _ZSTD_sizeof_CCtxPtr = - _lookup)>>( - 'ZSTD_sizeof_CCtx'); + _lookup)>>( + 'ZSTD_sizeof_CCtx'); late final _ZSTD_sizeof_CCtx = - _ZSTD_sizeof_CCtxPtr.asFunction)>(); + _ZSTD_sizeof_CCtxPtr.asFunction)>(); int ZSTD_sizeof_DCtx( - ffi.Pointer dctx, - ) { + ffi.Pointer dctx, + ) { return _ZSTD_sizeof_DCtx( dctx, ); } late final _ZSTD_sizeof_DCtxPtr = - _lookup)>>( - 'ZSTD_sizeof_DCtx'); + _lookup)>>( + 'ZSTD_sizeof_DCtx'); late final _ZSTD_sizeof_DCtx = - _ZSTD_sizeof_DCtxPtr.asFunction)>(); + _ZSTD_sizeof_DCtxPtr.asFunction)>(); int ZSTD_sizeof_CStream( - ffi.Pointer zcs, - ) { + ffi.Pointer zcs, + ) { return _ZSTD_sizeof_CStream( zcs, ); } late final _ZSTD_sizeof_CStreamPtr = - _lookup)>>( - 'ZSTD_sizeof_CStream'); + _lookup)>>( + 'ZSTD_sizeof_CStream'); late final _ZSTD_sizeof_CStream = _ZSTD_sizeof_CStreamPtr.asFunction< int Function(ffi.Pointer)>(); int ZSTD_sizeof_DStream( - ffi.Pointer zds, - ) { + ffi.Pointer zds, + ) { return _ZSTD_sizeof_DStream( zds, ); } late final _ZSTD_sizeof_DStreamPtr = - _lookup)>>( - 'ZSTD_sizeof_DStream'); + _lookup)>>( + 'ZSTD_sizeof_DStream'); late final _ZSTD_sizeof_DStream = _ZSTD_sizeof_DStreamPtr.asFunction< int Function(ffi.Pointer)>(); int ZSTD_sizeof_CDict( - ffi.Pointer cdict, - ) { + ffi.Pointer cdict, + ) { return _ZSTD_sizeof_CDict( cdict, ); } late final _ZSTD_sizeof_CDictPtr = - _lookup)>>( - 'ZSTD_sizeof_CDict'); + _lookup)>>( + 'ZSTD_sizeof_CDict'); late final _ZSTD_sizeof_CDict = - _ZSTD_sizeof_CDictPtr.asFunction)>(); + _ZSTD_sizeof_CDictPtr.asFunction)>(); int ZSTD_sizeof_DDict( - ffi.Pointer ddict, - ) { + ffi.Pointer ddict, + ) { return _ZSTD_sizeof_DDict( ddict, ); } late final _ZSTD_sizeof_DDictPtr = - _lookup)>>( - 'ZSTD_sizeof_DDict'); + _lookup)>>( + 'ZSTD_sizeof_DDict'); late final _ZSTD_sizeof_DDict = - _ZSTD_sizeof_DDictPtr.asFunction)>(); + _ZSTD_sizeof_DDictPtr.asFunction)>(); +} + +/// -********************************************* +/// Error codes list +/// -********************************************* +/// Error codes _values_ are pinned down since v1.3.1 only. +/// Therefore, don't rely on values if you may link to any version < v1.3.1. +/// +/// Only values < 100 are considered stable. +/// +/// note 1 : this API shall be used with static linking only. +/// dynamic linking is not yet officially supported. +/// note 2 : Prefer relying on the enum than on its value whenever possible +/// This is the only supported way to use the error list < v1.3.1 +/// note 3 : ZSTD_isError() is always correct, whatever the library version. 
+abstract class ZSTD_ErrorCode {
+  static const int ZSTD_error_no_error = 0;
+  static const int ZSTD_error_GENERIC = 1;
+  static const int ZSTD_error_prefix_unknown = 10;
+  static const int ZSTD_error_version_unsupported = 12;
+  static const int ZSTD_error_frameParameter_unsupported = 14;
+  static const int ZSTD_error_frameParameter_windowTooLarge = 16;
+  static const int ZSTD_error_corruption_detected = 20;
+  static const int ZSTD_error_checksum_wrong = 22;
+  static const int ZSTD_error_literals_headerWrong = 24;
+  static const int ZSTD_error_dictionary_corrupted = 30;
+  static const int ZSTD_error_dictionary_wrong = 32;
+  static const int ZSTD_error_dictionaryCreation_failed = 34;
+  static const int ZSTD_error_parameter_unsupported = 40;
+  static const int ZSTD_error_parameter_combination_unsupported = 41;
+  static const int ZSTD_error_parameter_outOfBound = 42;
+  static const int ZSTD_error_tableLog_tooLarge = 44;
+  static const int ZSTD_error_maxSymbolValue_tooLarge = 46;
+  static const int ZSTD_error_maxSymbolValue_tooSmall = 48;
+  static const int ZSTD_error_cannotProduce_uncompressedBlock = 49;
+  static const int ZSTD_error_stabilityCondition_notRespected = 50;
+  static const int ZSTD_error_stage_wrong = 60;
+  static const int ZSTD_error_init_missing = 62;
+  static const int ZSTD_error_memory_allocation = 64;
+  static const int ZSTD_error_workSpace_tooSmall = 66;
+  static const int ZSTD_error_dstSize_tooSmall = 70;
+  static const int ZSTD_error_srcSize_wrong = 72;
+  static const int ZSTD_error_dstBuffer_null = 74;
+  static const int ZSTD_error_noForwardProgress_destFull = 80;
+  static const int ZSTD_error_noForwardProgress_inputEmpty = 82;
+
+  /// following error codes are __NOT STABLE__, they can be removed or changed in future versions
+  static const int ZSTD_error_frameIndex_tooLarge = 100;
+  static const int ZSTD_error_seekableIO = 102;
+  static const int ZSTD_error_dstBuffer_wrong = 104;
+  static const int ZSTD_error_srcBuffer_wrong = 105;
+  static const int ZSTD_error_sequenceProducer_failed = 106;
+  static const int ZSTD_error_externalSequences_invalid = 107;
+
+  /// never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead
+  static const int ZSTD_error_maxCode = 120;
 }
 
 final class ZSTD_CCtx_s extends ffi.Opaque {}
@@ -1508,13 +1584,13 @@ final class ZSTD_CCtx_s extends ffi.Opaque {}
 /// /
 /// /*= Compression context
 /// When compressing many times,
-/// it is recommended to allocate a context just once,
+/// it is recommended to allocate a compression context just once,
 /// and reuse it for each successive compression operation.
-/// This will make workload friendlier for system's memory.
+/// This will make the workload friendlier for the system's memory.
 /// Note : re-using context is just a speed / resource optimization.
 /// It doesn't change the compression ratio, which remains identical.
-/// Note 2 : In multi-threaded environments,
-/// use one different context per thread for parallel execution.
+/// Note 2 : For parallel execution in multi-threaded environments,
+/// use a different context per thread.
typedef ZSTD_CCtx = ZSTD_CCtx_s; final class ZSTD_DCtx_s extends ffi.Opaque {} @@ -1722,7 +1798,8 @@ abstract class ZSTD_cParameter { /// ZSTD_c_stableOutBuffer /// ZSTD_c_blockDelimiters /// ZSTD_c_validateSequences - /// ZSTD_c_useBlockSplitter + /// ZSTD_c_blockSplitterLevel + /// ZSTD_c_splitAfterSequences /// ZSTD_c_useRowMatchFinder /// ZSTD_c_prefetchCDictTables /// ZSTD_c_enableSeqProducerFallback @@ -1750,6 +1827,7 @@ abstract class ZSTD_cParameter { static const int ZSTD_c_experimentalParam17 = 1014; static const int ZSTD_c_experimentalParam18 = 1015; static const int ZSTD_c_experimentalParam19 = 1016; + static const int ZSTD_c_experimentalParam20 = 1017; } final class ZSTD_bounds extends ffi.Struct { @@ -1957,13 +2035,13 @@ typedef ZSTD_DDict = ZSTD_DDict_s; const int ZSTD_VERSION_MAJOR = 1; -const int ZSTD_VERSION_MINOR = 5; +const int ZSTD_VERSION_MINOR = 6; -const int ZSTD_VERSION_RELEASE = 6; +const int ZSTD_VERSION_RELEASE = 0; -const int ZSTD_VERSION_NUMBER = 10506; +const int ZSTD_VERSION_NUMBER = 10600; -const String ZSTD_VERSION_STRING = '1.5.6'; +const String ZSTD_VERSION_STRING = '1.6.0'; const int ZSTD_CLEVEL_DEFAULT = 3; diff --git a/zstandard_android/pubspec.yaml b/zstandard_android/pubspec.yaml index 98113f6..1c4cb54 100644 --- a/zstandard_android/pubspec.yaml +++ b/zstandard_android/pubspec.yaml @@ -19,6 +19,10 @@ dev_dependencies: flutter_test: sdk: flutter flutter_lints: ^5.0.0 + kiri_check: ^1.3.1 + leak_tracker: ^11.0.0 + leak_tracker_testing: ^3.0.2 + mutation_test: ^1.8.0 flutter: plugin: diff --git a/zstandard_android/src/Makefile b/zstandard_android/src/Makefile deleted file mode 100644 index 020d2ff..0000000 --- a/zstandard_android/src/Makefile +++ /dev/null @@ -1,387 +0,0 @@ -# ################################################################ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# You may select, at your option, one of the above-listed licenses. -# ################################################################ - -# default target (when running `make` with no argument) -lib-release: - -# Modules -ZSTD_LIB_COMPRESSION ?= 1 -ZSTD_LIB_DECOMPRESSION ?= 1 -ZSTD_LIB_DICTBUILDER ?= 1 -ZSTD_LIB_DEPRECATED ?= 0 - -# Input variables for libzstd.mk -ifeq ($(ZSTD_LIB_COMPRESSION), 0) - ZSTD_LIB_DICTBUILDER = 0 - ZSTD_LIB_DEPRECATED = 0 -endif - -ifeq ($(ZSTD_LIB_DECOMPRESSION), 0) - ZSTD_LEGACY_SUPPORT = 0 - ZSTD_LIB_DEPRECATED = 0 -endif - -include libzstd.mk - -ZSTD_FILES := $(ZSTD_COMMON_FILES) $(ZSTD_LEGACY_FILES) - -ifneq ($(ZSTD_LIB_COMPRESSION), 0) - ZSTD_FILES += $(ZSTD_COMPRESS_FILES) -endif - -ifneq ($(ZSTD_LIB_DECOMPRESSION), 0) - ZSTD_FILES += $(ZSTD_DECOMPRESS_FILES) -endif - -ifneq ($(ZSTD_LIB_DEPRECATED), 0) - ZSTD_FILES += $(ZSTD_DEPRECATED_FILES) -endif - -ifneq ($(ZSTD_LIB_DICTBUILDER), 0) - ZSTD_FILES += $(ZSTD_DICTBUILDER_FILES) -endif - -ZSTD_LOCAL_SRC := $(notdir $(ZSTD_FILES)) -ZSTD_LOCAL_OBJ0 := $(ZSTD_LOCAL_SRC:.c=.o) -ZSTD_LOCAL_OBJ := $(ZSTD_LOCAL_OBJ0:.S=.o) - -VERSION := $(ZSTD_VERSION) - -# Note: by default, the static library is built single-threaded and dynamic library is built -# multi-threaded. It is possible to force multi or single threaded builds by appending -# -mt or -nomt to the build target (like lib-mt for multi-threaded, lib-nomt for single-threaded). 
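Stepping back to the generated bindings: the new `ZSTD_ErrorCode` class added above gives names to zstd's error values. A hedged sketch of mapping a binding's `size_t` return onto those constants — zstd encodes errors as `(size_t)-code`, which Dart's signed 64-bit `int` sees as a small negative number, so `-ret` recovers the code; `describeResult` is a hypothetical helper and assumes `ZSTD_isError` is bound as elsewhere in this file:

```dart
/// Maps a zstd return value onto the ZSTD_ErrorCode constants.
String describeResult(ZstandardAndroidBindings zstd, int ret) {
  if (zstd.ZSTD_isError(ret) == 0) return 'ok: $ret bytes';
  // (size_t)-code shows up as -code in Dart's signed 64-bit int.
  switch (-ret) {
    case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall:
      return 'destination buffer is too small';
    case ZSTD_ErrorCode.ZSTD_error_corruption_detected:
      return 'input data is corrupted';
    case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong:
      return 'frame was compressed with a different dictionary';
    default:
      return 'zstd error code ${-ret}';
  }
}
```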
- - -CPPFLAGS_DYNLIB += -DZSTD_MULTITHREAD # dynamic library build defaults to multi-threaded -LDFLAGS_DYNLIB += -pthread -CPPFLAGS_STATICLIB += # static library build defaults to single-threaded - -# pkg-config Libs.private points to LDFLAGS_DYNLIB -PCLIB := $(LDFLAGS_DYNLIB) - -ifeq ($(findstring GCC,$(CCVER)),GCC) -decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize -endif - - -# macOS linker doesn't support -soname, and use different extension -# see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html -ifeq ($(UNAME), Darwin) - SHARED_EXT = dylib - SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) - SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) - SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) -else - ifeq ($(UNAME), AIX) - SONAME_FLAGS = - else - SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) - endif - SHARED_EXT = so - SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) - SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) -endif - - -.PHONY: all -all: lib - - -.PHONY: libzstd.a # must be run every time -libzstd.a: CPPFLAGS += $(CPPFLAGS_STATICLIB) - -SET_CACHE_DIRECTORY = \ - +$(MAKE) --no-print-directory $@ \ - BUILD_DIR=obj/$(HASH_DIR) \ - CPPFLAGS="$(CPPFLAGS)" \ - CFLAGS="$(CFLAGS)" \ - LDFLAGS="$(LDFLAGS)" - -ifndef BUILD_DIR -# determine BUILD_DIR from compilation flags - -libzstd.a: - $(SET_CACHE_DIRECTORY) - -else -# BUILD_DIR is defined - -ZSTD_STATICLIB_DIR := $(BUILD_DIR)/static -ZSTD_STATICLIB := $(ZSTD_STATICLIB_DIR)/libzstd.a -ZSTD_STATICLIB_OBJ := $(addprefix $(ZSTD_STATICLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) -$(ZSTD_STATICLIB): ARFLAGS = rcs -$(ZSTD_STATICLIB): | $(ZSTD_STATICLIB_DIR) -$(ZSTD_STATICLIB): $(ZSTD_STATICLIB_OBJ) - # Check for multithread flag at target execution time - $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ - @echo compiling multi-threaded static library $(LIBVER),\ - @echo compiling single-threaded static library $(LIBVER)) - $(AR) $(ARFLAGS) $@ $^ - -libzstd.a: $(ZSTD_STATICLIB) - cp -f $< $@ - -endif - -ifneq (,$(filter Windows%,$(TARGET_SYSTEM))) - -LIBZSTD = dll/libzstd.dll -$(LIBZSTD): $(ZSTD_FILES) - @echo compiling dynamic library $(LIBVER) - $(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -Wl,--out-implib,dll/libzstd.dll.a -shared $^ -o $@ - -else # not Windows - -LIBZSTD = libzstd.$(SHARED_EXT_VER) -.PHONY: $(LIBZSTD) # must be run every time -$(LIBZSTD): CPPFLAGS += $(CPPFLAGS_DYNLIB) -$(LIBZSTD): CFLAGS += -fPIC -fvisibility=hidden -$(LIBZSTD): LDFLAGS += -shared $(LDFLAGS_DYNLIB) - -ifndef BUILD_DIR -# determine BUILD_DIR from compilation flags - -$(LIBZSTD): - $(SET_CACHE_DIRECTORY) - -else -# BUILD_DIR is defined - -ZSTD_DYNLIB_DIR := $(BUILD_DIR)/dynamic -ZSTD_DYNLIB := $(ZSTD_DYNLIB_DIR)/$(LIBZSTD) -ZSTD_DYNLIB_OBJ := $(addprefix $(ZSTD_DYNLIB_DIR)/,$(ZSTD_LOCAL_OBJ)) - -$(ZSTD_DYNLIB): | $(ZSTD_DYNLIB_DIR) -$(ZSTD_DYNLIB): $(ZSTD_DYNLIB_OBJ) -# Check for multithread flag at target execution time - $(if $(filter -DZSTD_MULTITHREAD,$(CPPFLAGS)),\ - @echo compiling multi-threaded dynamic library $(LIBVER),\ - @echo compiling single-threaded dynamic library $(LIBVER)) - $(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ - @echo creating versioned links - ln -sf $@ libzstd.$(SHARED_EXT_MAJOR) - ln -sf $@ libzstd.$(SHARED_EXT) - -$(LIBZSTD): $(ZSTD_DYNLIB) - cp -f $< $@ - -endif # ifndef BUILD_DIR -endif # if windows - -.PHONY: libzstd -libzstd : $(LIBZSTD) - 
-.PHONY: lib -lib : libzstd.a libzstd - - -# note : do not define lib-mt or lib-release as .PHONY -# make does not consider implicit pattern rule for .PHONY target - -%-mt : CPPFLAGS_DYNLIB := -DZSTD_MULTITHREAD -%-mt : CPPFLAGS_STATICLIB := -DZSTD_MULTITHREAD -%-mt : LDFLAGS_DYNLIB := -pthread -%-mt : PCLIB := -%-mt : PCMTLIB := $(LDFLAGS_DYNLIB) -%-mt : % - @echo multi-threaded build completed - -%-nomt : CPPFLAGS_DYNLIB := -%-nomt : LDFLAGS_DYNLIB := -%-nomt : CPPFLAGS_STATICLIB := -%-nomt : PCLIB := -%-nomt : % - @echo single-threaded build completed - -%-release : DEBUGFLAGS := -%-release : % - @echo release build completed - - -# Generate .h dependencies automatically - -# -MMD: compiler generates dependency information as a side-effect of compilation, without system headers -# -MP: adds phony target for each dependency other than main file. -DEPFLAGS = -MMD -MP - -# ensure that ZSTD_DYNLIB_DIR exists prior to generating %.o -$(ZSTD_DYNLIB_DIR)/%.o : %.c | $(ZSTD_DYNLIB_DIR) - @echo CC $@ - $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< - -$(ZSTD_STATICLIB_DIR)/%.o : %.c | $(ZSTD_STATICLIB_DIR) - @echo CC $@ - $(COMPILE.c) $(DEPFLAGS) $(OUTPUT_OPTION) $< - -$(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR) - @echo AS $@ - $(COMPILE.S) $(OUTPUT_OPTION) $< - -$(ZSTD_STATICLIB_DIR)/%.o : %.S | $(ZSTD_STATICLIB_DIR) - @echo AS $@ - $(COMPILE.S) $(OUTPUT_OPTION) $< - -MKDIR ?= mkdir -p -$(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATICLIB_DIR): - $(MKDIR) $@ - -DEPFILES := $(ZSTD_DYNLIB_OBJ:.o=.d) $(ZSTD_STATICLIB_OBJ:.o=.d) -$(DEPFILES): - -# The leading '-' means: do not fail is include fails (ex: directory does not exist yet) --include $(wildcard $(DEPFILES)) - - -# Special case : build library in single-thread mode _and_ without zstdmt_compress.c -# Note : we still need threading.c and pool.c for the dictionary builder, -# but they will correctly behave single-threaded. 
-ZSTDMT_FILES = zstdmt_compress.c -ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(notdir $(ZSTD_FILES))) -libzstd-nomt: CFLAGS += -fPIC -fvisibility=hidden -libzstd-nomt: LDFLAGS += -shared -libzstd-nomt: $(ZSTD_NOMT_FILES) - @echo compiling single-thread dynamic library $(LIBVER) - @echo files : $(ZSTD_NOMT_FILES) - @if echo "$(ZSTD_NOMT_FILES)" | tr ' ' '\n' | $(GREP) -q zstdmt; then \ - echo "Error: Found zstdmt in list."; \ - exit 1; \ - fi - $(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ - -.PHONY: clean -clean: - $(RM) -r *.dSYM # macOS-specific - $(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc - $(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt* - $(RM) -r obj/* - @echo Cleaning library completed - -#----------------------------------------------------------------------------- -# make install is validated only for below listed environments -#----------------------------------------------------------------------------- -ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku AIX MSYS_NT% CYGWIN_NT%,$(UNAME))) - -lib: libzstd.pc - -HAS_EXPLICIT_EXEC_PREFIX := $(if $(or $(EXEC_PREFIX),$(exec_prefix)),1,) - -DESTDIR ?= -# directory variables : GNU conventions prefer lowercase -# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html -# support both lower and uppercase (BSD), use uppercase in script -prefix ?= /usr/local -PREFIX ?= $(prefix) -exec_prefix ?= $(PREFIX) -EXEC_PREFIX ?= $(exec_prefix) -libdir ?= $(EXEC_PREFIX)/lib -LIBDIR ?= $(libdir) -includedir ?= $(PREFIX)/include -INCLUDEDIR ?= $(includedir) - -PCINCDIR := $(patsubst $(PREFIX)%,%,$(INCLUDEDIR)) -PCLIBDIR := $(patsubst $(EXEC_PREFIX)%,%,$(LIBDIR)) - -# If we successfully stripped off a prefix, we'll add a reference to the -# relevant pc variable. -PCINCPREFIX := $(if $(findstring $(INCLUDEDIR),$(PCINCDIR)),,$${prefix}) -PCLIBPREFIX := $(if $(findstring $(LIBDIR),$(PCLIBDIR)),,$${exec_prefix}) - -# If no explicit EXEC_PREFIX was set by the caller, write it out as a reference -# to PREFIX, rather than as a resolved value. -PCEXEC_PREFIX := $(if $(HAS_EXPLICIT_EXEC_PREFIX),$(EXEC_PREFIX),$${prefix}) - - -ifneq ($(MT),) - PCLIB := - PCMTLIB := $(LDFLAGS_DYNLIB) -else - PCLIB := $(LDFLAGS_DYNLIB) -endif - -ifneq (,$(filter FreeBSD NetBSD DragonFly,$(UNAME))) - PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig -else - PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig -endif - -ifneq (,$(filter SunOS,$(UNAME))) - INSTALL ?= ginstall -else - INSTALL ?= install -endif - -INSTALL_PROGRAM ?= $(INSTALL) -INSTALL_DATA ?= $(INSTALL) -m 644 - - -# pkg-config library define. 
-# For static single-threaded library declare -pthread in Libs.private -# For static multi-threaded library declare -pthread in Libs and Cflags -.PHONY: libzstd.pc -libzstd.pc: libzstd.pc.in - @echo creating pkgconfig - @sed \ - -e 's|@PREFIX@|$(PREFIX)|' \ - -e 's|@EXEC_PREFIX@|$(PCEXEC_PREFIX)|' \ - -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \ - -e 's|@LIBDIR@|$(PCLIBPREFIX)$(PCLIBDIR)|' \ - -e 's|@VERSION@|$(VERSION)|' \ - -e 's|@LIBS_MT@|$(PCMTLIB)|' \ - -e 's|@LIBS_PRIVATE@|$(PCLIB)|' \ - $< >$@ - -.PHONY: install -install: install-pc install-static install-shared install-includes - @echo zstd static and shared library installed - -.PHONY: install-pc -install-pc: libzstd.pc - [ -e $(DESTDIR)$(PKGCONFIGDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/ - $(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/ - -.PHONY: install-static -install-static: - # only generate libzstd.a if it's not already present - [ -e libzstd.a ] || $(MAKE) libzstd.a-release - [ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ - @echo Installing static library - $(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR) - -.PHONY: install-shared -install-shared: - # only generate libzstd.so if it's not already present - [ -e $(LIBZSTD) ] || $(MAKE) libzstd-release - [ -e $(DESTDIR)$(LIBDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)/ - @echo Installing shared library - $(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR) - ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) - -.PHONY: install-includes -install-includes: - [ -e $(DESTDIR)$(INCLUDEDIR) ] || $(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR)/ - @echo Installing includes - $(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR) - $(INSTALL_DATA) zstd_errors.h $(DESTDIR)$(INCLUDEDIR) - $(INSTALL_DATA) zdict.h $(DESTDIR)$(INCLUDEDIR) - -.PHONY: uninstall -uninstall: - $(RM) $(DESTDIR)$(LIBDIR)/libzstd.a - $(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) - $(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - $(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD) - $(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc - $(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h - $(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h - $(RM) $(DESTDIR)$(INCLUDEDIR)/zdict.h - @echo zstd libraries successfully uninstalled - -endif diff --git a/zstandard_android/src/README.md b/zstandard_android/src/README.md deleted file mode 100644 index 0f6c647..0000000 --- a/zstandard_android/src/README.md +++ /dev/null @@ -1,241 +0,0 @@ -Zstandard library files -================================ - -The __lib__ directory is split into several sub-directories, -in order to make it easier to select or exclude features. - - -#### Building - -`Makefile` script is provided, supporting [Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html#Makefile-Conventions), -including commands variables, staged install, directory variables and standard targets. -- `make` : generates both static and dynamic libraries -- `make install` : install libraries and headers in target system directories - -`libzstd` default scope is pretty large, including compression, decompression, dictionary builder, -and support for decoding legacy formats >= v0.5.0. -The scope can be reduced on demand (see paragraph _modular build_). - - -#### Multithreading support - -When building with `make`, by default the dynamic library is multithreaded and static library is single-threaded (for compatibility reasons). 
- -Enabling multithreading requires 2 conditions : -- set build macro `ZSTD_MULTITHREAD` (`-DZSTD_MULTITHREAD` for `gcc`) -- for POSIX systems : compile with pthread (`-pthread` compilation flag for `gcc`) - -For convenience, we provide a build target to generate multi and single threaded libraries: -- Force enable multithreading on both dynamic and static libraries by appending `-mt` to the target, e.g. `make lib-mt`. - Note that the `.pc` generated on calling `make lib-mt` will already include the require Libs and Cflags. -- Force disable multithreading on both dynamic and static libraries by appending `-nomt` to the target, e.g. `make lib-nomt`. -- By default, as mentioned before, dynamic library is multithreaded, and static library is single-threaded, e.g. `make lib`. - -When linking a POSIX program with a multithreaded version of `libzstd`, -note that it's necessary to invoke the `-pthread` flag during link stage. - -The `.pc` generated from `make install` or `make install-pc` always assume a single-threaded static library -is compiled. To correctly generate a `.pc` for the multi-threaded static library, set `MT=1` as ENV variable. - -Multithreading capabilities are exposed -via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351). - - -#### API - -Zstandard's stable API is exposed within [lib/zstd.h](zstd.h). - - -#### Advanced API - -Optional advanced features are exposed via : - -- `lib/zstd_errors.h` : translates `size_t` function results - into a `ZSTD_ErrorCode`, for accurate error handling. - -- `ZSTD_STATIC_LINKING_ONLY` : if this macro is defined _before_ including `zstd.h`, - it unlocks access to the experimental API, - exposed in the second part of `zstd.h`. - All definitions in the experimental APIs are unstable, - they may still change in the future, or even be removed. - As a consequence, experimental definitions shall ___never be used with dynamic library___ ! - Only static linking is allowed. - - -#### Modular build - -It's possible to compile only a limited set of features within `libzstd`. -The file structure is designed to make this selection manually achievable for any build system : - -- Directory `lib/common` is always required, for all variants. - -- Compression source code lies in `lib/compress` - -- Decompression source code lies in `lib/decompress` - -- It's possible to include only `compress` or only `decompress`, they don't depend on each other. - -- `lib/dictBuilder` : makes it possible to generate dictionaries from a set of samples. - The API is exposed in `lib/dictBuilder/zdict.h`. - This module depends on both `lib/common` and `lib/compress` . - -- `lib/legacy` : makes it possible to decompress legacy zstd formats, starting from `v0.1.0`. - This module depends on `lib/common` and `lib/decompress`. - To enable this feature, define `ZSTD_LEGACY_SUPPORT` during compilation. - Specifying a number limits versions supported to that version onward. - For example, `ZSTD_LEGACY_SUPPORT=2` means : "support legacy formats >= v0.2.0". - Conversely, `ZSTD_LEGACY_SUPPORT=0` means "do __not__ support legacy formats". - By default, this build macro is set as `ZSTD_LEGACY_SUPPORT=5`. - Decoding supported legacy format is a transparent capability triggered within decompression functions. - It's also allowed to invoke legacy API directly, exposed in `lib/legacy/zstd_legacy.h`. - Each version does also provide its own set of advanced API. 
- For example, advanced API for version `v0.4` is exposed in `lib/legacy/zstd_v04.h` . - -- While invoking `make libzstd`, it's possible to define build macros - `ZSTD_LIB_COMPRESSION`, `ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`, - and `ZSTD_LIB_DEPRECATED` as `0` to forgo compilation of the - corresponding features. This will also disable compilation of all - dependencies (e.g. `ZSTD_LIB_COMPRESSION=0` will also disable - dictBuilder). - -- There are a number of options that can help minimize the binary size of - `libzstd`. - - The first step is to select the components needed (using the above-described - `ZSTD_LIB_COMPRESSION` etc.). - - The next step is to set `ZSTD_LIB_MINIFY` to `1` when invoking `make`. This - disables various optional components and changes the compilation flags to - prioritize space-saving. - - Detailed options: Zstandard's code and build environment is set up by default - to optimize above all else for performance. In pursuit of this goal, Zstandard - makes significant trade-offs in code size. For example, Zstandard often has - more than one implementation of a particular component, with each - implementation optimized for different scenarios. For example, the Huffman - decoder has complementary implementations that decode the stream one symbol at - a time or two symbols at a time. Zstd normally includes both (and dispatches - between them at runtime), but by defining `HUF_FORCE_DECOMPRESS_X1` or - `HUF_FORCE_DECOMPRESS_X2`, you can force the use of one or the other, avoiding - compilation of the other. Similarly, `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT` - and `ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG` force the compilation and use of - only one or the other of two decompression implementations. The smallest - binary is achieved by using `HUF_FORCE_DECOMPRESS_X1` and - `ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT` (implied by `ZSTD_LIB_MINIFY`). - - On the compressor side, Zstd's compression levels map to several internal - strategies. In environments where the higher compression levels aren't used, - it is possible to exclude all but the fastest strategy with - `ZSTD_LIB_EXCLUDE_COMPRESSORS_DFAST_AND_UP=1`. (Note that this will change - the behavior of the default compression level.) Or if you want to retain the - default compressor as well, you can set - `ZSTD_LIB_EXCLUDE_COMPRESSORS_GREEDY_AND_UP=1`, at the cost of an additional - ~20KB or so. - - For squeezing the last ounce of size out, you can also define - `ZSTD_NO_INLINE`, which disables inlining, and `ZSTD_STRIP_ERROR_STRINGS`, - which removes the error messages that are otherwise returned by - `ZSTD_getErrorName` (implied by `ZSTD_LIB_MINIFY`). - - Finally, when integrating into your application, make sure you're doing link- - time optimization and unused symbol garbage collection (via some combination of, - e.g., `-flto`, `-ffat-lto-objects`, `-fuse-linker-plugin`, - `-ffunction-sections`, `-fdata-sections`, `-fmerge-all-constants`, - `-Wl,--gc-sections`, `-Wl,-z,norelro`, and an archiver that understands - the compiler's intermediate representation, e.g., `AR=gcc-ar`). Consult your - compiler's documentation. - -- While invoking `make libzstd`, the build macro `ZSTD_LEGACY_MULTITHREADED_API=1` - will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in - the shared library, which is now hidden by default. - -- The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries - which can detect at runtime the presence of BMI2 instructions, and use them only if present. 
- These instructions contribute to better performance, notably on the decoder side. - By default, this feature is automatically enabled on detecting - the right instruction set (x64) and compiler (clang or gcc >= 5). - It's obviously disabled for different cpus, - or when BMI2 instruction set is _required_ by the compiler command line - (in this case, only the BMI2 code path is generated). - Setting this macro will either force to generate the BMI2 dispatcher (1) - or prevent it (0). It overrides automatic detection. - -- The build macro `ZSTD_NO_UNUSED_FUNCTIONS` can be defined to hide the definitions of functions - that zstd does not use. Not all unused functions are hidden, but they can be if needed. - Currently, this macro will hide function definitions in FSE and HUF that use an excessive - amount of stack space. - -- The build macro `ZSTD_NO_INTRINSICS` can be defined to disable all explicit intrinsics. - Compiler builtins are still used. - -- The build macro `ZSTD_DECODER_INTERNAL_BUFFER` can be set to control - the amount of extra memory used during decompression to store literals. - This defaults to 64kB. Reducing this value reduces the memory footprint of - `ZSTD_DCtx` decompression contexts, - but might also result in a small decompression speed cost. - -- The C compiler macros `ZSTDLIB_VISIBLE`, `ZSTDERRORLIB_VISIBLE` and `ZDICTLIB_VISIBLE` - can be overridden to control the visibility of zstd's API. Additionally, - `ZSTDLIB_STATIC_API` and `ZDICTLIB_STATIC_API` can be overridden to control the visibility - of zstd's static API. Specifically, it can be set to `ZSTDLIB_HIDDEN` to hide the symbols - from the shared library. These macros default to `ZSTDLIB_VISIBILITY`, - `ZSTDERRORLIB_VSIBILITY`, and `ZDICTLIB_VISIBILITY` if unset, for backwards compatibility - with the old macro names. - -- The C compiler macro `HUF_DISABLE_FAST_DECODE` disables the newer Huffman fast C - and assembly decoding loops. You may want to use this macro if these loops are - slower on your platform. - -#### Windows : using MinGW+MSYS to create DLL - -DLL can be created using MinGW+MSYS with the `make libzstd` command. -This command creates `dll\libzstd.dll` and the import library `dll\libzstd.lib`. -The import library is only required with Visual C++. -The header file `zstd.h` and the dynamic library `dll\libzstd.dll` are required to -compile a project using gcc/MinGW. -The dynamic library has to be added to linking options. -It means that if a project that uses ZSTD consists of a single `test-dll.c` -file it should be linked with `dll\libzstd.dll`. For example: -``` - gcc $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\libzstd.dll -``` -The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`. - - -#### Advanced Build options - -The build system requires a hash function in order to -separate object files created with different compilation flags. -By default, it tries to use `md5sum` or equivalent. -The hash function can be manually switched by setting the `HASH` variable. -For example : `make HASH=xxhsum` -The hash function needs to generate at least 64-bit using hexadecimal format. -When no hash function is found, -the Makefile just generates all object files into the same default directory, -irrespective of compilation flags. -This functionality only matters if `libzstd` is compiled multiple times -with different build flags. 
- -The build directory, where object files are stored -can also be manually controlled using variable `BUILD_DIR`, -for example `make BUILD_DIR=objectDir/v1`. -In which case, the hash function doesn't matter. - - -#### Deprecated API - -Obsolete API on their way out are stored in directory `lib/deprecated`. -At this stage, it contains older streaming prototypes, in `lib/deprecated/zbuff.h`. -These prototypes will be removed in some future version. -Consider migrating code towards supported streaming API exposed in `zstd.h`. - - -#### Miscellaneous - -The other files are not source code. There are : - - - `BUCK` : support for `buck` build system (https://buckbuild.com/) - - `Makefile` : `make` script to build and install zstd library (static and dynamic) - - `README.md` : this file - - `dll/` : resources directory for Windows compilation - - `libzstd.pc.in` : script for `pkg-config` (used in `make install`) diff --git a/zstandard_android/src/common/bits.h b/zstandard_android/src/common/bits.h deleted file mode 100644 index 992cc69..0000000 --- a/zstandard_android/src/common/bits.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_BITS_H -#define ZSTD_BITS_H - -#include "mem.h" - -MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val) -{ - assert(val != 0); - { - static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, - 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, - 26, 12, 18, 6, 11, 5, 10, 9}; - return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27]; - } -} - -MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) -{ - assert(val != 0); -# if defined(_MSC_VER) -# if STATIC_BMI2 == 1 - return (unsigned)_tzcnt_u32(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)__builtin_ctz(val); -# elif defined(__ICCARM__) - return (unsigned)__builtin_ctz(val); -# else - return ZSTD_countTrailingZeros32_fallback(val); -# endif -} - -MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) { - assert(val != 0); - { - static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31}; - val |= val >> 1; - val |= val >> 2; - val |= val >> 4; - val |= val >> 8; - val |= val >> 16; - return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27]; - } -} - -MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) -{ - assert(val != 0); -# if defined(_MSC_VER) -# if STATIC_BMI2 == 1 - return (unsigned)_lzcnt_u32(val); -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)(31 - r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)__builtin_clz(val); -# elif defined(__ICCARM__) - return (unsigned)__builtin_clz(val); -# else - return ZSTD_countLeadingZeros32_fallback(val); -# endif -} - -MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) -{ - assert(val != 0); -# if 
defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 == 1 - return (unsigned)_tzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) - return (unsigned)__builtin_ctzll(val); -# elif defined(__ICCARM__) - return (unsigned)__builtin_ctzll(val); -# else - { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (leastSignificantWord == 0) { - return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); - } else { - return ZSTD_countTrailingZeros32(leastSignificantWord); - } - } -# endif -} - -MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) -{ - assert(val != 0); -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 == 1 - return (unsigned)_lzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanReverse64(&r, val); - return (unsigned)(63 - r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (unsigned)(__builtin_clzll(val)); -# elif defined(__ICCARM__) - return (unsigned)(__builtin_clzll(val)); -# else - { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (mostSignificantWord == 0) { - return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); - } else { - return ZSTD_countLeadingZeros32(mostSignificantWord); - } - } -# endif -} - -MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { - return ZSTD_countTrailingZeros64((U64)val) >> 3; - } else { - return ZSTD_countTrailingZeros32((U32)val) >> 3; - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { - return ZSTD_countLeadingZeros64((U64)val) >> 3; - } else { - return ZSTD_countLeadingZeros32((U32)val) >> 3; - } - } -} - -MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ -{ - assert(val != 0); - return 31 - ZSTD_countLeadingZeros32(val); -} - -/* ZSTD_rotateRight_*(): - * Rotates a bitfield to the right by "count" bits. - * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts - */ -MEM_STATIC -U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { - assert(count < 64); - count &= 0x3F; /* for fickle pattern recognition */ - return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); -} - -MEM_STATIC -U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { - assert(count < 32); - count &= 0x1F; /* for fickle pattern recognition */ - return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); -} - -MEM_STATIC -U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { - assert(count < 16); - count &= 0x0F; /* for fickle pattern recognition */ - return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); -} - -#endif /* ZSTD_BITS_H */ diff --git a/zstandard_android/src/common/compiler.h b/zstandard_android/src/common/compiler.h deleted file mode 100644 index fdf0dd1..0000000 --- a/zstandard_android/src/common/compiler.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). 
- * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_COMPILER_H -#define ZSTD_COMPILER_H - -#include - -#include "portability_macros.h" - -/*-******************************************************* -* Compiler specifics -*********************************************************/ -/* force inlining */ - -#if !defined(ZSTD_NO_INLINE) -#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# define INLINE_KEYWORD inline -#else -# define INLINE_KEYWORD -#endif - -#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) -# define FORCE_INLINE_ATTR __attribute__((always_inline)) -#elif defined(_MSC_VER) -# define FORCE_INLINE_ATTR __forceinline -#else -# define FORCE_INLINE_ATTR -#endif - -#else - -#define INLINE_KEYWORD -#define FORCE_INLINE_ATTR - -#endif - -/** - On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC). - This explicitly marks such functions as __cdecl so that the code will still compile - if a CC other than __cdecl has been made the default. -*/ -#if defined(_MSC_VER) -# define WIN_CDECL __cdecl -#else -# define WIN_CDECL -#endif - -/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ -#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) -# define UNUSED_ATTR __attribute__((unused)) -#else -# define UNUSED_ATTR -#endif - -/** - * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant - * parameters. They must be inlined for the compiler to eliminate the constant - * branches. - */ -#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR -/** - * HINT_INLINE is used to help the compiler generate better code. It is *not* - * used for "templates", so it can be tweaked based on the compilers - * performance. - * - * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the - * always_inline attribute. - * - * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline - * attribute. - */ -#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 -# define HINT_INLINE static INLINE_KEYWORD -#else -# define HINT_INLINE FORCE_INLINE_TEMPLATE -#endif - -/* "soft" inline : - * The compiler is free to select if it's a good idea to inline or not. - * The main objective is to silence compiler warnings - * when a defined function in included but not used. - * - * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit. - * Updating the prefix is probably preferable, but requires a fairly large codemod, - * since this name is used everywhere. 
- */ -#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */ -#if defined(__GNUC__) -# define MEM_STATIC static __inline UNUSED_ATTR -#elif defined(__IAR_SYSTEMS_ICC__) -# define MEM_STATIC static inline UNUSED_ATTR -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define MEM_STATIC static inline -#elif defined(_MSC_VER) -# define MEM_STATIC static __inline -#else -# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif -#endif - -/* force no inlining */ -#ifdef _MSC_VER -# define FORCE_NOINLINE static __declspec(noinline) -#else -# if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) -# define FORCE_NOINLINE static __attribute__((__noinline__)) -# else -# define FORCE_NOINLINE static -# endif -#endif - - -/* target attribute */ -#if defined(__GNUC__) || defined(__IAR_SYSTEMS_ICC__) -# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) -#else -# define TARGET_ATTRIBUTE(target) -#endif - -/* Target attribute for BMI2 dynamic dispatch. - * Enable lzcnt, bmi, and bmi2. - * We test for bmi1 & bmi2. lzcnt is included in bmi1. - */ -#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") - -/* prefetch - * can be disabled, by declaring NO_PREFETCH build macro */ -#if defined(NO_PREFETCH) -# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ -# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ -#else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) && !defined(_M_ARM64EC) /* _mm_prefetch() is not defined outside of x86/x64 */ -# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ -# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) -# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) -# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) -# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) -# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) -# elif defined(__aarch64__) -# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0) -# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0) -# else -# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */ -# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */ -# endif -#endif /* NO_PREFETCH */ - -#define CACHELINE_SIZE 64 - -#define PREFETCH_AREA(p, s) \ - do { \ - const char* const _ptr = (const char*)(p); \ - size_t const _size = (size_t)(s); \ - size_t _pos; \ - for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ - PREFETCH_L2(_ptr + _pos); \ - } \ - } while (0) - -/* vectorization - * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, - * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */ -#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__) -# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5) -# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) -# else -# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")") -# endif -#else -# define DONT_VECTORIZE -#endif - -/* Tell the compiler that a branch is likely or unlikely. - * Only use these macros if it causes the compiler to generate better code. 
- * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc - * and clang, please do. - */ -#if defined(__GNUC__) -#define LIKELY(x) (__builtin_expect((x), 1)) -#define UNLIKELY(x) (__builtin_expect((x), 0)) -#else -#define LIKELY(x) (x) -#define UNLIKELY(x) (x) -#endif - -#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) -# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0) -#else -# define ZSTD_UNREACHABLE do { assert(0); } while (0) -#endif - -/* disable warnings */ -#ifdef _MSC_VER /* Visual Studio */ -# include /* For Visual 2005 */ -# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ -# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ -# pragma warning(disable : 4324) /* disable: C4324: padded structure */ -#endif - -/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/ -#ifndef STATIC_BMI2 -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) -# ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 -# define STATIC_BMI2 1 -# endif -# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__) -# define STATIC_BMI2 1 -# endif -#endif - -#ifndef STATIC_BMI2 - #define STATIC_BMI2 0 -#endif - -/* compile time determination of SIMD support */ -#if !defined(ZSTD_NO_INTRINSICS) -# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) -# define ZSTD_ARCH_X86_SSE2 -# endif -# if defined(__ARM_NEON) || defined(_M_ARM64) -# define ZSTD_ARCH_ARM_NEON -# endif -# -# if defined(ZSTD_ARCH_X86_SSE2) -# include -# elif defined(ZSTD_ARCH_ARM_NEON) -# include -# endif -#endif - -/* C-language Attributes are added in C23. */ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) -# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) -#else -# define ZSTD_HAS_C_ATTRIBUTE(x) 0 -#endif - -/* Only use C++ attributes in C++. Some compilers report support for C++ - * attributes when compiling with C. - */ -#if defined(__cplusplus) && defined(__has_cpp_attribute) -# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) -#else -# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0 -#endif - -/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute. - * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough - * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough - * - Else: __attribute__((__fallthrough__)) - */ -#ifndef ZSTD_FALLTHROUGH -# if ZSTD_HAS_C_ATTRIBUTE(fallthrough) -# define ZSTD_FALLTHROUGH [[fallthrough]] -# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough) -# define ZSTD_FALLTHROUGH [[fallthrough]] -# elif __has_attribute(__fallthrough__) -/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon - * gcc complains about: a label can only be part of a statement and a declaration is not a statement. 
- */ -# define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__)) -# else -# define ZSTD_FALLTHROUGH -# endif -#endif - -/*-************************************************************** -* Alignment check -*****************************************************************/ - -/* this test was initially positioned in mem.h, - * but this file is removed (or replaced) for linux kernel - * so it's now hosted in compiler.h, - * which remains valid for both user & kernel spaces. - */ - -#ifndef ZSTD_ALIGNOF -# if defined(__GNUC__) || defined(_MSC_VER) -/* covers gcc, clang & MSVC */ -/* note : this section must come first, before C11, - * due to a limitation in the kernel source generator */ -# define ZSTD_ALIGNOF(T) __alignof(T) - -# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) -/* C11 support */ -# include -# define ZSTD_ALIGNOF(T) alignof(T) - -# else -/* No known support for alignof() - imperfect backup */ -# define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T)) - -# endif -#endif /* ZSTD_ALIGNOF */ - -/*-************************************************************** -* Sanitizer -*****************************************************************/ - -/** - * Zstd relies on pointer overflow in its decompressor. - * We add this attribute to functions that rely on pointer overflow. - */ -#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -# if __has_attribute(no_sanitize) -# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8 - /* gcc < 8 only has signed-integer-overlow which triggers on pointer overflow */ -# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow"))) -# else - /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */ -# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow"))) -# endif -# else -# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -# endif -#endif - -/** - * Helper function to perform a wrapped pointer difference without triggering - * UBSAN. - * - * @returns lhs - rhs with wrapping - */ -MEM_STATIC -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs) -{ - return lhs - rhs; -} - -/** - * Helper function to perform a wrapped pointer add without triggering UBSAN. - * - * @return ptr + add with wrapping - */ -MEM_STATIC -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add) -{ - return ptr + add; -} - -/** - * Helper function to perform a wrapped pointer subtraction without triggering - * UBSAN. - * - * @return ptr - sub with wrapping - */ -MEM_STATIC -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub) -{ - return ptr - sub; -} - -/** - * Helper function to add to a pointer that works around C's undefined behavior - * of adding 0 to NULL. - * - * @returns `ptr + add` except it defines `NULL + 0 == NULL`. - */ -MEM_STATIC -unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add) -{ - return add > 0 ? ptr + add : ptr; -} - -/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an - * abundance of caution, disable our custom poisoning on mingw. 
*/ -#ifdef __MINGW32__ -#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE -#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1 -#endif -#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE -#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1 -#endif -#endif - -#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) -/* Not all platforms that support msan provide sanitizers/msan_interface.h. - * We therefore declare the functions we need ourselves, rather than trying to - * include the header file... */ -#include /* size_t */ -#define ZSTD_DEPS_NEED_STDINT -#include "zstd_deps.h" /* intptr_t */ - -/* Make memory region fully initialized (without changing its contents). */ -void __msan_unpoison(const volatile void *a, size_t size); - -/* Make memory region fully uninitialized (without changing its contents). - This is a legacy interface that does not update origin information. Use - __msan_allocated_memory() instead. */ -void __msan_poison(const volatile void *a, size_t size); - -/* Returns the offset of the first (at least partially) poisoned byte in the - memory range, or -1 if the whole range is good. */ -intptr_t __msan_test_shadow(const volatile void *x, size_t size); - -/* Print shadow and origin for the memory range to stderr in a human-readable - format. */ -void __msan_print_shadow(const volatile void *x, size_t size); -#endif - -#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE) -/* Not all platforms that support asan provide sanitizers/asan_interface.h. - * We therefore declare the functions we need ourselves, rather than trying to - * include the header file... */ -#include /* size_t */ - -/** - * Marks a memory region ([addr, addr+size)) as unaddressable. - * - * This memory must be previously allocated by your program. Instrumented - * code is forbidden from accessing addresses in this region until it is - * unpoisoned. This function is not guaranteed to poison the entire region - - * it could poison only a subregion of [addr, addr+size) due to ASan - * alignment restrictions. - * - * \note This function is not thread-safe because no two threads can poison or - * unpoison memory in the same memory region simultaneously. - * - * \param addr Start of memory region. - * \param size Size of memory region. */ -void __asan_poison_memory_region(void const volatile *addr, size_t size); - -/** - * Marks a memory region ([addr, addr+size)) as addressable. - * - * This memory must be previously allocated by your program. Accessing - * addresses in this region is allowed until this region is poisoned again. - * This function could unpoison a super-region of [addr, addr+size) due - * to ASan alignment restrictions. - * - * \note This function is not thread-safe because no two threads can - * poison or unpoison memory in the same memory region simultaneously. - * - * \param addr Start of memory region. - * \param size Size of memory region. */ -void __asan_unpoison_memory_region(void const volatile *addr, size_t size); -#endif - -#endif /* ZSTD_COMPILER_H */ diff --git a/zstandard_android/src/common/debug.h b/zstandard_android/src/common/debug.h deleted file mode 100644 index a16b69e..0000000 --- a/zstandard_android/src/common/debug.h +++ /dev/null @@ -1,116 +0,0 @@ -/* ****************************************************************** - * debug - * Part of FSE library - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. -****************************************************************** */ - - -/* - * The purpose of this header is to enable debug functions. - * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time, - * and DEBUG_STATIC_ASSERT() for compile-time. - * - * By default, DEBUGLEVEL==0, which means run-time debug is disabled. - * - * Level 1 enables assert() only. - * Starting level 2, traces can be generated and pushed to stderr. - * The higher the level, the more verbose the traces. - * - * It's possible to dynamically adjust level using variable g_debug_level, - * which is only declared if DEBUGLEVEL>=2, - * and is a global variable, not multi-thread protected (use with care) - */ - -#ifndef DEBUG_H_12987983217 -#define DEBUG_H_12987983217 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* static assert is triggered at compile time, leaving no runtime artefact. - * static assert only works with compile-time constants. - * Also, this variant can only be used inside a function. */ -#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1]) - - -/* DEBUGLEVEL is expected to be defined externally, - * typically through compiler command line. - * Value must be a number. */ -#ifndef DEBUGLEVEL -# define DEBUGLEVEL 0 -#endif - - -/* recommended values for DEBUGLEVEL : - * 0 : release mode, no debug, all run-time checks disabled - * 1 : enables assert() only, no display - * 2 : reserved, for currently active debug path - * 3 : events once per object lifetime (CCtx, CDict, etc.) - * 4 : events once per frame - * 5 : events once per block - * 6 : events once per sequence (verbose) - * 7+: events at every position (*very* verbose) - * - * It's generally inconvenient to output traces > 5. - * In which case, it's possible to selectively trigger high verbosity levels - * by modifying g_debug_level. - */ - -#if (DEBUGLEVEL>=1) -# define ZSTD_DEPS_NEED_ASSERT -# include "zstd_deps.h" -#else -# ifndef assert /* assert may be already defined, due to prior #include */ -# define assert(condition) ((void)0) /* disable assert (default) */ -# endif -#endif - -#if (DEBUGLEVEL>=2) -# define ZSTD_DEPS_NEED_IO -# include "zstd_deps.h" -extern int g_debuglevel; /* the variable is only declared, - it actually lives in debug.c, - and is shared by the whole process. - It's not thread-safe. - It's useful when enabling very verbose levels - on selective conditions (such as position in src) */ - -# define RAWLOG(l, ...) \ - do { \ - if (l<=g_debuglevel) { \ - ZSTD_DEBUG_PRINT(__VA_ARGS__); \ - } \ - } while (0) - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) -#define LINE_AS_STRING TOSTRING(__LINE__) - -# define DEBUGLOG(l, ...) \ - do { \ - if (l<=g_debuglevel) { \ - ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \ - ZSTD_DEBUG_PRINT(" \n"); \ - } \ - } while (0) -#else -# define RAWLOG(l, ...) do { } while (0) /* disabled */ -# define DEBUGLOG(l, ...) 
do { } while (0) /* disabled */ -#endif - - -#if defined (__cplusplus) -} -#endif - -#endif /* DEBUG_H_12987983217 */ diff --git a/zstandard_android/src/common/error_private.c b/zstandard_android/src/common/error_private.c deleted file mode 100644 index 075fc5e..0000000 --- a/zstandard_android/src/common/error_private.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -/* The purpose of this file is to have a single list of error strings embedded in binary */ - -#include "error_private.h" - -const char* ERR_getErrorString(ERR_enum code) -{ -#ifdef ZSTD_STRIP_ERROR_STRINGS - (void)code; - return "Error strings stripped"; -#else - static const char* const notErrorCode = "Unspecified error code"; - switch( code ) - { - case PREFIX(no_error): return "No error detected"; - case PREFIX(GENERIC): return "Error (generic)"; - case PREFIX(prefix_unknown): return "Unknown frame descriptor"; - case PREFIX(version_unsupported): return "Version not supported"; - case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; - case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; - case PREFIX(corruption_detected): return "Data corruption detected"; - case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; - case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification"; - case PREFIX(parameter_unsupported): return "Unsupported parameter"; - case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters"; - case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; - case PREFIX(init_missing): return "Context should be init first"; - case PREFIX(memory_allocation): return "Allocation error : not enough memory"; - case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; - case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; - case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; - case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; - case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; - case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; - case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; - case PREFIX(dictionary_wrong): return "Dictionary mismatch"; - case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; - case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; - case PREFIX(srcSize_wrong): return "Src size is incorrect"; - case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; - case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full"; - case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty"; - /* following error codes are not stable and may be removed or changed in a future version */ - case 
PREFIX(frameIndex_tooLarge): return "Frame index is too large"; - case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; - case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; - case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; - case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code"; - case PREFIX(externalSequences_invalid): return "External sequences are not valid"; - case PREFIX(maxCode): - default: return notErrorCode; - } -#endif -} diff --git a/zstandard_android/src/common/error_private.h b/zstandard_android/src/common/error_private.h deleted file mode 100644 index 0156010..0000000 --- a/zstandard_android/src/common/error_private.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -/* Note : this module is expected to remain private, do not expose it */ - -#ifndef ERROR_H_MODULE -#define ERROR_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* **************************************** -* Dependencies -******************************************/ -#include "../zstd_errors.h" /* enum list */ -#include "compiler.h" -#include "debug.h" -#include "zstd_deps.h" /* size_t */ - - -/* **************************************** -* Compiler-specific -******************************************/ -#if defined(__GNUC__) -# define ERR_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define ERR_STATIC static inline -#elif defined(_MSC_VER) -# define ERR_STATIC static __inline -#else -# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - -/*-**************************************** -* Customization (error_public.h) -******************************************/ -typedef ZSTD_ErrorCode ERR_enum; -#define PREFIX(name) ZSTD_error_##name - - -/*-**************************************** -* Error codes handling -******************************************/ -#undef ERROR /* already defined on Visual Studio */ -#define ERROR(name) ZSTD_ERROR(name) -#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) - -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } - -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } - -/* check and forward error code */ -#define CHECK_V_F(e, f) \ - size_t const e = f; \ - do { \ - if (ERR_isError(e)) \ - return e; \ - } while (0) -#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0) - - -/*-**************************************** -* Error Strings -******************************************/ - -const char* ERR_getErrorString(ERR_enum code); /* error_private.c */ - -ERR_STATIC const char* ERR_getErrorName(size_t code) -{ - return ERR_getErrorString(ERR_getErrorCode(code)); -} - -/** - * Ignore: this is an internal helper. - * - * This is a helper function to help force C99-correctness during compilation. - * Under strict compilation modes, variadic macro arguments can't be empty. 
- * However, variadic function arguments can be. Using a function therefore lets - * us statically check that at least one (string) argument was passed, - * independent of the compilation flags. - */ -static INLINE_KEYWORD UNUSED_ATTR -void _force_has_format_string(const char *format, ...) { - (void)format; -} - -/** - * Ignore: this is an internal helper. - * - * We want to force this function invocation to be syntactically correct, but - * we don't want to force runtime evaluation of its arguments. - */ -#define _FORCE_HAS_FORMAT_STRING(...) \ - do { \ - if (0) { \ - _force_has_format_string(__VA_ARGS__); \ - } \ - } while (0) - -#define ERR_QUOTE(str) #str - -/** - * Return the specified error if the condition evaluates to true. - * - * In debug modes, prints additional information. - * In order to do that (particularly, printing the conditional that failed), - * this can't just wrap RETURN_ERROR(). - */ -#define RETURN_ERROR_IF(cond, err, ...) \ - do { \ - if (cond) { \ - RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ - __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } \ - } while (0) - -/** - * Unconditionally return the specified error. - * - * In debug modes, prints additional information. - */ -#define RETURN_ERROR(err, ...) \ - do { \ - RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ - __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } while(0) - -/** - * If the provided expression evaluates to an error code, returns that error code. - * - * In debug modes, prints additional information. - */ -#define FORWARD_IF_ERROR(err, ...) \ - do { \ - size_t const err_code = (err); \ - if (ERR_isError(err_code)) { \ - RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ - __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return err_code; \ - } \ - } while(0) - -#if defined (__cplusplus) -} -#endif - -#endif /* ERROR_H_MODULE */ diff --git a/zstandard_android/src/common/fse.h b/zstandard_android/src/common/fse.h deleted file mode 100644 index 2ae128e..0000000 --- a/zstandard_android/src/common/fse.h +++ /dev/null @@ -1,640 +0,0 @@ -/* ****************************************************************** - * FSE : Finite State Entropy codec - * Public Prototypes declaration - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
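Aside on the error_private.h hunk above: RETURN_ERROR_IF, RETURN_ERROR and FORWARD_IF_ERROR are written for size_t-returning functions whose error values are negated ZSTD_ErrorCode values, so ERR_isError() can distinguish them from valid sizes. A minimal sketch of the intended call pattern; the two functions below are hypothetical and not part of this diff:

    /* Hypothetical callers illustrating the error_private.h macros above. */
    static size_t copy_checked(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
    {
        RETURN_ERROR_IF(dst == NULL, dstBuffer_null, "dst must not be NULL");
        RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                        "need %u bytes", (unsigned)srcSize);
        ZSTD_memcpy(dst, src, srcSize);
        return srcSize;   /* success: small values are never mistaken for errors */
    }

    static size_t copy_forwarded(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize)
    {
        /* Propagates any negated error code returned by copy_checked(). */
        FORWARD_IF_ERROR(copy_checked(dst, dstCapacity, src, srcSize), "copy failed");
        return 0;
    }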
-****************************************************************** */ - -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef FSE_H -#define FSE_H - - -/*-***************************************** -* Dependencies -******************************************/ -#include "zstd_deps.h" /* size_t, ptrdiff_t */ - - -/*-***************************************** -* FSE_PUBLIC_API : control library symbols visibility -******************************************/ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define FSE_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define FSE_PUBLIC_API -#endif - -/*------ Version ------*/ -#define FSE_VERSION_MAJOR 0 -#define FSE_VERSION_MINOR 9 -#define FSE_VERSION_RELEASE 0 - -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE -#define FSE_QUOTE(str) #str -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) - -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ - - -/*-***************************************** -* Tool functions -******************************************/ -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ - -/* Error Management */ -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ -FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ - - -/*-***************************************** -* FSE detailed API -******************************************/ -/*! -FSE_compress() does the following: -1. count symbol occurrence from source[] into table count[] (see hist.h) -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) -3. save normalized counters to memory buffer using writeNCount() -4. build encoding table 'CTable' from normalized counters -5. encode the data stream using encoding table 'CTable' - -FSE_decompress() does the following: -1. read normalized counters with readNCount() -2. build decoding table 'DTable' from normalized counters -3. decode the data stream using decoding table 'DTable' - -The following API allows targeting specific sub-functions for advanced tasks. -For example, it's possible to compress several blocks using the same 'CTable', -or to save and provide normalized distribution using external method. -*/ - -/* *** COMPRESSION *** */ - -/*! FSE_optimalTableLog(): - dynamically downsize 'tableLog' when conditions are met. - It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. - @return : recommended tableLog (necessarily <= 'maxTableLog') */ -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); - -/*! FSE_normalizeCount(): - normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) - 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). 
- useLowProbCount is a boolean parameter which trades off compressed size for - faster header decoding. When it is set to 1, the compressed data will be slightly - smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be - faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 - is a good default, since header deserialization makes a big speed difference. - Otherwise, useLowProbCount=1 is a good default, since the speed difference is small. - @return : tableLog, - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, - const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount); - -/*! FSE_NCountWriteBound(): - Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. - Typically useful for allocation purpose. */ -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_writeNCount(): - Compactly save 'normalizedCounter' into 'buffer'. - @return : size of the compressed table, - or an errorCode, which can be tested using FSE_isError(). */ -FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, - const short* normalizedCounter, - unsigned maxSymbolValue, unsigned tableLog); - -/*! Constructor and Destructor of FSE_CTable. - Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ - -/*! FSE_buildCTable(): - Builds `ct`, which must be already allocated, using FSE_createCTable(). - @return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_compress_usingCTable(): - Compress `src` using `ct` into `dst` which must be already allocated. - @return : size of compressed data (<= `dstCapacity`), - or 0 if compressed data could not fit into `dst`, - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); - -/*! -Tutorial : ----------- -The first step is to count all symbols. FSE_count() does this job very fast. -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) -FSE_count() will return the number of occurrence of the most frequent symbol. -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). - -The next step is to normalize the frequencies. -FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. -It also guarantees a minimum of 1 to any Symbol with frequency >= 1. -You can use 'tableLog'==0 to mean "use default tableLog value". -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). 
- -The result of FSE_normalizeCount() will be saved into a table, -called 'normalizedCounter', which is a table of signed short. -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. -The return value is tableLog if everything proceeded as expected. -It is 0 if there is a single symbol within distribution. -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). - -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). -'buffer' must be already allocated. -For guaranteed success, buffer size must be at least FSE_headerBound(). -The result of the function is the number of bytes written into 'buffer'. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). - -'normalizedCounter' can then be used to create the compression table 'CTable'. -The space required by 'CTable' must be already allocated, using FSE_createCTable(). -You can then use FSE_buildCTable() to fill 'CTable'. -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). - -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. -If it returns '0', compressed data could not fit into 'dst'. -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). -*/ - - -/* *** DECOMPRESSION *** */ - -/*! FSE_readNCount(): - Read compactly saved 'normalizedCounter' from 'rBuffer'. - @return : size read from 'rBuffer', - or an errorCode, which can be tested using FSE_isError(). - maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ -FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, - unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, - const void* rBuffer, size_t rBuffSize); - -/*! FSE_readNCount_bmi2(): - * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. - */ -FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, - unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, - const void* rBuffer, size_t rBuffSize, int bmi2); - -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ - -/*! -Tutorial : ----------- -(Note : these functions only decompress FSE-compressed blocks. - If block is uncompressed, use memcpy() instead - If block is a single repeated byte, use memset() instead ) - -The first step is to obtain the normalized frequencies of symbols. -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. -In practice, that means it's necessary to know 'maxSymbolValue' beforehand, -or size the table to handle worst case situations (typically 256). -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. -Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. -If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
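As a reference for the compression-side tutorial above, the five steps condense to roughly the following sketch. Assumptions: counting uses HIST_count() from hist.h (the header referenced by step 1 of the FSE_compress() outline), CHECK_F comes from error_private.h, and tableLog is capped at 12; the function name and buffer sizing are hypothetical.

    /* Sketch of the FSE compression pipeline described in the tutorial above. */
    static size_t fse_compress_sketch(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
    {
        unsigned count[256];
        short norm[256];
        unsigned maxSymbolValue = 255;
        unsigned tableLog;
        FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];    /* static allocation macro */

        CHECK_F( HIST_count(count, &maxSymbolValue, src, srcSize) );        /* 1. count */
        tableLog = FSE_optimalTableLog(12, srcSize, maxSymbolValue);
        CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize,         /* 2. normalize */
                                    maxSymbolValue, srcSize >= 2048) );     /* useLowProbCount per doc above */
        {   size_t const hSize = FSE_writeNCount(dst, dstCapacity,          /* 3. save NCount */
                                                 norm, maxSymbolValue, tableLog);
            if (FSE_isError(hSize)) return hSize;
            CHECK_F( FSE_buildCTable(ct, norm, maxSymbolValue, tableLog) ); /* 4. build CTable */
            {   size_t const cSize = FSE_compress_usingCTable(              /* 5. encode */
                        (BYTE*)dst + hSize, dstCapacity - hSize, src, srcSize, ct);
                if (FSE_isError(cSize)) return cSize;
                if (cSize == 0) return 0;   /* not compressible into dst */
                return hSize + cSize;
        }   }
    }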
- -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. -This is performed by the function FSE_buildDTable(). -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). -If there is an error, the function will return an error code, which can be tested using FSE_isError(). - -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). -`cSrcSize` must be strictly correct, otherwise decompression will fail. -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). -If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) -*/ - -#endif /* FSE_H */ - - -#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) -#define FSE_H_FSE_STATIC_LINKING_ONLY - -/* *** Dependency *** */ -#include "bitstream.h" - - -/* ***************************************** -* Static allocation -*******************************************/ -/* FSE buffer bounds */ -#define FSE_NCOUNTBOUND 512 -#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2)) -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog))) - -/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */ -#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) -#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) - - -/* ***************************************** - * FSE advanced API - ***************************************** */ - -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); -/**< same as FSE_optimalTableLog(), which used `minus==2` */ - -size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ - -/* FSE_buildCTable_wksp() : - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). - * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`. - * See FSE_buildCTable_wksp() for breakdown of workspace usage. 
- */ -#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */) -#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) -size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); - -#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8) -#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned)) -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); -/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ - -#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) -#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) -size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`. - * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */ - -typedef enum { - FSE_repeat_none, /**< Cannot use the previous table */ - FSE_repeat_check, /**< Can use the previous table but it must be checked */ - FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */ - } FSE_repeat; - -/* ***************************************** -* FSE symbol compression API -*******************************************/ -/*! - This API consists of small unitary functions, which highly benefit from being inlined. - Hence their body are included in next section. -*/ -typedef struct { - ptrdiff_t value; - const void* stateTable; - const void* symbolTT; - unsigned stateLog; -} FSE_CState_t; - -static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct); - -static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol); - -static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr); - -/**< -These functions are inner components of FSE_compress_usingCTable(). -They allow the creation of custom streams, mixing multiple tables and bit sources. - -A key property to keep in mind is that encoding and decoding are done **in reverse direction**. -So the first symbol you will encode is the last you will decode, like a LIFO stack. - -You will need a few variables to track your CStream. They are : - -FSE_CTable ct; // Provided by FSE_buildCTable() -BIT_CStream_t bitStream; // bitStream tracking structure -FSE_CState_t state; // State tracking structure (can have several) - - -The first thing to do is to init bitStream and state. 
- size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize); - FSE_initCState(&state, ct); - -Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError(); -You can then encode your input data, byte after byte. -FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time. -Remember decoding will be done in reverse direction. - FSE_encodeByte(&bitStream, &state, symbol); - -At any time, you can also add any bit sequence. -Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders - BIT_addBits(&bitStream, bitField, nbBits); - -The above methods don't commit data to memory, they just store it into local register, for speed. -Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). -Writing data to memory is a manual operation, performed by the flushBits function. - BIT_flushBits(&bitStream); - -Your last FSE encoding operation shall be to flush your last state value(s). - FSE_flushState(&bitStream, &state); - -Finally, you must close the bitStream. -The function returns the size of CStream in bytes. -If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible) -If there is an error, it returns an errorCode (which can be tested using FSE_isError()). - size_t size = BIT_closeCStream(&bitStream); -*/ - - -/* ***************************************** -* FSE symbol decompression API -*******************************************/ -typedef struct { - size_t state; - const void* table; /* precise table may vary, depending on U16 */ -} FSE_DState_t; - - -static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt); - -static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); - -static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr); - -/**< -Let's now decompose FSE_decompress_usingDTable() into its unitary components. -You will decode FSE-encoded symbols from the bitStream, -and also any other bitFields you put in, **in reverse order**. - -You will need a few variables to track your bitStream. They are : - -BIT_DStream_t DStream; // Stream context -FSE_DState_t DState; // State context. Multiple ones are possible -FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable() - -The first thing to do is to init the bitStream. - errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize); - -You should then retrieve your initial state(s) -(in reverse flushing order if you have several ones) : - errorCode = FSE_initDState(&DState, &DStream, DTablePtr); - -You can then decode your data, symbol after symbol. -For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'. -Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out). - unsigned char symbol = FSE_decodeSymbol(&DState, &DStream); - -You can retrieve any bitfield you eventually stored into the bitStream (in reverse order) -Note : maximum allowed nbBits is 25, for 32-bits compatibility - size_t bitField = BIT_readBits(&DStream, nbBits); - -All above operations only read from local register (which size depends on size_t). -Refueling the register from memory is manually performed by the reload method. - endSignal = FSE_reloadDStream(&DStream); - -BIT_reloadDStream() result tells if there is still some more data to read from DStream. -BIT_DStream_unfinished : there is still some data left into the DStream. -BIT_DStream_endOfBuffer : Dstream reached end of buffer. 
Its container may no longer be completely filled. -BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed. -BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted. - -When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop, -to properly detect the exact end of stream. -After each decoded symbol, check if DStream is fully consumed using this simple test : - BIT_reloadDStream(&DStream) >= BIT_DStream_completed - -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. -Checking if DStream has reached its end is performed by : - BIT_endOfDStream(&DStream); -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. - FSE_endOfDState(&DState); -*/ - - -/* ***************************************** -* FSE unsafe API -*******************************************/ -static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ - - -/* ***************************************** -* Implementation of inlined functions -*******************************************/ -typedef struct { - int deltaFindState; - U32 deltaNbBits; -} FSE_symbolCompressionTransform; /* total 8 bytes */ - -MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) -{ - const void* ptr = ct; - const U16* u16ptr = (const U16*) ptr; - const U32 tableLog = MEM_read16(ptr); - statePtr->value = (ptrdiff_t)1<<tableLog; - statePtr->stateTable = u16ptr+2; - statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1); - statePtr->stateLog = tableLog; -} - - -/*! FSE_initCState2() : -* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) -* uses the smallest state value possible, saving the cost of this symbol */ -MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) -{ - FSE_initCState(statePtr, ct); - { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; - const U16* stateTable = (const U16*)(statePtr->stateTable); - U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); - statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; - statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; - } -} - -MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol) -{ - FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; - const U16* const stateTable = (const U16*)(statePtr->stateTable); - U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); - BIT_addBits(bitC, (size_t)statePtr->value, nbBitsOut); - statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; -} - -MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) -{ - BIT_addBits(bitC, (size_t)statePtr->value, statePtr->stateLog); - BIT_flushBits(bitC); -} - - -/* FSE_getMaxNbBits() : - * Approximate maximum cost of a symbol, in bits. - * Fractional get rounded up (i.e.
a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ -MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) -{ - const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; - return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16; -} - -/* FSE_bitCost() : - * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ -MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog) -{ - const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; - U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; - U32 const threshold = (minNbBits+1) << 16; - assert(tableLog < 16); - assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */ - { U32 const tableSize = 1 << tableLog; - U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); - U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */ - U32 const bitMultiplier = 1 << accuracyLog; - assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); - assert(normalizedDeltaFromThreshold <= bitMultiplier); - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold; - } -} - - -/* ====== Decompression ====== */ - -typedef struct { - U16 tableLog; - U16 fastMode; -} FSE_DTableHeader; /* sizeof U32 */ - -typedef struct -{ - unsigned short newState; - unsigned char symbol; - unsigned char nbBits; -} FSE_decode_t; /* size == U32 */ - -MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) -{ - const void* ptr = dt; - const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; - DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); - BIT_reloadDStream(bitD); - DStatePtr->table = dt + 1; -} - -MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - return DInfo.symbol; -} - -MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.newState + lowBits; -} - -MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - BYTE const symbol = DInfo.symbol; - size_t const lowBits = BIT_readBits(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -/*! 
FSE_decodeSymbolFast() : - unsafe, only works if no symbol has a probability > 50% */ -MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) -{ - FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - BYTE const symbol = DInfo.symbol; - size_t const lowBits = BIT_readBitsFast(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) -{ - return DStatePtr->state == 0; -} - - - -#ifndef FSE_COMMONDEFS_ONLY - -/* ************************************************************** -* Tuning parameters -****************************************************************/ -/*!MEMORY_USAGE : -* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) -* Increasing memory usage improves compression ratio -* Reduced memory usage can improve speed, due to cache effect -* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -#ifndef FSE_MAX_MEMORY_USAGE -# define FSE_MAX_MEMORY_USAGE 14 -#endif -#ifndef FSE_DEFAULT_MEMORY_USAGE -# define FSE_DEFAULT_MEMORY_USAGE 13 -#endif -#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE) -# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE" -#endif - -/*!FSE_MAX_SYMBOL_VALUE : -* Maximum symbol value authorized. -* Required for proper stack allocation */ -#ifndef FSE_MAX_SYMBOL_VALUE -# define FSE_MAX_SYMBOL_VALUE 255 -#endif - -/* ************************************************************** -* template functions type & suffix -****************************************************************/ -#define FSE_FUNCTION_TYPE BYTE -#define FSE_FUNCTION_EXTENSION -#define FSE_DECODE_TYPE FSE_decode_t - - -#endif /* !FSE_COMMONDEFS_ONLY */ - - -/* *************************************************************** -* Constants -*****************************************************************/ -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) -#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) -#define FSE_MIN_TABLELOG 5 - -#define FSE_TABLELOG_ABSOLUTE_MAX 15 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX -# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" -#endif - -#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) - - -#endif /* FSE_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif diff --git a/zstandard_android/src/common/huf.h b/zstandard_android/src/common/huf.h deleted file mode 100644 index 99bf85d..0000000 --- a/zstandard_android/src/common/huf.h +++ /dev/null @@ -1,286 +0,0 @@ -/* ****************************************************************** - * huff0 huffman codec, - * part of Finite State Entropy library - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * You can contact the author at : - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses.
-****************************************************************** */ - -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef HUF_H_298734234 -#define HUF_H_298734234 - -/* *** Dependencies *** */ -#include "zstd_deps.h" /* size_t */ -#include "mem.h" /* U32 */ -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" - - -/* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ - -/* Error Management */ -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ - - -#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) -#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) - -/* *** Constants *** */ -#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ -#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ -#define HUF_SYMBOLVALUE_MAX 255 - -#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) -# error "HUF_TABLELOG_MAX is too large !" -#endif - - -/* **************************************** -* Static allocation -******************************************/ -/* HUF buffer bounds */ -#define HUF_CTABLEBOUND 129 -#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* static allocation of HUF's Compression Table */ -/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */ -typedef size_t HUF_CElt; /* consider it an incomplete type */ -#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ -#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ - HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ - -/* static allocation of HUF's DTable */ -typedef U32 HUF_DTable; -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) -#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \ - HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ - HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } - - -/* **************************************** -* Advanced decompression functions -******************************************/ - -/** - * Huffman flags bitset. - * For all flags, 0 is the default value. - */ -typedef enum { - /** - * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. - * Otherwise: Ignored. - */ - HUF_flags_bmi2 = (1 << 0), - /** - * If set: Test possible table depths to find the one that produces the smallest header + encoded size. - * If unset: Use heuristic to find the table depth. - */ - HUF_flags_optimalDepth = (1 << 1), - /** - * If set: If the previous table can encode the input, always reuse the previous table. 
- * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. - */ - HUF_flags_preferRepeat = (1 << 2), - /** - * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. - * If unset: Always histogram the entire input. - */ - HUF_flags_suspectUncompressible = (1 << 3), - /** - * If set: Don't use assembly implementations - * If unset: Allow using assembly implementations - */ - HUF_flags_disableAsm = (1 << 4), - /** - * If set: Don't use the fast decoding loop, always use the fallback decoding loop. - * If unset: Use the fast decoding loop when possible. - */ - HUF_flags_disableFast = (1 << 5) -} HUF_flags_e; - - -/* **************************************** - * HUF detailed API - * ****************************************/ -#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra - -/*! HUF_compress() does the following: - * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") - * 2. (optional) refine tableLog using HUF_optimalTableLog() - * 3. build Huffman table from count using HUF_buildCTable() - * 4. save Huffman table to memory buffer using HUF_writeCTable() - * 5. encode the data stream using HUF_compress4X_usingCTable() - * - * The following API allows targeting specific sub-functions for advanced tasks. - * For example, it's possible to compress several blocks using the same 'CTable', - * or to save and regenerate 'CTable' using external methods. - */ -unsigned HUF_minTableLog(unsigned symbolCardinality); -unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue); -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, - size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */ -size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); -size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); -int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); - -typedef enum { - HUF_repeat_none, /**< Cannot use the previous table */ - HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ - HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ - } HUF_repeat; - -/** HUF_compress4X_repeat() : - * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. - * If it uses hufTable it does not modify hufTable or repeat. - * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. 
- * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t HUF_compress4X_repeat(void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int flags); - -/** HUF_buildCTable_wksp() : - * Same as HUF_buildCTable(), but using externally allocated scratch buffer. - * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. - */ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) -#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) -size_t HUF_buildCTable_wksp (HUF_CElt* tree, - const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, - void* workSpace, size_t wkspSize); - -/*! HUF_readStats() : - * Read compact Huffman tree, saved by HUF_writeCTable(). - * `huffWeight` is destination buffer. - * @return : size read from `src` , or an error Code . - * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ -size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, - U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, - const void* src, size_t srcSize); - -/*! HUF_readStats_wksp() : - * Same as HUF_readStats() but takes an external workspace which must be - * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE. - * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. - */ -#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1) -#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned)) -size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, - U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, - const void* src, size_t srcSize, - void* workspace, size_t wkspSize, - int flags); - -/** HUF_readCTable() : - * Loading a CTable saved with HUF_writeCTable() */ -size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); - -/** HUF_getNbBitsFromCTable() : - * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0 - * Note 2 : is not inlined, as HUF_CElt definition is private - */ -U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); - -typedef struct { - BYTE tableLog; - BYTE maxSymbolValue; - BYTE unused[sizeof(size_t) - 2]; -} HUF_CTableHeader; - -/** HUF_readCTableHeader() : - * @returns The header from the CTable specifying the tableLog and the maxSymbolValue. - */ -HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable); - -/* - * HUF_decompress() does the following: - * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics - * 2. build Huffman table from save, using HUF_readDTableX?() - * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() - */ - -/** HUF_selectDecoder() : - * Tells which decoder is likely to decode faster, - * based on a set of pre-computed metrics. - * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . 
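To make the hufTable/repeat plumbing above concrete, a sketch of compressing consecutive equal-sized blocks while letting HUF reuse the previous table when allowed; block iteration and output handling are hypothetical and abbreviated:

    /* Sketch: repeated block compression with Huffman-table reuse. */
    static void huf_blocks_sketch(const BYTE* src, size_t blockSize, size_t nbBlocks,
                                  BYTE* dst, size_t dstCapacity)
    {
        U64 wksp[HUF_WORKSPACE_SIZE_U64];          /* suitably aligned scratch space */
        HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
        HUF_repeat repeat = HUF_repeat_none;       /* no previous table yet */
        int const flags = HUF_flags_optimalDepth;  /* add HUF_flags_bmi2 only when the CPU supports it */
        size_t b;

        for (b = 0; b < nbBlocks; b++) {
            size_t const cSize = HUF_compress4X_repeat(
                    dst, dstCapacity, src + b*blockSize, blockSize,
                    HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                    wksp, sizeof(wksp), hufTable, &repeat, flags);
            if (HUF_isError(cSize)) return;        /* error handling elided */
            /* On success, repeat/hufTable describe the table actually used,
             * so the next iteration may reuse it instead of rebuilding. */
            (void)cSize;                           /* writing out cSize bytes elided */
        }
    }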
- * Assumption : 0 < dstSize <= 128 KB */ -U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); - -/** - * The minimum workspace size for the `workSpace` used in - * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). - * - * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when - * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. - * Buffer overflow errors may potentially occur if code modifications result in - * a required workspace size greater than that specified in the following - * macro. - */ -#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) - - -/* ====================== */ -/* single stream variants */ -/* ====================== */ - -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); -/** HUF_compress1X_repeat() : - * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. - * If it uses hufTable it does not modify hufTable or repeat. - * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. - * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t HUF_compress1X_repeat(void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int flags); - -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */ -#endif - -/* BMI2 variants. - * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. - */ -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); -#endif -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); -#endif - -#endif /* HUF_H_298734234 */ - -#if defined (__cplusplus) -} -#endif diff --git a/zstandard_android/src/common/mem.h b/zstandard_android/src/common/mem.h deleted file mode 100644 index a02141c..0000000 --- a/zstandard_android/src/common/mem.h +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. 
- * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef MEM_H_MODULE -#define MEM_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - -/*-**************************************** -* Dependencies -******************************************/ -#include <stddef.h> /* size_t, ptrdiff_t */ -#include "compiler.h" /* __has_builtin */ -#include "debug.h" /* DEBUG_STATIC_ASSERT */ -#include "zstd_deps.h" /* ZSTD_memcpy */ - - -/*-**************************************** -* Compiler specifics -******************************************/ -#if defined(_MSC_VER) /* Visual Studio */ -# include <stdlib.h> /* _byteswap_ulong */ -# include <intrin.h> /* _byteswap_* */ -#elif defined(__ICCARM__) -# include <intrinsics.h> -#endif - -/*-************************************************************** -* Basic Types -*****************************************************************/ -#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# if defined(_AIX) -# include <inttypes.h> -# else -# include <stdint.h> /* intptr_t */ -# endif - typedef uint8_t BYTE; - typedef uint8_t U8; - typedef int8_t S8; - typedef uint16_t U16; - typedef int16_t S16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef int64_t S64; -#else -# include <limits.h> -#if CHAR_BIT != 8 -# error "this implementation requires char to be exactly 8-bit type" -#endif - typedef unsigned char BYTE; - typedef unsigned char U8; - typedef signed char S8; -#if USHRT_MAX != 65535 -# error "this implementation requires short to be exactly 16-bit type" -#endif - typedef unsigned short U16; - typedef signed short S16; -#if UINT_MAX != 4294967295 -# error "this implementation requires int to be exactly 32-bit type" -#endif - typedef unsigned int U32; - typedef signed int S32; -/* note : there are no limits defined for long long type in C90.
- * limits exist in C99, however, in such case, <stdint.h> is preferred */ - typedef unsigned long long U64; - typedef signed long long S64; -#endif - - -/*-************************************************************** -* Memory I/O API -*****************************************************************/ -/*=== Static platform detection ===*/ -MEM_STATIC unsigned MEM_32bits(void); -MEM_STATIC unsigned MEM_64bits(void); -MEM_STATIC unsigned MEM_isLittleEndian(void); - -/*=== Native unaligned read/write ===*/ -MEM_STATIC U16 MEM_read16(const void* memPtr); -MEM_STATIC U32 MEM_read32(const void* memPtr); -MEM_STATIC U64 MEM_read64(const void* memPtr); -MEM_STATIC size_t MEM_readST(const void* memPtr); - -MEM_STATIC void MEM_write16(void* memPtr, U16 value); -MEM_STATIC void MEM_write32(void* memPtr, U32 value); -MEM_STATIC void MEM_write64(void* memPtr, U64 value); - -/*=== Little endian unaligned read/write ===*/ -MEM_STATIC U16 MEM_readLE16(const void* memPtr); -MEM_STATIC U32 MEM_readLE24(const void* memPtr); -MEM_STATIC U32 MEM_readLE32(const void* memPtr); -MEM_STATIC U64 MEM_readLE64(const void* memPtr); -MEM_STATIC size_t MEM_readLEST(const void* memPtr); - -MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val); -MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val); -MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32); -MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64); -MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val); - -/*=== Big endian unaligned read/write ===*/ -MEM_STATIC U32 MEM_readBE32(const void* memPtr); -MEM_STATIC U64 MEM_readBE64(const void* memPtr); -MEM_STATIC size_t MEM_readBEST(const void* memPtr); - -MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32); -MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64); -MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val); - -/*=== Byteswap ===*/ -MEM_STATIC U32 MEM_swap32(U32 in); -MEM_STATIC U64 MEM_swap64(U64 in); -MEM_STATIC size_t MEM_swapST(size_t in); - - -/*-************************************************************** -* Memory I/O Implementation -*****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory: - * Method 0 : always use `memcpy()`. Safe and portable. - * Method 1 : Use compiler extension to set unaligned access. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment.
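The MEM_* declarations above exist so callers never perform raw unaligned or endian-dependent loads; a small sketch of typical use, with a hypothetical two-field little-endian header layout:

    /* Sketch: endian- and alignment-safe header access via the MEM_* API above. */
    typedef struct { U32 magic; U64 contentSize; } HeaderSketch;   /* hypothetical layout */

    static int parse_header_sketch(const void* src, size_t srcSize, HeaderSketch* h)
    {
        const BYTE* const p = (const BYTE*)src;
        if (srcSize < 12) return 0;            /* need 4 + 8 bytes */
        h->magic = MEM_readLE32(p);            /* safe on any alignment/endianness */
        h->contentSize = MEM_readLE64(p + 4);
        return 1;
    }

    static void write_header_sketch(void* dst, const HeaderSketch* h)
    {
        BYTE* const p = (BYTE*)dst;
        MEM_writeLE32(p, h->magic);            /* stores little-endian on any host */
        MEM_writeLE64(p + 4, h->contentSize);
    }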
- * Default : method 1 if supported, else method 0 - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# ifdef __GNUC__ -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } -MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } - -MEM_STATIC unsigned MEM_isLittleEndian(void) -{ -#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) - return 1; -#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) - return 0; -#elif defined(__clang__) && __LITTLE_ENDIAN__ - return 1; -#elif defined(__clang__) && __BIG_ENDIAN__ - return 0; -#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86) - return 1; -#elif defined(__DMC__) && defined(_M_IX86) - return 1; -#elif defined(__IAR_SYSTEMS_ICC__) && __LITTLE_ENDIAN__ - return 1; -#else - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -#endif -} - -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } -MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -typedef __attribute__((aligned(1))) U16 unalign16; -typedef __attribute__((aligned(1))) U32 unalign32; -typedef __attribute__((aligned(1))) U64 unalign64; -typedef __attribute__((aligned(1))) size_t unalignArch; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - -MEM_STATIC U16 MEM_read16(const void* memPtr) -{ - U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC U32 MEM_read32(const void* memPtr) -{ - U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC U64 MEM_read64(const void* memPtr) -{ - U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC size_t MEM_readST(const void* memPtr) -{ - size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) -{ - ZSTD_memcpy(memPtr, &value, sizeof(value)); -} - -MEM_STATIC void MEM_write32(void* memPtr, U32 value) -{ - ZSTD_memcpy(memPtr, &value, sizeof(value)); -} - -MEM_STATIC void MEM_write64(void* memPtr, U64 value) -{ - ZSTD_memcpy(memPtr, &value, sizeof(value)); -} - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - -MEM_STATIC U32 MEM_swap32_fallback(U32 in) -{ - return ((in << 24) & 0xff000000 ) | - ((in << 8) & 0x00ff0000 ) | - ((in >> 8) & 0x0000ff00 ) | - ((in >> 24) & 0x000000ff ); -} - -MEM_STATIC U32 MEM_swap32(U32 in) -{ -#if defined(_MSC_VER) /* Visual Studio */ - return _byteswap_ulong(in); -#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ - || (defined(__clang__) && __has_builtin(__builtin_bswap32)) - return __builtin_bswap32(in); -#elif defined(__ICCARM__) - return __REV(in); -#else - return MEM_swap32_fallback(in); -#endif -} - -MEM_STATIC U64 MEM_swap64_fallback(U64 in) -{ - return ((in << 56) & 0xff00000000000000ULL) | - ((in << 40) & 0x00ff000000000000ULL) | - ((in << 24) & 0x0000ff0000000000ULL) | - ((in << 8) & 0x000000ff00000000ULL) | - ((in >> 8) & 0x00000000ff000000ULL) | - ((in >> 24) & 0x0000000000ff0000ULL) | - ((in >> 40) & 0x000000000000ff00ULL) | - ((in >> 56) & 0x00000000000000ffULL); -} - -MEM_STATIC U64 MEM_swap64(U64 in) -{ -#if defined(_MSC_VER) /* Visual Studio */ - return _byteswap_uint64(in); -#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ - || (defined(__clang__) && __has_builtin(__builtin_bswap64)) - return __builtin_bswap64(in); -#else - return MEM_swap64_fallback(in); -#endif -} - -MEM_STATIC size_t MEM_swapST(size_t in) -{ - if (MEM_32bits()) - return (size_t)MEM_swap32((U32)in); - else - return (size_t)MEM_swap64((U64)in); -} - -/*=== Little endian r/w ===*/ - -MEM_STATIC U16 MEM_readLE16(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read16(memPtr); - else { - const BYTE* p = (const BYTE*)memPtr; - return (U16)(p[0] + (p[1]<<8)); - } -} - -MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) -{ - if (MEM_isLittleEndian()) { - MEM_write16(memPtr, val); - } else { - BYTE* p = (BYTE*)memPtr; - p[0] = (BYTE)val; - p[1] = (BYTE)(val>>8); - } -} - -MEM_STATIC U32 MEM_readLE24(const void* memPtr) -{ - return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16); -} - -MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) -{ - MEM_writeLE16(memPtr, (U16)val); - ((BYTE*)memPtr)[2] = (BYTE)(val>>16); -} - -MEM_STATIC U32 MEM_readLE32(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read32(memPtr); - else - return MEM_swap32(MEM_read32(memPtr)); -} - -MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) -{ - if (MEM_isLittleEndian()) - MEM_write32(memPtr, val32); - else - MEM_write32(memPtr, MEM_swap32(val32)); -} - -MEM_STATIC U64 MEM_readLE64(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read64(memPtr); - else - return MEM_swap64(MEM_read64(memPtr)); -} - -MEM_STATIC void MEM_writeLE64(void* 
memPtr, U64 val64) -{ - if (MEM_isLittleEndian()) - MEM_write64(memPtr, val64); - else - MEM_write64(memPtr, MEM_swap64(val64)); -} - -MEM_STATIC size_t MEM_readLEST(const void* memPtr) -{ - if (MEM_32bits()) - return (size_t)MEM_readLE32(memPtr); - else - return (size_t)MEM_readLE64(memPtr); -} - -MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) -{ - if (MEM_32bits()) - MEM_writeLE32(memPtr, (U32)val); - else - MEM_writeLE64(memPtr, (U64)val); -} - -/*=== Big endian r/w ===*/ - -MEM_STATIC U32 MEM_readBE32(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_swap32(MEM_read32(memPtr)); - else - return MEM_read32(memPtr); -} - -MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) -{ - if (MEM_isLittleEndian()) - MEM_write32(memPtr, MEM_swap32(val32)); - else - MEM_write32(memPtr, val32); -} - -MEM_STATIC U64 MEM_readBE64(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_swap64(MEM_read64(memPtr)); - else - return MEM_read64(memPtr); -} - -MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) -{ - if (MEM_isLittleEndian()) - MEM_write64(memPtr, MEM_swap64(val64)); - else - MEM_write64(memPtr, val64); -} - -MEM_STATIC size_t MEM_readBEST(const void* memPtr) -{ - if (MEM_32bits()) - return (size_t)MEM_readBE32(memPtr); - else - return (size_t)MEM_readBE64(memPtr); -} - -MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) -{ - if (MEM_32bits()) - MEM_writeBE32(memPtr, (U32)val); - else - MEM_writeBE64(memPtr, (U64)val); -} - -/* code only tested on 32 and 64 bits systems */ -MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } - - -#if defined (__cplusplus) -} -#endif - -#endif /* MEM_H_MODULE */ diff --git a/zstandard_android/src/common/pool.c b/zstandard_android/src/common/pool.c deleted file mode 100644 index 3adcefc..0000000 --- a/zstandard_android/src/common/pool.c +++ /dev/null @@ -1,371 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
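The MEM_readLE*/MEM_writeLE* helpers above fix the in-memory byte order regardless of host endianness. A self-contained illustration of that contract (editor's sketch in plain C99, not zstd code), using the zstd frame magic number as the test value:

```c
#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for MEM_readLE32(): assembles bytes LSB-first,
 * so the result is independent of the host's byte order. */
static uint32_t demo_readLE32(const void* memPtr)
{
    const uint8_t* p = (const uint8_t*)memPtr;
    return (uint32_t)p[0]
         | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16)
         | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* A zstd frame starts with the magic number 0xFD2FB528, stored LE. */
    const uint8_t frameStart[4] = { 0x28, 0xB5, 0x2F, 0xFD };
    assert(demo_readLE32(frameStart) == 0xFD2FB528U);
    return 0;
}
```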
- */ - - -/* ====== Dependencies ======= */ -#include "../common/allocations.h" /* ZSTD_customCalloc, ZSTD_customFree */ -#include "zstd_deps.h" /* size_t */ -#include "debug.h" /* assert */ -#include "pool.h" - -/* ====== Compiler specifics ====== */ -#if defined(_MSC_VER) -# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ -#endif - - -#ifdef ZSTD_MULTITHREAD - -#include "threading.h" /* pthread adaptation */ - -/* A job is a function and an opaque argument */ -typedef struct POOL_job_s { - POOL_function function; - void *opaque; -} POOL_job; - -struct POOL_ctx_s { - ZSTD_customMem customMem; - /* Keep track of the threads */ - ZSTD_pthread_t* threads; - size_t threadCapacity; - size_t threadLimit; - - /* The queue is a circular buffer */ - POOL_job *queue; - size_t queueHead; - size_t queueTail; - size_t queueSize; - - /* The number of threads working on jobs */ - size_t numThreadsBusy; - /* Indicates if the queue is empty */ - int queueEmpty; - - /* The mutex protects the queue */ - ZSTD_pthread_mutex_t queueMutex; - /* Condition variable for pushers to wait on when the queue is full */ - ZSTD_pthread_cond_t queuePushCond; - /* Condition variables for poppers to wait on when the queue is empty */ - ZSTD_pthread_cond_t queuePopCond; - /* Indicates if the queue is shutting down */ - int shutdown; -}; - -/* POOL_thread() : - * Work thread for the thread pool. - * Waits for jobs and executes them. - * @returns : NULL on failure else non-null. - */ -static void* POOL_thread(void* opaque) { - POOL_ctx* const ctx = (POOL_ctx*)opaque; - if (!ctx) { return NULL; } - for (;;) { - /* Lock the mutex and wait for a non-empty queue or until shutdown */ - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - - while ( ctx->queueEmpty - || (ctx->numThreadsBusy >= ctx->threadLimit) ) { - if (ctx->shutdown) { - /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit), - * a few threads will be shutdown while !queueEmpty, - * but enough threads will remain active to finish the queue */ - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return opaque; - } - ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); - } - /* Pop a job off the queue */ - { POOL_job const job = ctx->queue[ctx->queueHead]; - ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; - ctx->numThreadsBusy++; - ctx->queueEmpty = (ctx->queueHead == ctx->queueTail); - /* Unlock the mutex, signal a pusher, and run the job */ - ZSTD_pthread_cond_signal(&ctx->queuePushCond); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - - job.function(job.opaque); - - /* If the intended queue size was 0, signal after finishing job */ - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - ctx->numThreadsBusy--; - ZSTD_pthread_cond_signal(&ctx->queuePushCond); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - } - } /* for (;;) */ - assert(0); /* Unreachable */ -} - -/* ZSTD_createThreadPool() : public access point */ -POOL_ctx* ZSTD_createThreadPool(size_t numThreads) { - return POOL_create (numThreads, 0); -} - -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { - return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); -} - -POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, - ZSTD_customMem customMem) -{ - POOL_ctx* ctx; - /* Check parameters */ - if (!numThreads) { return NULL; } - /* Allocate the context and zero initialize */ - ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem); - if (!ctx) { return NULL; } - /* Initialize the job queue. 
- * It needs one extra space since one space is wasted to differentiate - * empty and full queues. - */ - ctx->queueSize = queueSize + 1; - ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem); - ctx->queueHead = 0; - ctx->queueTail = 0; - ctx->numThreadsBusy = 0; - ctx->queueEmpty = 1; - { - int error = 0; - error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); - error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); - error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); - if (error) { POOL_free(ctx); return NULL; } - } - ctx->shutdown = 0; - /* Allocate space for the thread handles */ - ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); - ctx->threadCapacity = 0; - ctx->customMem = customMem; - /* Check for errors */ - if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } - /* Initialize the threads */ - { size_t i; - for (i = 0; i < numThreads; ++i) { - if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { - ctx->threadCapacity = i; - POOL_free(ctx); - return NULL; - } } - ctx->threadCapacity = numThreads; - ctx->threadLimit = numThreads; - } - return ctx; -} - -/*! POOL_join() : - Shutdown the queue, wake any sleeping threads, and join all of the threads. -*/ -static void POOL_join(POOL_ctx* ctx) { - /* Shut down the queue */ - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - ctx->shutdown = 1; - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - /* Wake up sleeping threads */ - ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); - ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); - /* Join all of the threads */ - { size_t i; - for (i = 0; i < ctx->threadCapacity; ++i) { - ZSTD_pthread_join(ctx->threads[i]); /* note : could fail */ - } } -} - -void POOL_free(POOL_ctx *ctx) { - if (!ctx) { return; } - POOL_join(ctx); - ZSTD_pthread_mutex_destroy(&ctx->queueMutex); - ZSTD_pthread_cond_destroy(&ctx->queuePushCond); - ZSTD_pthread_cond_destroy(&ctx->queuePopCond); - ZSTD_customFree(ctx->queue, ctx->customMem); - ZSTD_customFree(ctx->threads, ctx->customMem); - ZSTD_customFree(ctx, ctx->customMem); -} - -/*! POOL_joinJobs() : - * Waits for all queued jobs to finish executing. 
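The job queue initialized above is a classic one-slot-wasted ring buffer: `queueSize = queueSize + 1` buys an unambiguous full/empty test. A minimal standalone sketch of that convention (editor's illustration; the `demo_*` names are not zstd's):

```c
#include <stddef.h>

#define DEMO_CAPACITY 4                   /* intended number of queued jobs */
#define DEMO_SIZE (DEMO_CAPACITY + 1)     /* one slot deliberately unused */

typedef struct { size_t head, tail; } demo_ring;

/* head == tail can now only mean "empty"... */
static int demo_isEmpty(const demo_ring* q) { return q->head == q->tail; }

/* ...while "tail one step behind head" means "full"; without the spare
 * slot both states would collapse onto head == tail. */
static int demo_isFull(const demo_ring* q)
{
    return q->head == (q->tail + 1) % DEMO_SIZE;
}
```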
- */ -void POOL_joinJobs(POOL_ctx* ctx) { - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) { - ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); - } - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); -} - -void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { - POOL_free (pool); -} - -size_t POOL_sizeof(const POOL_ctx* ctx) { - if (ctx==NULL) return 0; /* supports sizeof NULL */ - return sizeof(*ctx) - + ctx->queueSize * sizeof(POOL_job) - + ctx->threadCapacity * sizeof(ZSTD_pthread_t); -} - - -/* @return : 0 on success, 1 on error */ -static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) -{ - if (numThreads <= ctx->threadCapacity) { - if (!numThreads) return 1; - ctx->threadLimit = numThreads; - return 0; - } - /* numThreads > threadCapacity */ - { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); - if (!threadPool) return 1; - /* replace existing thread pool */ - ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(ZSTD_pthread_t)); - ZSTD_customFree(ctx->threads, ctx->customMem); - ctx->threads = threadPool; - /* Initialize additional threads */ - { size_t threadId; - for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) { - if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) { - ctx->threadCapacity = threadId; - return 1; - } } - } } - /* successfully expanded */ - ctx->threadCapacity = numThreads; - ctx->threadLimit = numThreads; - return 0; -} - -/* @return : 0 on success, 1 on error */ -int POOL_resize(POOL_ctx* ctx, size_t numThreads) -{ - int result; - if (ctx==NULL) return 1; - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - result = POOL_resize_internal(ctx, numThreads); - ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return result; -} - -/** - * Returns 1 if the queue is full and 0 otherwise. - * - * When queueSize is 1 (pool was created with an intended queueSize of 0), - * then a queue is empty if there is a thread free _and_ no job is waiting. 
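Taken together, the lifecycle above (create, add, join jobs, free) is used roughly as in the following hedged sketch (error handling trimmed; assumes a ZSTD_MULTITHREAD build and the pool.h API whose declarations follow below):

```c
#include <stdio.h>
#include "pool.h"   /* POOL_create / POOL_add / POOL_joinJobs / POOL_free */

typedef struct { int id; } demo_task;

static void demo_job(void* opaque)            /* signature of POOL_function */
{
    demo_task* const t = (demo_task*)opaque;
    printf("running job %d\n", t->id);
}

int demo_runPool(void)
{
    demo_task tasks[8];
    POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queue slots */);
    int i;
    if (pool == NULL) return 1;
    for (i = 0; i < 8; i++) {
        tasks[i].id = i;
        POOL_add(pool, demo_job, &tasks[i]);  /* opaque must outlive the job */
    }
    POOL_joinJobs(pool);                      /* wait for the queue to drain */
    POOL_free(pool);                          /* shutdown + join workers */
    return 0;
}
```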
- */ -static int isQueueFull(POOL_ctx const* ctx) { - if (ctx->queueSize > 1) { - return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); - } else { - return (ctx->numThreadsBusy == ctx->threadLimit) || - !ctx->queueEmpty; - } -} - - -static void -POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) -{ - POOL_job job; - job.function = function; - job.opaque = opaque; - assert(ctx != NULL); - if (ctx->shutdown) return; - - ctx->queueEmpty = 0; - ctx->queue[ctx->queueTail] = job; - ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; - ZSTD_pthread_cond_signal(&ctx->queuePopCond); -} - -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) -{ - assert(ctx != NULL); - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - /* Wait until there is space in the queue for the new job */ - while (isQueueFull(ctx) && (!ctx->shutdown)) { - ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); - } - POOL_add_internal(ctx, function, opaque); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); -} - - -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) -{ - assert(ctx != NULL); - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - if (isQueueFull(ctx)) { - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return 0; - } - POOL_add_internal(ctx, function, opaque); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return 1; -} - - -#else /* ZSTD_MULTITHREAD not defined */ - -/* ========================== */ -/* No multi-threading support */ -/* ========================== */ - - -/* We don't need any data, but if it is empty, malloc() might return NULL. */ -struct POOL_ctx_s { - int dummy; -}; -static POOL_ctx g_poolCtx; - -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { - return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); -} - -POOL_ctx* -POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) -{ - (void)numThreads; - (void)queueSize; - (void)customMem; - return &g_poolCtx; -} - -void POOL_free(POOL_ctx* ctx) { - assert(!ctx || ctx == &g_poolCtx); - (void)ctx; -} - -void POOL_joinJobs(POOL_ctx* ctx){ - assert(!ctx || ctx == &g_poolCtx); - (void)ctx; -} - -int POOL_resize(POOL_ctx* ctx, size_t numThreads) { - (void)ctx; (void)numThreads; - return 0; -} - -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { - (void)ctx; - function(opaque); -} - -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { - (void)ctx; - function(opaque); - return 1; -} - -size_t POOL_sizeof(const POOL_ctx* ctx) { - if (ctx==NULL) return 0; /* supports sizeof NULL */ - assert(ctx == &g_poolCtx); - return sizeof(*ctx); -} - -#endif /* ZSTD_MULTITHREAD */ diff --git a/zstandard_android/src/common/pool.h b/zstandard_android/src/common/pool.h deleted file mode 100644 index cca4de7..0000000 --- a/zstandard_android/src/common/pool.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef POOL_H -#define POOL_H - -#if defined (__cplusplus) -extern "C" { -#endif - - -#include "zstd_deps.h" -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ -#include "../zstd.h" - -typedef struct POOL_ctx_s POOL_ctx; - -/*! 
POOL_create() : - * Create a thread pool with at most `numThreads` threads. - * `numThreads` must be at least 1. - * The maximum number of queued jobs before blocking is `queueSize`. - * @return : POOL_ctx pointer on success, else NULL. -*/ -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); - -POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, - ZSTD_customMem customMem); - -/*! POOL_free() : - * Free a thread pool returned by POOL_create(). - */ -void POOL_free(POOL_ctx* ctx); - - -/*! POOL_joinJobs() : - * Waits for all queued jobs to finish executing. - */ -void POOL_joinJobs(POOL_ctx* ctx); - -/*! POOL_resize() : - * Expands or shrinks pool's number of threads. - * This is more efficient than releasing + creating a new context, - * since it tries to preserve and reuse existing threads. - * `numThreads` must be at least 1. - * @return : 0 when resize was successful, - * !0 (typically 1) if there is an error. - * note : only numThreads can be resized, queueSize remains unchanged. - */ -int POOL_resize(POOL_ctx* ctx, size_t numThreads); - -/*! POOL_sizeof() : - * @return threadpool memory usage - * note : compatible with NULL (returns 0 in this case) - */ -size_t POOL_sizeof(const POOL_ctx* ctx); - -/*! POOL_function : - * The function type that can be added to a thread pool. - */ -typedef void (*POOL_function)(void*); - -/*! POOL_add() : - * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. - * Possibly blocks until there is room in the queue. - * Note : The function may be executed asynchronously, - * therefore, `opaque` must live until function has been completed. - */ -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); - - -/*! POOL_tryAdd() : - * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. - * Returns immediately even if not (does not block). - * @return : 1 if successful, 0 if not. - */ -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); - - -#if defined (__cplusplus) -} -#endif - -#endif diff --git a/zstandard_android/src/common/portability_macros.h b/zstandard_android/src/common/portability_macros.h deleted file mode 100644 index b1d9765..0000000 --- a/zstandard_android/src/common/portability_macros.h +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_PORTABILITY_MACROS_H -#define ZSTD_PORTABILITY_MACROS_H - -/** - * This header file contains macro definitions to support portability. - * This header is shared between C and ASM code, so it MUST only - * contain macro definitions. It MUST not contain any C code. - * - * This header ONLY defines macros to detect platforms/feature support. - * - */ - - -/* compat. with non-clang compilers */ -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif - -/* compat. with non-clang compilers */ -#ifndef __has_builtin -# define __has_builtin(x) 0 -#endif - -/* compat. 
with non-clang compilers */ -#ifndef __has_feature -# define __has_feature(x) 0 -#endif - -/* detects whether we are being compiled under msan */ -#ifndef ZSTD_MEMORY_SANITIZER -# if __has_feature(memory_sanitizer) -# define ZSTD_MEMORY_SANITIZER 1 -# else -# define ZSTD_MEMORY_SANITIZER 0 -# endif -#endif - -/* detects whether we are being compiled under asan */ -#ifndef ZSTD_ADDRESS_SANITIZER -# if __has_feature(address_sanitizer) -# define ZSTD_ADDRESS_SANITIZER 1 -# elif defined(__SANITIZE_ADDRESS__) -# define ZSTD_ADDRESS_SANITIZER 1 -# else -# define ZSTD_ADDRESS_SANITIZER 0 -# endif -#endif - -/* detects whether we are being compiled under dfsan */ -#ifndef ZSTD_DATAFLOW_SANITIZER -# if __has_feature(dataflow_sanitizer) -# define ZSTD_DATAFLOW_SANITIZER 1 -# else -# define ZSTD_DATAFLOW_SANITIZER 0 -# endif -#endif - -/* Mark the internal assembly functions as hidden */ -#ifdef __ELF__ -# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func -#elif defined(__APPLE__) -# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func -#else -# define ZSTD_HIDE_ASM_FUNCTION(func) -#endif - -/* Enable runtime BMI2 dispatch based on the CPU. - * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. - */ -#ifndef DYNAMIC_BMI2 - #if ((defined(__clang__) && __has_attribute(__target__)) \ - || (defined(__GNUC__) \ - && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ - && (defined(__x86_64__) || defined(_M_X64)) \ - && !defined(__BMI2__) - # define DYNAMIC_BMI2 1 - #else - # define DYNAMIC_BMI2 0 - #endif -#endif - -/** - * Only enable assembly for GNU C compatible compilers, - * because other platforms may not support GAS assembly syntax. - * - * Only enable assembly for Linux / MacOS, other platforms may - * work, but they haven't been tested. This could likely be - * extended to BSD systems. - * - * Disable assembly when MSAN is enabled, because MSAN requires - * 100% of code to be instrumented to work. - */ -#if defined(__GNUC__) -# if defined(__linux__) || defined(__linux) || defined(__APPLE__) -# if ZSTD_MEMORY_SANITIZER -# define ZSTD_ASM_SUPPORTED 0 -# elif ZSTD_DATAFLOW_SANITIZER -# define ZSTD_ASM_SUPPORTED 0 -# else -# define ZSTD_ASM_SUPPORTED 1 -# endif -# else -# define ZSTD_ASM_SUPPORTED 0 -# endif -#else -# define ZSTD_ASM_SUPPORTED 0 -#endif - -/** - * Determines whether we should enable assembly for x86-64 - * with BMI2. - * - * Enable if all of the following conditions hold: - * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM - * - Assembly is supported - * - We are compiling for x86-64 and either: - * - DYNAMIC_BMI2 is enabled - * - BMI2 is supported at compile time - */ -#if !defined(ZSTD_DISABLE_ASM) && \ - ZSTD_ASM_SUPPORTED && \ - defined(__x86_64__) && \ - (DYNAMIC_BMI2 || defined(__BMI2__)) -# define ZSTD_ENABLE_ASM_X86_64_BMI2 1 -#else -# define ZSTD_ENABLE_ASM_X86_64_BMI2 0 -#endif - -/* - * For x86 ELF targets, add .note.gnu.property section for Intel CET in - * assembly sources when CET is enabled. - * - * Additionally, any function that may be called indirectly must begin - * with ZSTD_CET_ENDBRANCH. 
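DYNAMIC_BMI2 above enables the pattern of compiling a hot function twice and picking a variant at runtime from a CPUID probe. A hedged sketch of the idea (editor's illustration with hypothetical `demo_*` names; GCC/Clang on x86-64 assumed):

```c
#if (defined(__GNUC__) || defined(__clang__)) && defined(__x86_64__)

/* BMI2 variant: the target attribute lets the compiler emit BZHI/SHLX here
 * even though the rest of the binary is built without -mbmi2. */
__attribute__((target("bmi2")))
static unsigned long long demo_lowBits_bmi2(unsigned long long v, unsigned n)
{
    return v & ((1ULL << n) - 1);   /* precondition: n < 64 */
}

static unsigned long long demo_lowBits_scalar(unsigned long long v, unsigned n)
{
    return v & ((1ULL << n) - 1);
}

/* hasBmi2 would come from a one-time CPUID check (zstd uses ZSTD_cpuid()). */
static unsigned long long demo_lowBits(unsigned long long v, unsigned n, int hasBmi2)
{
    return hasBmi2 ? demo_lowBits_bmi2(v, n) : demo_lowBits_scalar(v, n);
}

#endif
```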
- */
-#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
- && defined(__has_include)
-# if __has_include(<cet.h>)
-# include <cet.h>
-# define ZSTD_CET_ENDBRANCH _CET_ENDBR
-# endif
-#endif
-
-#ifndef ZSTD_CET_ENDBRANCH
-# define ZSTD_CET_ENDBRANCH
-#endif
-
-#endif /* ZSTD_PORTABILITY_MACROS_H */
diff --git a/zstandard_android/src/common/threading.c b/zstandard_android/src/common/threading.c
deleted file mode 100644
index 25bb8b9..0000000
--- a/zstandard_android/src/common/threading.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
- * You can contact the author at:
- * - zstdmt source repository: https://github.com/mcmilk/zstdmt
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/**
- * This file will hold wrapper for systems, which do not support pthreads
- */
-
-#include "threading.h"
-
-/* create fake symbol to avoid empty translation unit warning */
-int g_ZSTD_threading_useless_symbol;
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper
- */
-
-
-/* === Dependencies === */
-#include <process.h>
-#include <errno.h>
-
-
-/* === Implementation === */
-
-typedef struct {
- void* (*start_routine)(void*);
- void* arg;
- int initialized;
- ZSTD_pthread_cond_t initialized_cond;
- ZSTD_pthread_mutex_t initialized_mutex;
-} ZSTD_thread_params_t;
-
-static unsigned __stdcall worker(void *arg)
-{
- void* (*start_routine)(void*);
- void* thread_arg;
-
- /* Initialized thread_arg and start_routine and signal main thread that we don't need it
- * to wait any longer.
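The handshake this comment describes exists because `ZSTD_thread_params_t` lives on the creator's stack: the child must copy what it needs and signal readiness before the creator is allowed to return. A condensed POSIX sketch of the same pattern (editor's illustration, not the Windows code that follows):

```c
#include <pthread.h>

typedef struct {
    void* (*start_routine)(void*);
    void* arg;
    int initialized;
    pthread_mutex_t lock;
    pthread_cond_t cond;
} demo_params;

static void* demo_worker(void* opaque)
{
    demo_params* const p = (demo_params*)opaque;
    void* (*fn)(void*) = p->start_routine;  /* copy out of creator's frame */
    void* arg = p->arg;

    pthread_mutex_lock(&p->lock);
    p->initialized = 1;                     /* creator may now pop `p` */
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->lock);         /* do not touch `p` after this */

    return fn(arg);
}
```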
- */ - { - ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg; - thread_arg = thread_param->arg; - start_routine = thread_param->start_routine; - - /* Signal main thread that we are running and do not depend on its memory anymore */ - ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex); - thread_param->initialized = 1; - ZSTD_pthread_cond_signal(&thread_param->initialized_cond); - ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex); - } - - start_routine(thread_arg); - - return 0; -} - -int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, - void* (*start_routine) (void*), void* arg) -{ - ZSTD_thread_params_t thread_param; - (void)unused; - - if (thread==NULL) return -1; - *thread = NULL; - - thread_param.start_routine = start_routine; - thread_param.arg = arg; - thread_param.initialized = 0; - - /* Setup thread initialization synchronization */ - if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) { - /* Should never happen on Windows */ - return -1; - } - if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) { - /* Should never happen on Windows */ - ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); - return -1; - } - - /* Spawn thread */ - *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL); - if (*thread==NULL) { - ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); - ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); - return errno; - } - - /* Wait for thread to be initialized */ - ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex); - while(!thread_param.initialized) { - ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex); - } - ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex); - ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); - ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); - - return 0; -} - -int ZSTD_pthread_join(ZSTD_pthread_t thread) -{ - DWORD result; - - if (!thread) return 0; - - result = WaitForSingleObject(thread, INFINITE); - CloseHandle(thread); - - switch (result) { - case WAIT_OBJECT_0: - return 0; - case WAIT_ABANDONED: - return EINVAL; - default: - return GetLastError(); - } -} - -#endif /* ZSTD_MULTITHREAD */ - -#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32) - -#define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" - -int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr) -{ - assert(mutex != NULL); - *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t)); - if (!*mutex) - return 1; - return pthread_mutex_init(*mutex, attr); -} - -int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) -{ - assert(mutex != NULL); - if (!*mutex) - return 0; - { - int const ret = pthread_mutex_destroy(*mutex); - ZSTD_free(*mutex); - return ret; - } -} - -int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr) -{ - assert(cond != NULL); - *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t)); - if (!*cond) - return 1; - return pthread_cond_init(*cond, attr); -} - -int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond) -{ - assert(cond != NULL); - if (!*cond) - return 0; - { - int const ret = pthread_cond_destroy(*cond); - ZSTD_free(*cond); - return ret; - } -} - -#endif diff --git a/zstandard_android/src/common/threading.h b/zstandard_android/src/common/threading.h deleted file mode 100644 index fb5c1c8..0000000 --- a/zstandard_android/src/common/threading.h +++ /dev/null @@ -1,150 +0,0 @@ 
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
- * You can contact the author at:
- * - zstdmt source repository: https://github.com/mcmilk/zstdmt
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef THREADING_H_938743
-#define THREADING_H_938743
-
-#include "debug.h"
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper
- */
-#ifdef WINVER
-# undef WINVER
-#endif
-#define WINVER 0x0600
-
-#ifdef _WIN32_WINNT
-# undef _WIN32_WINNT
-#endif
-#define _WIN32_WINNT 0x0600
-
-#ifndef WIN32_LEAN_AND_MEAN
-# define WIN32_LEAN_AND_MEAN
-#endif
-
-#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
-#include <windows.h>
-#undef ERROR
-#define ERROR(name) ZSTD_ERROR(name)
-
-
-/* mutex */
-#define ZSTD_pthread_mutex_t CRITICAL_SECTION
-#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0)
-#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
-#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
-#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
-
-/* condition variable */
-#define ZSTD_pthread_cond_t CONDITION_VARIABLE
-#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0)
-#define ZSTD_pthread_cond_destroy(a) ((void)(a))
-#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
-#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
-#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
-
-/* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef HANDLE ZSTD_pthread_t;
-
-int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
- void* (*start_routine) (void*), void* arg);
-
-int ZSTD_pthread_join(ZSTD_pthread_t thread);
-
-/**
- * add here more wrappers as required
- */
-
-
-#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
-/* === POSIX Systems === */
-# include <pthread.h>
-
-#if DEBUGLEVEL < 1
-
-#define ZSTD_pthread_mutex_t pthread_mutex_t
-#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
-#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
-#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
-#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
-
-#define ZSTD_pthread_cond_t pthread_cond_t
-#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
-#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
-#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
-#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
-#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
-
-#define ZSTD_pthread_t pthread_t
-#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a) pthread_join((a),NULL)
-
-#else /* DEBUGLEVEL >= 1 */
-
-/* Debug implementation of threading.
- * In this implementation we use pointers for mutexes and condition variables.
- * This way, if we forget to init/destroy them the program will crash or ASAN
- * will report leaks.
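A sketch of why the debug build (DEBUGLEVEL >= 1, implemented in threading.c above) swaps `pthread_mutex_t` for a heap-allocated pointer: a forgotten destroy becomes a leak that ASan/LSan reports, and use of an uninitialized mutex dereferences NULL and crashes immediately (editor's illustration with `demo_*` names):

```c
#include <pthread.h>
#include <stdlib.h>

typedef pthread_mutex_t* demo_mutex_t;   /* a pointer, as in the debug build */

static int demo_mutex_init(demo_mutex_t* mutex)
{
    *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
    if (*mutex == NULL) return 1;
    return pthread_mutex_init(*mutex, NULL);
}

static int demo_mutex_destroy(demo_mutex_t* mutex)
{
    int ret;
    if (*mutex == NULL) return 0;
    ret = pthread_mutex_destroy(*mutex);
    free(*mutex);      /* never reached if destroy is forgotten -> leak */
    *mutex = NULL;
    return ret;
}
```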
- */ - -#define ZSTD_pthread_mutex_t pthread_mutex_t* -int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr); -int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex); -#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a)) -#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a)) - -#define ZSTD_pthread_cond_t pthread_cond_t* -int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr); -int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); -#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b)) -#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a)) -#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a)) - -#define ZSTD_pthread_t pthread_t -#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a) pthread_join((a),NULL) - -#endif - -#else /* ZSTD_MULTITHREAD not defined */ -/* No multithreading support */ - -typedef int ZSTD_pthread_mutex_t; -#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) -#define ZSTD_pthread_mutex_destroy(a) ((void)(a)) -#define ZSTD_pthread_mutex_lock(a) ((void)(a)) -#define ZSTD_pthread_mutex_unlock(a) ((void)(a)) - -typedef int ZSTD_pthread_cond_t; -#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) -#define ZSTD_pthread_cond_destroy(a) ((void)(a)) -#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) -#define ZSTD_pthread_cond_signal(a) ((void)(a)) -#define ZSTD_pthread_cond_broadcast(a) ((void)(a)) - -/* do not use ZSTD_pthread_t */ - -#endif /* ZSTD_MULTITHREAD */ - -#if defined (__cplusplus) -} -#endif - -#endif /* THREADING_H_938743 */ diff --git a/zstandard_android/src/common/zstd_common.c b/zstandard_android/src/common/zstd_common.c deleted file mode 100644 index 3f04c22..0000000 --- a/zstandard_android/src/common/zstd_common.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - - -/*-************************************* -* Dependencies -***************************************/ -#define ZSTD_DEPS_NEED_MALLOC -#include "error_private.h" -#include "zstd_internal.h" - - -/*-**************************************** -* Version -******************************************/ -unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } - -const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } - - -/*-**************************************** -* ZSTD Error Management -******************************************/ -#undef ZSTD_isError /* defined within zstd_internal.h */ -/*! ZSTD_isError() : - * tells if a return value is an error code - * symbol is required for external callers */ -unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } - -/*! ZSTD_getErrorName() : - * provides error code string from function result (useful for debugging) */ -const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } - -/*! ZSTD_getError() : - * convert a `size_t` function result into a proper ZSTD_errorCode enum */ -ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } - -/*! 
ZSTD_getErrorString() : - * provides error code string from enum */ -const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } diff --git a/zstandard_android/src/common/zstd_internal.h b/zstandard_android/src/common/zstd_internal.h deleted file mode 100644 index ecb9cfb..0000000 --- a/zstandard_android/src/common/zstd_internal.h +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_CCOMMON_H_MODULE -#define ZSTD_CCOMMON_H_MODULE - -/* this module contains definitions which must be identical - * across compression, decompression and dictBuilder. - * It also contains a few functions useful to at least 2 of them - * and which benefit from being inlined */ - -/*-************************************* -* Dependencies -***************************************/ -#include "compiler.h" -#include "cpu.h" -#include "mem.h" -#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ -#include "error_private.h" -#define ZSTD_STATIC_LINKING_ONLY -#include "../zstd.h" -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" -#include "huf.h" -#ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ -#endif -#include "xxhash.h" /* XXH_reset, update, digest */ -#ifndef ZSTD_NO_TRACE -# include "zstd_trace.h" -#else -# define ZSTD_TRACE 0 -#endif - -#if defined (__cplusplus) -extern "C" { -#endif - -/* ---- static assert (debug) --- */ -#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) -#define ZSTD_isError ERR_isError /* for inlining */ -#define FSE_isError ERR_isError -#define HUF_isError ERR_isError - - -/*-************************************* -* shared macros -***************************************/ -#undef MIN -#undef MAX -#define MIN(a,b) ((a)<(b) ? (a) : (b)) -#define MAX(a,b) ((a)>(b) ? 
(a) : (b))
-#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
-
-
-/*-*************************************
-* Common constants
-***************************************/
-#define ZSTD_OPT_NUM (1<<12)
-
-#define ZSTD_REP_NUM 3 /* number of repcodes */
-static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BIT7 128
-#define BIT6 64
-#define BIT5 32
-#define BIT4 16
-#define BIT1 2
-#define BIT0 1
-
-#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
-static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
-
-#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
-
-#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
-typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
-
-#define ZSTD_FRAMECHECKSUMSIZE 4
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
-#define MIN_LITERALS_FOR_4_STREAMS 6
-
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-
-#define Litbits 8
-#define LitHufLog 11
-#define MaxLit ((1<<Litbits) - 1)
-#define MaxML 52
-#define MaxLL 35
-#define DefaultMaxOff 28
-#define MaxOff 31
-#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog 9
-#define LLFSELog 9
-#define OffFSELog 8
-#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
-#define MaxMLBits 16
-#define MaxLLBits 16
-
-#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
-/* Each table cannot take more than #symbols * FSELog bits */
-#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
-
-static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 2, 2, 3, 3,
- 4, 6, 7, 8, 9,10,11,12,
- 13,14,15,16
-};
-static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
- 4, 3, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 3, 2, 1, 1, 1, 1, 1,
- -1,-1,-1,-1
-};
-#define LL_DEFAULTNORMLOG 6 /* for static allocation */
-static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 2, 2, 3, 3,
- 4, 4, 5, 7, 8, 9,10,11,
- 12,13,14,15,16
-};
-static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
- 1, 4, 3, 2, 2, 2, 2, 2,
- 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1,-1,-1,
- -1,-1,-1,-1,-1
-};
-#define ML_DEFAULTNORMLOG 6 /* for static allocation */
-static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
- 1, 1, 1, 1, 1, 1, 2, 2,
- 2, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- -1,-1,-1,-1,-1
-};
-#define OF_DEFAULTNORMLOG 5 /* for static allocation */
-static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
-
-
-/*-*******************************************
-* Shared functions to include for inlining
-*********************************************/
-static void ZSTD_copy8(void* dst, const void* src) {
-#if defined(ZSTD_ARCH_ARM_NEON)
- vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
-#else
- ZSTD_memcpy(dst, src, 8);
-#endif
-}
-#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while(0)
-
-/* Need to use memmove here since the literal buffer can now be located within
- the dst buffer. In circumstances where the op "catches up" to where the
- literal buffer is, there can be partial overlaps in this call on the final
- copy if the literal is being shifted by less than 16 bytes. */
-static void ZSTD_copy16(void* dst, const void* src) {
-#if defined(ZSTD_ARCH_ARM_NEON)
- vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
-#elif defined(ZSTD_ARCH_X86_SSE2)
- _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
-#elif defined(__GNUC__)
- __asm__ __volatile__("": : :"memory");
- ZSTD_memmove(dst, src, 16);
- __asm__ __volatile__("": : :"memory");
-#else
- ZSTD_memmove(dst, src, 16);
-#endif
-}
-#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while(0)
-
-#define WILDCOPY_OVERLENGTH 32
-#define WILDCOPY_VECLEN 16
-
-typedef enum {
- ZSTD_no_overlap,
- ZSTD_overlap_src_before_dst
- /* ZSTD_overlap_dst_before_src, */
-} ZSTD_overlap_e;
-
-/*! ZSTD_wildcopy() :
- * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
- * @param ovtype controls the overlap detection
- * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
- * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
- * The src buffer must be before the dst buffer.
- */
-MEM_STATIC FORCE_INLINE_ATTR
-void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
-{
- ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
- const BYTE* ip = (const BYTE*)src;
- BYTE* op = (BYTE*)dst;
- BYTE* const oend = op + length;
-
- if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
- /* Handle short offset copies. */
- do {
- COPY8(op, ip);
- } while (op < oend);
- } else {
- assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
- /* Separate out the first COPY16() call because the copy length is
- * almost certain to be short, so the branches have different
- * probabilities. Since it is almost certain to be short, only do
- * one COPY16() in the first call. Then, do two calls per loop since
- * at that point it is more likely to have a high trip count.
- */
- ZSTD_copy16(op, ip);
- if (16 >= length) return;
- op += 16;
- ip += 16;
- do {
- COPY16(op, ip);
- COPY16(op, ip);
- }
- while (op < oend);
- }
-}
-
-MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
- size_t const length = MIN(dstCapacity, srcSize);
- if (length > 0) {
- ZSTD_memcpy(dst, src, length);
- }
- return length;
-}
-
-/* define "workspace is too large" as this number of times larger than needed */
-#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
-
-/* when workspace is continuously too large
- * during at least this number of times,
- * context's memory usage is considered wasteful,
- * because it's sized to handle a worst case scenario which rarely happens.
- * In which case, resize it down to free some memory */
-#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
-
-/* Controls whether the input/output buffer is buffered or stable. */
-typedef enum {
- ZSTD_bm_buffered = 0, /* Buffer the input/output */
- ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
-} ZSTD_bufferMode_e;
-
-
-/*-*******************************************
-* Private declarations
-*********************************************/
-typedef struct seqDef_s {
- U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
- U16 litLength;
- U16 mlBase; /* mlBase == matchLength - MINMATCH */
-} seqDef;
-
-/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t.
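ZSTD_wildcopy() above is allowed to write (and read) past `length`, which is why zstd buffers reserve WILDCOPY_OVERLENGTH bytes of slack. A standalone sketch of the caller-visible contract (editor's illustration, not the real implementation):

```c
#include <string.h>

#define DEMO_OVERLENGTH 32   /* mirrors WILDCOPY_OVERLENGTH */

/* Copies in fixed 16-byte vectors, rounding `length` up (length==0 still
 * copies 16 bytes); the final store may land up to 15 bytes past
 * dst+length, so buffers need trailing slack. */
static void demo_wildcopy(void* dst, const void* src, size_t length)
{
    unsigned char* op = (unsigned char*)dst;
    const unsigned char* ip = (const unsigned char*)src;
    unsigned char* const oend = op + length;
    do {
        memcpy(op, ip, 16);
        op += 16; ip += 16;
    } while (op < oend);
}

/* Callers therefore size outputs as: payloadSize + DEMO_OVERLENGTH. */
```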
*/ -typedef enum { - ZSTD_llt_none = 0, /* no longLengthType */ - ZSTD_llt_literalLength = 1, /* represents a long literal */ - ZSTD_llt_matchLength = 2 /* represents a long match */ -} ZSTD_longLengthType_e; - -typedef struct { - seqDef* sequencesStart; - seqDef* sequences; /* ptr to end of sequences */ - BYTE* litStart; - BYTE* lit; /* ptr to end of literals */ - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; - size_t maxNbSeq; - size_t maxNbLit; - - /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength - * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment - * the existing value of the litLength or matchLength by 0x10000. - */ - ZSTD_longLengthType_e longLengthType; - U32 longLengthPos; /* Index of the sequence to apply long length modification to */ -} seqStore_t; - -typedef struct { - U32 litLength; - U32 matchLength; -} ZSTD_sequenceLength; - -/** - * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences - * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. - */ -MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) -{ - ZSTD_sequenceLength seqLen; - seqLen.litLength = seq->litLength; - seqLen.matchLength = seq->mlBase + MINMATCH; - if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { - if (seqStore->longLengthType == ZSTD_llt_literalLength) { - seqLen.litLength += 0x10000; - } - if (seqStore->longLengthType == ZSTD_llt_matchLength) { - seqLen.matchLength += 0x10000; - } - } - return seqLen; -} - -/** - * Contains the compressed frame size and an upper-bound for the decompressed frame size. - * Note: before using `compressedSize`, check for errors using ZSTD_isError(). - * similarly, before using `decompressedBound`, check for errors using: - * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` - */ -typedef struct { - size_t nbBlocks; - size_t compressedSize; - unsigned long long decompressedBound; -} ZSTD_frameSizeInfo; /* decompress & legacy */ - -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - - -/* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! */ -void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ - - -typedef struct { - blockType_e blockType; - U32 lastBlock; - U32 origSize; -} blockProperties_t; /* declared here for decompress and fullbench */ - -/*! ZSTD_getcBlockSize() : - * Provides the size of compressed block from block header `src` */ -/* Used by: decompress, fullbench */ -size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, - blockProperties_t* bpPtr); - -/*! ZSTD_decodeSeqHeaders() : - * decode sequence header from src */ -/* Used by: zstd_decompress_block, fullbench */ -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, - const void* src, size_t srcSize); - -/** - * @returns true iff the CPU supports dynamic BMI2 dispatch. 
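Since seqDef stores litLength and mlBase in 16-bit fields, at most one sequence per block may exceed 65535; ZSTD_getSequenceLength() above re-adds the 0x10000 that was recorded out-of-band. A small worked example of the decoding arithmetic (editor's illustration):

```c
#include <assert.h>

static void demo_longLength(void)
{
    /* A literal run of 70000 bytes does not fit in a U16 field. */
    unsigned const realLitLength = 70000;
    unsigned const storedU16 = realLitLength - 0x10000;   /* 4464, fits */

    /* The seqStore marks this sequence via longLengthPos and
     * ZSTD_llt_literalLength; readers reverse the transform: */
    assert(storedU16 + 0x10000 == realLitLength);

    /* Match lengths additionally re-add MINMATCH (3): mlBase = len - 3. */
    {   unsigned const matchLength = 10;
        unsigned const mlBase = matchLength - 3;
        assert(mlBase + 3 == matchLength);
    }
}
```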
- */
-MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
-{
- ZSTD_cpuid_t cpuid = ZSTD_cpuid();
- return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/zstandard_android/src/common/zstd_trace.h b/zstandard_android/src/common/zstd_trace.h
deleted file mode 100644
index 173d63f..0000000
--- a/zstandard_android/src/common/zstd_trace.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_TRACE_H
-#define ZSTD_TRACE_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include <stddef.h>
-
-/* weak symbol support
- * For now, enable conservatively:
- * - Only GNUC
- * - Only ELF
- * - Only x86-64, i386, aarch64 and risc-v.
- * Also, explicitly disable on platforms known not to work so they aren't
- * forgotten in the future.
- */
-#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
- defined(__GNUC__) && defined(__ELF__) && \
- (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
- defined(_M_IX86) || defined(__aarch64__) || defined(__riscv)) && \
- !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
- !defined(__CYGWIN__) && !defined(_AIX)
-# define ZSTD_HAVE_WEAK_SYMBOLS 1
-#else
-# define ZSTD_HAVE_WEAK_SYMBOLS 0
-#endif
-#if ZSTD_HAVE_WEAK_SYMBOLS
-# define ZSTD_WEAK_ATTR __attribute__((__weak__))
-#else
-# define ZSTD_WEAK_ATTR
-#endif
-
-/* Only enable tracing when weak symbols are available. */
-#ifndef ZSTD_TRACE
-# define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS
-#endif
-
-#if ZSTD_TRACE
-
-struct ZSTD_CCtx_s;
-struct ZSTD_DCtx_s;
-struct ZSTD_CCtx_params_s;
-
-typedef struct {
- /**
- * ZSTD_VERSION_NUMBER
- *
- * This is guaranteed to be the first member of ZSTD_trace.
- * Otherwise, this struct is not stable between versions. If
- * the version number does not match your expectation, you
- * should not interpret the rest of the struct.
- */
- unsigned version;
- /**
- * Non-zero if streaming (de)compression is used.
- */
- unsigned streaming;
- /**
- * The dictionary ID.
- */
- unsigned dictionaryID;
- /**
- * Is the dictionary cold?
- * Only set on decompression.
- */
- unsigned dictionaryIsCold;
- /**
- * The dictionary size or zero if no dictionary.
- */
- size_t dictionarySize;
- /**
- * The uncompressed size of the data.
- */
- size_t uncompressedSize;
- /**
- * The compressed size of the data.
- */
- size_t compressedSize;
- /**
- * The fully resolved CCtx parameters (NULL on decompression).
- */
- struct ZSTD_CCtx_params_s const* params;
- /**
- * The ZSTD_CCtx pointer (NULL on decompression).
- */
- struct ZSTD_CCtx_s const* cctx;
- /**
- * The ZSTD_DCtx pointer (NULL on compression).
- */
- struct ZSTD_DCtx_s const* dctx;
-} ZSTD_Trace;
-
-/**
- * A tracing context. It must be 0 when tracing is disabled.
- * Otherwise, any non-zero value returned by a tracing begin()
- * function is presented to any subsequent calls to end().
- *
- * Any non-zero value is treated as tracing is enabled and not
- * interpreted by the library.
- *
- * Two possible uses are:
- * * A timestamp for when the begin() function was called.
- * * A unique key identifying the (de)compression, like the - * address of the [dc]ctx pointer if you need to track - * more information than just a timestamp. - */ -typedef unsigned long long ZSTD_TraceCtx; - -/** - * Trace the beginning of a compression call. - * @param cctx The dctx pointer for the compression. - * It can be used as a key to map begin() to end(). - * @returns Non-zero if tracing is enabled. The return value is - * passed to ZSTD_trace_compress_end(). - */ -ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_compress_begin( - struct ZSTD_CCtx_s const* cctx); - -/** - * Trace the end of a compression call. - * @param ctx The return value of ZSTD_trace_compress_begin(). - * @param trace The zstd tracing info. - */ -ZSTD_WEAK_ATTR void ZSTD_trace_compress_end( - ZSTD_TraceCtx ctx, - ZSTD_Trace const* trace); - -/** - * Trace the beginning of a decompression call. - * @param dctx The dctx pointer for the decompression. - * It can be used as a key to map begin() to end(). - * @returns Non-zero if tracing is enabled. The return value is - * passed to ZSTD_trace_compress_end(). - */ -ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_decompress_begin( - struct ZSTD_DCtx_s const* dctx); - -/** - * Trace the end of a decompression call. - * @param ctx The return value of ZSTD_trace_decompress_begin(). - * @param trace The zstd tracing info. - */ -ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end( - ZSTD_TraceCtx ctx, - ZSTD_Trace const* trace); - -#endif /* ZSTD_TRACE */ - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_TRACE_H */ diff --git a/zstandard_android/src/compress/hist.c b/zstandard_android/src/compress/hist.c deleted file mode 100644 index e2fb431..0000000 --- a/zstandard_android/src/compress/hist.c +++ /dev/null @@ -1,181 +0,0 @@ -/* ****************************************************************** - * hist : Histogram functions - * part of Finite State Entropy project - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * You can contact the author at : - * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - Public forum : https://groups.google.com/forum/#!forum/lz4c - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. -****************************************************************** */ - -/* --- dependencies --- */ -#include "../common/mem.h" /* U32, BYTE, etc. 
*/
-#include "../common/debug.h" /* assert, DEBUGLOG */
-#include "../common/error_private.h" /* ERROR */
-#include "hist.h"
-
-
-/* --- Error management --- */
-unsigned HIST_isError(size_t code) { return ERR_isError(code); }
-
-/*-**************************************************************
- * Histogram functions
- ****************************************************************/
-unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* src, size_t srcSize)
-{
- const BYTE* ip = (const BYTE*)src;
- const BYTE* const end = ip + srcSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned largestCount=0;
-
- ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
- if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
-
- while (ip<end) {
- assert(*ip <= maxSymbolValue);
- count[*ip]++;
- ip++;
- }
-
- while (!count[maxSymbolValue]) maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
-
- { U32 s;
- for (s=0; s<=maxSymbolValue; s++)
- if (count[s] > largestCount) largestCount = count[s];
- }
-
- return largestCount;
-}
-
-typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
-
-/* HIST_count_parallel_wksp() :
- * store histogram into 4 intermediate tables, recombined at the end.
- * this design makes better use of OoO cpus,
- * and is noticeably faster when some values are heavily repeated.
- * But it needs some additional workspace for intermediate tables.
- * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
- * @return : largest histogram frequency,
- * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
-static size_t HIST_count_parallel_wksp(
- unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize,
- HIST_checkInput_e check,
- U32* const workSpace)
-{
- const BYTE* ip = (const BYTE*)source;
- const BYTE* const iend = ip+sourceSize;
- size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
- unsigned max=0;
- U32* const Counting1 = workSpace;
- U32* const Counting2 = Counting1 + 256;
- U32* const Counting3 = Counting2 + 256;
- U32* const Counting4 = Counting3 + 256;
-
- /* safety checks */
- assert(*maxSymbolValuePtr <= 255);
- if (!sourceSize) {
- ZSTD_memset(count, 0, countSize);
- *maxSymbolValuePtr = 0;
- return 0;
- }
- ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
-
- /* by stripes of 16 bytes */
- { U32 cached = MEM_read32(ip); ip += 4;
- while (ip < iend-15) {
- U32 c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- }
- ip-=4;
- }
-
- /* finish last symbols */
- while (ip<iend) Counting1[*ip++]++;
-
- { U32 s;
- for (s=0; s<256; s++) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s] > max) max = Counting1[s];
- } }
-
- { unsigned maxSymbolValue = 255;
- while (!Counting1[maxSymbolValue]) maxSymbolValue--;
- if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
- *maxSymbolValuePtr = maxSymbolValue;
- ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
- }
- return (size_t)max;
-}
-
-/* HIST_countFast_wksp() :
- * Same as HIST_countFast(), but using an externally provided scratch buffer.
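The 4-table trick in HIST_count_parallel_wksp() above removes the store-to-load dependency that a single count[] row creates when one byte value dominates the input. A minimal standalone version of the idea (editor's sketch, scalar loads instead of MEM_read32):

```c
#include <stddef.h>

static unsigned demo_hist4(unsigned count[256], const unsigned char* src, size_t n)
{
    unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
    unsigned max = 0;
    size_t i = 0;

    /* Each of the four tables is updated from its own input lane, so
     * repeated byte values no longer serialize on one counter. */
    for (; i + 4 <= n; i += 4) {
        c1[src[i+0]]++;
        c2[src[i+1]]++;
        c3[src[i+2]]++;
        c4[src[i+3]]++;
    }
    for (; i < n; i++) c1[src[i]]++;   /* tail bytes */

    {   int s;
        for (s = 0; s < 256; s++) {
            count[s] = c1[s] + c2[s] + c3[s] + c4[s];
            if (count[s] > max) max = count[s];
    }   }
    return max;
}
```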
- * `workSpace` is a writable buffer which must be 4-bytes aligned, - * `workSpaceSize` must be >= HIST_WKSP_SIZE - */ -size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize, - void* workSpace, size_t workSpaceSize) -{ - if (sourceSize < 1500) /* heuristic threshold */ - return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); - if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); -} - -/* HIST_count_wksp() : - * Same as HIST_count(), but using an externally provided scratch buffer. - * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ -size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize, - void* workSpace, size_t workSpaceSize) -{ - if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); - if (*maxSymbolValuePtr < 255) - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); - *maxSymbolValuePtr = 255; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); -} - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ -size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize) -{ - unsigned tmpCounters[HIST_WKSP_SIZE_U32]; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); -} - -size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize) -{ - unsigned tmpCounters[HIST_WKSP_SIZE_U32]; - return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); -} -#endif diff --git a/zstandard_android/src/compress/huf_compress.c b/zstandard_android/src/compress/huf_compress.c deleted file mode 100644 index ea00072..0000000 --- a/zstandard_android/src/compress/huf_compress.c +++ /dev/null @@ -1,1464 +0,0 @@ -/* ****************************************************************** - * Huffman encoder, part of New Generation Entropy library - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * You can contact the author at : - * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - Public forum : https://groups.google.com/forum/#!forum/lz4c - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
-****************************************************************** */ - -/* ************************************************************** -* Compiler specifics -****************************************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/* ************************************************************** -* Includes -****************************************************************/ -#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ -#include "../common/compiler.h" -#include "../common/bitstream.h" -#include "hist.h" -#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ -#include "../common/fse.h" /* header compression */ -#include "../common/huf.h" -#include "../common/error_private.h" -#include "../common/bits.h" /* ZSTD_highbit32 */ - - -/* ************************************************************** -* Error Management -****************************************************************/ -#define HUF_isError ERR_isError -#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ - - -/* ************************************************************** -* Required declarations -****************************************************************/ -typedef struct nodeElt_s { - U32 count; - U16 parent; - BYTE byte; - BYTE nbBits; -} nodeElt; - - -/* ************************************************************** -* Debug Traces -****************************************************************/ - -#if DEBUGLEVEL >= 2 - -static size_t showU32(const U32* arr, size_t size) -{ - size_t u; - for (u=0; u<size; u++) { - RAWLOG(6, " %u", arr[u]); (void)arr; - } - RAWLOG(6, " \n"); - return size; -} - -static size_t HUF_getNbBits(HUF_CElt elt); - -static size_t showCTableBits(const HUF_CElt* ctable, size_t size) -{ - size_t u; - for (u=0; u<size; u++) { - RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable; - } - RAWLOG(6, " \n"); - return size; -} - -static size_t showHNodeSymbols(const nodeElt* hnode, size_t size) -{ - size_t u; - for (u=0; u<size; u++) { - RAWLOG(6, " %u", hnode[u].byte); (void)hnode; - } - RAWLOG(6, " \n"); - return size; -} - -static size_t showHNodeBits(const nodeElt* hnode, size_t size) -{ - size_t u; - for (u=0; u<size; u++) { - RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode; - } - RAWLOG(6, " \n"); - return size; -} - -#endif - - -/* ******************************************************* -* HUF : Huffman block compression -*********************************************************/ -#define HUF_WORKSPACE_MAX_ALIGNMENT 8 - -static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) -{ - size_t const mask = align - 1; - size_t const rem = (size_t)workspace & mask; - size_t const add = (align - rem) & mask; - BYTE* const aligned = (BYTE*)workspace + add; - assert((align & (align - 1)) == 0); /* pow 2 */ - assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); - if (*workspaceSizePtr >= add) { - assert(add < align); - assert(((size_t)aligned & mask) == 0); - *workspaceSizePtr -= add; - return aligned; - } else { - *workspaceSizePtr = 0; - return NULL; - } -} - - -/* HUF_compressWeights() : - * Same as FSE_compress(), but dedicated to huff0's weights compression. - * The use case needs much less stack memory. - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
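Editor's note: the pointer rounding in HUF_alignUpWorkspace() above is the standard power-of-two align-up. A small self-contained sketch (hypothetical helper, not part of the library) with a worked value:

    #include <assert.h>
    #include <stddef.h>

    /* Power-of-two align-up, mirroring HUF_alignUpWorkspace():
     * add = (align - (ptr mod align)) mod align, then advance by add. */
    static void* align_up(void* ptr, size_t align)
    {
        size_t const mask = align - 1;
        size_t const rem  = (size_t)ptr & mask;   /* bytes past the last boundary */
        size_t const add  = (align - rem) & mask; /* 0 when already aligned */
        assert((align & mask) == 0);              /* align must be a power of 2 */
        return (char*)ptr + add;
    }

    /* e.g. address 13 with align 8: rem = 5, add = 3, result is 16;
     * address 16 stays at 16 because add = (8 - 0) & 7 = 0. */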
- */ -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 - -typedef struct { - FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; - U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; - unsigned count[HUF_TABLELOG_MAX+1]; - S16 norm[HUF_TABLELOG_MAX+1]; -} HUF_CompressWeightsWksp; - -static size_t -HUF_compressWeights(void* dst, size_t dstSize, - const void* weightTable, size_t wtSize, - void* workspace, size_t workspaceSize) -{ - BYTE* const ostart = (BYTE*) dst; - BYTE* op = ostart; - BYTE* const oend = ostart + dstSize; - - unsigned maxSymbolValue = HUF_TABLELOG_MAX; - U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; - HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); - - if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); - - /* init conditions */ - if (wtSize <= 1) return 0; /* Not compressible */ - - /* Scan input and build symbol stats */ - { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */ - if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ - if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ - } - - tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); - - /* Write table description header */ - { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) ); - op += hSize; - } - - /* Compress */ - CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) ); - if (cSize == 0) return 0; /* not enough space for compressed data */ - op += cSize; - } - - return (size_t)(op-ostart); -} - -static size_t HUF_getNbBits(HUF_CElt elt) -{ - return elt & 0xFF; -} - -static size_t HUF_getNbBitsFast(HUF_CElt elt) -{ - return elt; -} - -static size_t HUF_getValue(HUF_CElt elt) -{ - return elt & ~(size_t)0xFF; -} - -static size_t HUF_getValueFast(HUF_CElt elt) -{ - return elt; -} - -static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) -{ - assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); - *elt = nbBits; -} - -static void HUF_setValue(HUF_CElt* elt, size_t value) -{ - size_t const nbBits = HUF_getNbBits(*elt); - if (nbBits > 0) { - assert((value >> nbBits) == 0); - *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); - } -} - -HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable) -{ - HUF_CTableHeader header; - ZSTD_memcpy(&header, ctable, sizeof(header)); - return header; -} - -static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue) -{ - HUF_CTableHeader header; - HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header)); - ZSTD_memset(&header, 0, sizeof(header)); - assert(tableLog < 256); - header.tableLog = (BYTE)tableLog; - assert(maxSymbolValue < 256); - header.maxSymbolValue = (BYTE)maxSymbolValue; - ZSTD_memcpy(ctable, &header, sizeof(header)); -} - -typedef struct { - HUF_CompressWeightsWksp wksp; - BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ - BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; -} HUF_WriteCTableWksp; - -size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, - const 
HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, - void* workspace, size_t workspaceSize) -{ - HUF_CElt const* const ct = CTable + 1; - BYTE* op = (BYTE*)dst; - U32 n; - HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); - - HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp)); - - assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); - assert(HUF_readCTableHeader(CTable).tableLog == huffLog); - - /* check conditions */ - if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); - if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); - - /* convert to weight */ - wksp->bitsToWeight[0] = 0; - for (n=1; n<huffLog+1; n++) - wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n); - for (n=0; n<maxSymbolValue; n++) - wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; - - /* attempt weights compression by FSE */ - if (maxDstSize < 1) return ERROR(dstSize_tooSmall); - { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); - if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ - op[0] = (BYTE)hSize; - return hSize+1; - } } - - /* write raw values as 4-bits (max : 15) */ - if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ - if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ - op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); - wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ - for (n=0; n<maxSymbolValue; n+=2) - op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]); - return ((maxSymbolValue+1)/2) + 1; -} - - -size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) -{ - BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ - U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ - U32 tableLog = 0; - U32 nbSymbols = 0; - HUF_CElt* const ct = CTable + 1; - - /* get symbol weights */ - CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); - *hasZeroWeights = (rankVal[0] > 0); - - /* check result */ - if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); - if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); - - *maxSymbolValuePtr = nbSymbols - 1; - - HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); - - /* Prepare base value per rank */ - { U32 n, nextRankStart = 0; - for (n=1; n<=tableLog; n++) { - U32 curr = nextRankStart; - nextRankStart += (rankVal[n] << (n-1)); - rankVal[n] = curr; - } } - - /* fill nbBits */ - { U32 n; for (n=0; n<nbSymbols; n++) { - const U32 w = huffWeight[n]; - HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0)); - } } - - /* fill val */ - { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ - U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; - { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; } - /* determine starting value per rank */ - { U16 min = 0; - U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ - valPerRank[n] = min; /* get starting value within each rank */ - min += nbPerRank[n]; - min >>= 1; - } } - /* assign value within rank, symbol order */ - { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } - } - - return readSize; -} - -U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) -{ - const HUF_CElt* const ct = CTable + 1; - assert(symbolValue <= HUF_SYMBOLVALUE_MAX); - if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue) - return 0; - return (U32)HUF_getNbBits(ct[symbolValue]); -} - - -/** - * HUF_setMaxHeight(): - * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. - * - * It attempts to convert all nodes with nbBits > @targetNbBits - * to employ @targetNbBits instead.
Then it adjusts the tree - * so that it remains a valid canonical Huffman tree. - * - * @pre The sum of the ranks of each symbol == 2^largestBits, - * where largestBits == huffNode[lastNonNull].nbBits. - * @post The sum of the ranks of each symbol == 2^largestBits, - * where largestBits is the return value (expected <= targetNbBits). - * - * @param huffNode The Huffman tree modified in place to enforce targetNbBits. - * It's presumed sorted, from most frequent to rarest symbol. - * @param lastNonNull The symbol with the lowest count in the Huffman tree. - * @param targetNbBits The allowed number of bits, which the Huffman tree - * may not respect. After this function the Huffman tree will - * respect targetNbBits. - * @return The maximum number of bits of the Huffman tree after adjustment. - */ -static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits) -{ - const U32 largestBits = huffNode[lastNonNull].nbBits; - /* early exit : no elt > targetNbBits, so the tree is already valid. */ - if (largestBits <= targetNbBits) return largestBits; - - DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits); - - /* there are several too large elements (at least >= 2) */ - { int totalCost = 0; - const U32 baseCost = 1 << (largestBits - targetNbBits); - int n = (int)lastNonNull; - - /* Adjust any ranks > targetNbBits to targetNbBits. - * Compute totalCost, which is how far the sum of the ranks is - * over 2^largestBits after adjusting the offending ranks. - */ - while (huffNode[n].nbBits > targetNbBits) { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); - huffNode[n].nbBits = (BYTE)targetNbBits; - n--; - } - /* n stops at huffNode[n].nbBits <= targetNbBits */ - assert(huffNode[n].nbBits <= targetNbBits); - /* n ends at the index of the smallest symbol using < targetNbBits */ - while (huffNode[n].nbBits == targetNbBits) --n; - - /* renorm totalCost from 2^largestBits to 2^targetNbBits - * note : totalCost is necessarily a multiple of baseCost */ - assert(((U32)totalCost & (baseCost - 1)) == 0); - totalCost >>= (largestBits - targetNbBits); - assert(totalCost > 0); - - /* repay normalized cost */ - { U32 const noSymbol = 0xF0F0F0F0; - U32 rankLast[HUF_TABLELOG_MAX+2]; - - /* Get pos of last (smallest = lowest cum. count) symbol per rank */ - ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); - { U32 currentNbBits = targetNbBits; - int pos; - for (pos=n ; pos >= 0; pos--) { - if (huffNode[pos].nbBits >= currentNbBits) continue; - currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ - rankLast[targetNbBits-currentNbBits] = (U32)pos; - } } - - while (totalCost > 0) { - /* Try to reduce the next power of 2 above totalCost because we - * gain back half the rank. - */ - U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; - for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { - U32 const highPos = rankLast[nBitsToDecrease]; - U32 const lowPos = rankLast[nBitsToDecrease-1]; - if (highPos == noSymbol) continue; - /* Decrease highPos if no symbols of lowPos or if it is - * not cheaper to remove 2 lowPos than highPos. - */ - if (lowPos == noSymbol) break; - { U32 const highTotal = huffNode[highPos].count; - U32 const lowTotal = 2 * huffNode[lowPos].count; - if (highTotal <= lowTotal) break; - } } - /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
*/ - assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); - /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ - while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) - nBitsToDecrease++; - assert(rankLast[nBitsToDecrease] != noSymbol); - /* Increase the number of bits to gain back half the rank cost. */ - totalCost -= 1 << (nBitsToDecrease-1); - huffNode[rankLast[nBitsToDecrease]].nbBits++; - - /* Fix up the new rank. - * If the new rank was empty, this symbol is now its smallest. - * Otherwise, this symbol will be the largest in the new rank so no adjustment. - */ - if (rankLast[nBitsToDecrease-1] == noSymbol) - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; - /* Fix up the old rank. - * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, - * it must be the only symbol in its rank, so the old rank now has no symbols. - * Otherwise, since the Huffman nodes are sorted by count, the previous position is now - * the smallest node in the rank. If the previous position belongs to a different rank, - * then the rank is now empty. - */ - if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol; - else { - rankLast[nBitsToDecrease]--; - if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) - rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ - } - } /* while (totalCost > 0) */ - - /* If we've removed too much weight, then we have to add it back. - * To avoid overshooting again, we only adjust the smallest rank. - * We take the largest nodes from the lowest rank 0 and move them - * to rank 1. There's guaranteed to be enough rank 0 symbols because - * TODO. - */ - while (totalCost < 0) { /* Sometimes, cost correction overshoot */ - /* special case : no rank 1 symbol (using targetNbBits-1); - * let's create one from largest rank 0 (using targetNbBits). - */ - if (rankLast[1] == noSymbol) { - while (huffNode[n].nbBits == targetNbBits) n--; - huffNode[n+1].nbBits--; - assert(n >= 0); - rankLast[1] = (U32)(n+1); - totalCost++; - continue; - } - huffNode[ rankLast[1] + 1 ].nbBits--; - rankLast[1]++; - totalCost ++; - } - } /* repay normalized cost */ - } /* there are several too large elements (at least >= 2) */ - - return targetNbBits; -} - -typedef struct { - U16 base; - U16 curr; -} rankPos; - -typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; - -/* Number of buckets available for HUF_sort() */ -#define RANK_POSITION_TABLE_SIZE 192 - -typedef struct { - huffNodeTable huffNodeTbl; - rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; -} HUF_buildCTable_wksp_tables; - -/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. - * Strategy is to use as many buckets as possible for representing distinct - * counts while using the remainder to represent all "large" counts. - * - * To satisfy this requirement for 192 buckets, we can do the following: - * Let buckets 0-166 represent distinct counts of [0, 166] - * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. 
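Editor's note: before moving on to sorting, a worked trace of the HUF_setMaxHeight() cost accounting above may help; the numbers are illustrative, not taken from the source:

    /* Illustrative trace of HUF_setMaxHeight() (example numbers):
     * largestBits = 13, targetNbBits = 11, so baseCost = 1 << (13-11) = 4.
     * At the 2^13 scale, a symbol's Kraft weight is 1 << (13 - nbBits):
     *   - forcing a 13-bit symbol down to 11 bits grows its weight 1 -> 4,
     *     adding baseCost - (1 << (13-13)) = 3 units of debt;
     *   - forcing a 12-bit symbol down to 11 bits grows it 2 -> 4, adding 2.
     * totalCost >>= (13 - 11) then renormalizes the debt to the 2^11 scale.
     * Repayment: lengthening a symbol from (11 - k) to (11 - k + 1) bits
     * halves its weight, recovering 1 << (k - 1) units, which is exactly the
     * totalCost -= 1 << (nBitsToDecrease - 1) step in the loop above. */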
- */ -#define RANK_POSITION_MAX_COUNT_LOG 32 -#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) -#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) - -/* Return the appropriate bucket index for a given count. See definition of - * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. - */ -static U32 HUF_getIndex(U32 const count) { - return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) - ? count - : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; -} - -/* Helper swap function for HUF_quickSortPartition() */ -static void HUF_swapNodes(nodeElt* a, nodeElt* b) { - nodeElt tmp = *a; - *a = *b; - *b = tmp; -} - -/* Returns 0 if the huffNode array is not sorted by descending count */ -MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { - U32 i; - for (i = 1; i < maxSymbolValue1; ++i) { - if (huffNode[i].count > huffNode[i-1].count) { - return 0; - } - } - return 1; -} - -/* Insertion sort by descending order */ -HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { - int i; - int const size = high-low+1; - huffNode += low; - for (i = 1; i < size; ++i) { - nodeElt const key = huffNode[i]; - int j = i - 1; - while (j >= 0 && huffNode[j].count < key.count) { - huffNode[j + 1] = huffNode[j]; - j--; - } - huffNode[j + 1] = key; - } -} - -/* Pivot helper function for quicksort. */ -static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { - /* Simply select rightmost element as pivot. "Better" selectors like - * median-of-three don't experimentally appear to have any benefit. - */ - U32 const pivot = arr[high].count; - int i = low - 1; - int j = low; - for ( ; j < high; j++) { - if (arr[j].count > pivot) { - i++; - HUF_swapNodes(&arr[i], &arr[j]); - } - } - HUF_swapNodes(&arr[i + 1], &arr[high]); - return i + 1; -} - -/* Classic quicksort by descending with partially iterative calls - * to reduce worst case callstack size. - */ -static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { - int const kInsertionSortThreshold = 8; - if (high - low < kInsertionSortThreshold) { - HUF_insertionSort(arr, low, high); - return; - } - while (low < high) { - int const idx = HUF_quickSortPartition(arr, low, high); - if (idx - low < high - idx) { - HUF_simpleQuickSort(arr, low, idx - 1); - low = idx + 1; - } else { - HUF_simpleQuickSort(arr, idx + 1, high); - high = idx - 1; - } - } -} - -/** - * HUF_sort(): - * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. - * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. - * - * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. - * Must have (maxSymbolValue + 1) entries. - * @param[in] count Histogram of the symbols. - * @param[in] maxSymbolValue Maximum symbol value. - * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. - */ -static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { - U32 n; - U32 const maxSymbolValue1 = maxSymbolValue+1; - - /* Compute base and set curr to base. - * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. - * See HUF_getIndex to see bucketing strategy. 
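Editor's note: a few hand-computed values make the HUF_getIndex() bucketing above concrete (RANK_POSITION_LOG_BUCKETS_BEGIN evaluates to 158 per the macros; mappings below are illustrative):

    /* Sample mappings for HUF_getIndex():
     *   count = 3     -> bucket 3                           (distinct range)
     *   count = 100   -> bucket 100                         (distinct range)
     *   count = 1000  -> ZSTD_highbit32(1000) + 158 = 9 + 158 = bucket 167
     *   count = 1<<20 -> 20 + 158 = bucket 178              (log2 range)
     * All counts sharing the same floor(log2(count)) land in one bucket,
     * which the quicksort pass then orders within the bucket. */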
- * We attribute each symbol to lowerRank's base value, because we want to know where - * each rank begins in the output, so for rank R we want to count ranks R+1 and above. - */ - ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); - for (n = 0; n < maxSymbolValue1; ++n) { - U32 lowerRank = HUF_getIndex(count[n]); - assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); - rankPosition[lowerRank].base++; - } - - assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); - /* Set up the rankPosition table */ - for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { - rankPosition[n-1].base += rankPosition[n].base; - rankPosition[n-1].curr = rankPosition[n-1].base; - } - - /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ - for (n = 0; n < maxSymbolValue1; ++n) { - U32 const c = count[n]; - U32 const r = HUF_getIndex(c) + 1; - U32 const pos = rankPosition[r].curr++; - assert(pos < maxSymbolValue1); - huffNode[pos].count = c; - huffNode[pos].byte = (BYTE)n; - } - - /* Sort each bucket. */ - for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { - int const bucketSize = rankPosition[n].curr - rankPosition[n].base; - U32 const bucketStartIdx = rankPosition[n].base; - if (bucketSize > 1) { - assert(bucketStartIdx < maxSymbolValue1); - HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); - } - } - - assert(HUF_isSorted(huffNode, maxSymbolValue1)); -} - - -/** HUF_buildCTable_wksp() : - * Same as HUF_buildCTable(), but using externally allocated scratch buffer. - * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). - */ -#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) - -/* HUF_buildTree(): - * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. - * - * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. - * @param maxSymbolValue The maximum symbol value. - * @return The smallest node in the Huffman tree (by count). - */ -static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) -{ - nodeElt* const huffNode0 = huffNode - 1; - int nonNullRank; - int lowS, lowN; - int nodeNb = STARTNODE; - int n, nodeRoot; - DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1); - /* init for parents */ - nonNullRank = (int)maxSymbolValue; - while(huffNode[nonNullRank].count == 0) nonNullRank--; - lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; - huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; - nodeNb++; lowS-=2; - for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); - huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ - - /* create parents */ - while (nodeNb <= nodeRoot) { - int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; - int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? 
lowS-- : lowN++; - huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; - huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; - nodeNb++; - } - - /* distribute weights (unlimited tree height) */ - huffNode[nodeRoot].nbBits = 0; - for (n=nodeRoot-1; n>=STARTNODE; n--) - huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; - for (n=0; n<=nonNullRank; n++) - huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; - - DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1)); - - return nonNullRank; -} - -/** - * HUF_buildCTableFromTree(): - * Build the CTable given the Huffman tree in huffNode. - * - * @param[out] CTable The output Huffman CTable. - * @param huffNode The Huffman tree. - * @param nonNullRank The last and smallest node in the Huffman tree. - * @param maxSymbolValue The maximum symbol value. - * @param maxNbBits The exact maximum number of bits used in the Huffman tree. - */ -static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) -{ - HUF_CElt* const ct = CTable + 1; - /* fill result into ctable (val, nbBits) */ - int n; - U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; - U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; - int const alphabetSize = (int)(maxSymbolValue + 1); - for (n=0; n<=nonNullRank; n++) - nbPerRank[huffNode[n].nbBits]++; - /* determine starting value per rank */ - { U16 min = 0; - for (n=(int)maxNbBits; n>0; n--) { - valPerRank[n] = min; /* get starting value within each rank */ - min += nbPerRank[n]; - min >>= 1; - } } - for (n=0; n<alphabetSize; n++) - HUF_setNbBits(ct + n, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ - for (n=0; n<alphabetSize; n++) - HUF_setValue(ct + n, valPerRank[huffNode[n].nbBits]++); /* assign value within rank, symbol order */ - - HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue); -} - -size_t HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, - void* workSpace, size_t wkspSize) -{ - HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); - nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; - nodeElt* const huffNode = huffNode0+1; - int nonNullRank; - - HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); - - DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1); - - /* safety checks */ - if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) - return ERROR(workSpace_tooSmall); - if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; - if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) - return ERROR(maxSymbolValue_tooLarge); - ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); - - /* sort, decreasing order */ - HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); - DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1)); - - /* build tree */ - nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); - - /* determine and enforce maxTableLog */ - maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); - if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ - - HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); - - return maxNbBits; -} - -size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) -{ - HUF_CElt const* ct = CTable + 1; - size_t nbBits = 0; - int s; - for (s = 0; s <= (int)maxSymbolValue; ++s) { - nbBits += HUF_getNbBits(ct[s]) * count[s]; - } - return nbBits >> 3; -} - -int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { - HUF_CTableHeader header = HUF_readCTableHeader(CTable); - HUF_CElt const* ct = CTable + 1; - int bad = 0; - int s; - - assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX); - - if (header.maxSymbolValue < maxSymbolValue) - return 0; - - for (s = 0; s <= (int)maxSymbolValue; ++s) { - bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); - } - return !bad; -} -
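Editor's note: the valPerRank walk in HUF_buildCTableFromTree() above is the classic canonical-Huffman code assignment. Here is the same arithmetic as a self-contained sketch (simplified types, not the library's HUF_CElt layout):

    #include <stdint.h>

    /* Compute the first code value for each code length, as in
     * HUF_buildCTableFromTree(): walk from the longest length down;
     * each shorter length starts at half the code space consumed so far. */
    static void first_code_per_length(uint16_t valPerRank[], const uint16_t nbPerRank[], int maxNbBits)
    {
        uint16_t min = 0;
        int n;
        for (n = maxNbBits; n > 0; n--) {
            valPerRank[n] = min;   /* first code value for length n */
            min += nbPerRank[n];   /* codes consumed at this length */
            min >>= 1;             /* one bit shorter => half the space */
        }
    }

    /* Symbols then take valPerRank[len]++ in symbol order, so equal-length
     * codes are consecutive integers: the canonical property. */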
-size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } - -/** HUF_CStream_t: - * Huffman uses its own BIT_CStream_t implementation. - * There are three major differences from BIT_CStream_t: - * 1. HUF_addBits() takes a HUF_CElt (size_t) which is - * the pair (nbBits, value) in the format: - * format: - * - Bits [0, 4) = nbBits - * - Bits [4, 64 - nbBits) = 0 - * - Bits [64 - nbBits, 64) = value - * 2. The bitContainer is built from the upper bits and - * right shifted. E.g. to add a new value of N bits - * you right shift the bitContainer by N, then or in - * the new value into the N upper bits. - * 3. The bitstream has two bit containers. You can add - * bits to the second container and merge them into - * the first container. - */ - -#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) - -typedef struct { - size_t bitContainer[2]; - size_t bitPos[2]; - - BYTE* startPtr; - BYTE* ptr; - BYTE* endPtr; -} HUF_CStream_t; - -/**! HUF_initCStream(): - * Initializes the bitstream. - * @returns 0 or an error code. - */ -static size_t HUF_initCStream(HUF_CStream_t* bitC, - void* startPtr, size_t dstCapacity) -{ - ZSTD_memset(bitC, 0, sizeof(*bitC)); - bitC->startPtr = (BYTE*)startPtr; - bitC->ptr = bitC->startPtr; - bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); - if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); - return 0; -} - -/*! HUF_addBits(): - * Adds the symbol stored in HUF_CElt elt to the bitstream. - * - * @param elt The element we're adding. This is a (nbBits, value) pair. - * See the HUF_CStream_t docs for the format. - * @param idx Insert into the bitstream at this idx. - * @param kFast This is a template parameter. If the bitstream is guaranteed - * to have at least 4 unused bits after this call it may be 1, - * otherwise it must be 0. HUF_addBits() is faster when fast is set. - */ -FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) -{ - assert(idx <= 1); - assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); - /* This is efficient on x86-64 with BMI2 because shrx - * only reads the low 6 bits of the register. The compiler - * knows this and elides the mask. When fast is set, - * every operation can use the same value loaded from elt. - */ - bitC->bitContainer[idx] >>= HUF_getNbBits(elt); - bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); - /* We only read the low 8 bits of bitC->bitPos[idx] so it - * doesn't matter that the high bits have noise from the value. - */ - bitC->bitPos[idx] += HUF_getNbBitsFast(elt); - assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); - /* The last 4-bits of elt are dirty if fast is set, - * so we must not be overwriting bits that have already been - * inserted into the bit container. - */ -#if DEBUGLEVEL >= 1 - { - size_t const nbBits = HUF_getNbBits(elt); - size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; - (void)dirtyBits; - /* Middle bits are 0. */ - assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); - /* We didn't overwrite any bits in the bit container. */ - assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); - (void)dirtyBits; - } -#endif -} - -FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) -{ - bitC->bitContainer[1] = 0; - bitC->bitPos[1] = 0; -} - -/*! HUF_mergeIndex1() : - * Merges the bit container @ index 1 into the bit container @ index 0 - * and zeros the bit container @ index 1. 
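Editor's note: a compact sketch of the (nbBits, value) element format and the shift-and-or append described above, assuming a 64-bit container (helper names are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Pack a code the way HUF_setNbBits()/HUF_setValue() do:
     * low byte holds nbBits, the value sits left-justified in the top bits. */
    static uint64_t pack_celt(uint64_t value, unsigned nbBits)
    {
        assert(nbBits > 0 && nbBits < 16 && (value >> nbBits) == 0);
        return nbBits | (value << (64 - nbBits));
    }

    /* Append to the bit container, as in HUF_addBits(): right-shift to make
     * room, then OR in the already left-justified value. */
    static void add_bits(uint64_t* container, unsigned* bitPos, uint64_t elt)
    {
        *container >>= (elt & 0xFF);           /* nbBits from the low byte */
        *container |= elt & ~(uint64_t)0xFF;   /* value occupies the top bits */
        *bitPos += (unsigned)(elt & 0xFF);
    }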
- */ -FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) -{ - assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); - bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); - bitC->bitContainer[0] |= bitC->bitContainer[1]; - bitC->bitPos[0] += bitC->bitPos[1]; - assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); -} - -/*! HUF_flushBits() : -* Flushes the bits in the bit container @ index 0. -* -* @post bitPos will be < 8. -* @param kFast If kFast is set then we must know a-priori that -* the bit container will not overflow. -*/ -FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) -{ - /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ - size_t const nbBits = bitC->bitPos[0] & 0xFF; - size_t const nbBytes = nbBits >> 3; - /* The top nbBits bits of bitContainer are the ones we need. */ - size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); - /* Mask bitPos to account for the bytes we consumed. */ - bitC->bitPos[0] &= 7; - assert(nbBits > 0); - assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); - assert(bitC->ptr <= bitC->endPtr); - MEM_writeLEST(bitC->ptr, bitContainer); - bitC->ptr += nbBytes; - assert(!kFast || bitC->ptr <= bitC->endPtr); - if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; - /* bitContainer doesn't need to be modified because the leftover - * bits are already the top bitPos bits. And we don't care about - * noise in the lower values. - */ -} - -/*! HUF_endMark() - * @returns The Huffman stream end mark: A 1-bit value = 1. - */ -static HUF_CElt HUF_endMark(void) -{ - HUF_CElt endMark; - HUF_setNbBits(&endMark, 1); - HUF_setValue(&endMark, 1); - return endMark; -} - -/*! HUF_closeCStream() : - * @return Size of CStream, in bytes, - * or 0 if it could not fit into dstBuffer */ -static size_t HUF_closeCStream(HUF_CStream_t* bitC) -{ - HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); - HUF_flushBits(bitC, /* kFast */ 0); - { - size_t const nbBits = bitC->bitPos[0] & 0xFF; - if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ - return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); - } -} - -FORCE_INLINE_TEMPLATE void -HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) -{ - HUF_addBits(bitCPtr, CTable[symbol], idx, fast); -} - -FORCE_INLINE_TEMPLATE void -HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, - const BYTE* ip, size_t srcSize, - const HUF_CElt* ct, - int kUnroll, int kFastFlush, int kLastFast) -{ - /* Join to kUnroll */ - int n = (int)srcSize; - int rem = n % kUnroll; - if (rem > 0) { - for (; rem > 0; --rem) { - HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); - } - HUF_flushBits(bitC, kFastFlush); - } - assert(n % kUnroll == 0); - - /* Join to 2 * kUnroll */ - if (n % (2 * kUnroll)) { - int u; - for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); - } - HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); - HUF_flushBits(bitC, kFastFlush); - n -= kUnroll; - } - assert(n % (2 * kUnroll) == 0); - - for (; n>0; n-= 2 * kUnroll) { - /* Encode kUnroll symbols into the bitstream @ index 0. */ - int u; - for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); - } - HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); - HUF_flushBits(bitC, kFastFlush); - /* Encode kUnroll symbols into the bitstream @ index 1. 
- * This allows us to start filling the bit container - * without any data dependencies. - */ - HUF_zeroIndex1(bitC); - for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); - } - HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); - /* Merge bitstream @ index 1 into the bitstream @ index 0 */ - HUF_mergeIndex1(bitC); - HUF_flushBits(bitC, kFastFlush); - } - assert(n == 0); - -} - -/** - * Returns a tight upper bound on the output space needed by Huffman - * with 8 bytes buffer to handle over-writes. If the output is at least - * this large we don't need to do bounds checks during Huffman encoding. - */ -static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) -{ - return ((srcSize * tableLog) >> 3) + 8; -} - - -FORCE_INLINE_TEMPLATE size_t -HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable) -{ - U32 const tableLog = HUF_readCTableHeader(CTable).tableLog; - HUF_CElt const* ct = CTable + 1; - const BYTE* ip = (const BYTE*) src; - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstSize; - HUF_CStream_t bitC; - - /* init */ - if (dstSize < 8) return 0; /* not enough space to compress */ - { BYTE* op = ostart; - size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); - if (HUF_isError(initErr)) return 0; } - - if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0); - else { - if (MEM_32bits()) { - switch (tableLog) { - case 11: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); - break; - case 10: ZSTD_FALLTHROUGH; - case 9: ZSTD_FALLTHROUGH; - case 8: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); - break; - case 7: ZSTD_FALLTHROUGH; - default: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); - break; - } - } else { - switch (tableLog) { - case 11: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); - break; - case 10: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); - break; - case 9: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); - break; - case 8: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); - break; - case 7: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); - break; - case 6: ZSTD_FALLTHROUGH; - default: - HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); - break; - } - } - } - assert(bitC.ptr <= bitC.endPtr); - - return HUF_closeCStream(&bitC); -} - -#if DYNAMIC_BMI2 - -static BMI2_TARGET_ATTRIBUTE size_t -HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable) -{ - return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, 
srcSize, CTable); -} - -static size_t -HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable) -{ - return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); -} - -static size_t -HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable, const int flags) -{ - if (flags & HUF_flags_bmi2) { - return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); - } - return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); -} - -#else - -static size_t -HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable, const int flags) -{ - (void)flags; - return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); -} - -#endif - -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) -{ - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); -} - -static size_t -HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, - const void* src, size_t srcSize, - const HUF_CElt* CTable, int flags) -{ - size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ - const BYTE* ip = (const BYTE*) src; - const BYTE* const iend = ip + srcSize; - BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - BYTE* op = ostart; - - if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ - if (srcSize < 12) return 0; /* no saving possible : too small input */ - op += 6; /* jumpTable */ - - assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); - if (cSize == 0 || cSize > 65535) return 0; - MEM_writeLE16(ostart, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); - if (cSize == 0 || cSize > 65535) return 0; - MEM_writeLE16(ostart+2, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); - if (cSize == 0 || cSize > 65535) return 0; - MEM_writeLE16(ostart+4, (U16)cSize); - op += cSize; - } - - ip += segmentSize; - assert(op <= oend); - assert(ip <= iend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); - if (cSize == 0 || cSize > 65535) return 0; - op += cSize; - } - - return (size_t)(op-ostart); -} - -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) -{ - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); -} - -typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; - -static size_t HUF_compressCTable_internal( - BYTE* const ostart, BYTE* op, BYTE* const oend, - const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) -{ - size_t const cSize = (nbStreams==HUF_singleStream) ? 
- HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : - HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); - if (HUF_isError(cSize)) { return cSize; } - if (cSize==0) { return 0; } /* uncompressible */ - op += cSize; - /* check compressibility */ - assert(op >= ostart); - if ((size_t)(op-ostart) >= srcSize-1) { return 0; } - return (size_t)(op-ostart); -} - -typedef struct { - unsigned count[HUF_SYMBOLVALUE_MAX + 1]; - HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; - union { - HUF_buildCTable_wksp_tables buildCTable_wksp; - HUF_WriteCTableWksp writeCTable_wksp; - U32 hist_wksp[HIST_WKSP_SIZE_U32]; - } wksps; -} HUF_compress_tables_t; - -#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 -#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ - -unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) -{ - unsigned cardinality = 0; - unsigned i; - - for (i = 0; i < maxSymbolValue + 1; i++) { - if (count[i] != 0) cardinality += 1; - } - - return cardinality; -} - -unsigned HUF_minTableLog(unsigned symbolCardinality) -{ - U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; - return minBitsSymbols; -} - -unsigned HUF_optimalTableLog( - unsigned maxTableLog, - size_t srcSize, - unsigned maxSymbolValue, - void* workSpace, size_t wkspSize, - HUF_CElt* table, - const unsigned* count, - int flags) -{ - assert(srcSize > 1); /* Not supported, RLE should be used instead */ - assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); - - if (!(flags & HUF_flags_optimalDepth)) { - /* cheap evaluation, based on FSE */ - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); - } - - { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); - size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); - size_t hSize, newSize; - const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); - const unsigned minTableLog = HUF_minTableLog(symbolCardinality); - size_t optSize = ((size_t) ~0) - 1; - unsigned optLog = maxTableLog, optLogGuess; - - DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); - - /* Search until size increases */ - for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { - DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); - - { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); - if (ERR_isError(maxBits)) continue; - - if (maxBits < optLogGuess && optLogGuess > minTableLog) break; - - hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); - } - - if (ERR_isError(hSize)) continue; - - newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; - - if (newSize > optSize + 1) { - break; - } - - if (newSize < optSize) { - optSize = newSize; - optLog = optLogGuess; - } - } - assert(optLog <= HUF_TABLELOG_MAX); - return optLog; - } -} - -/* HUF_compress_internal() : - * `workSpace_align4` must be aligned on 4-bytes boundaries, - * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ -static size_t -HUF_compress_internal (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - HUF_nbStreams_e nbStreams, - void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) -{ - HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, 
ZSTD_ALIGNOF(size_t)); - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstSize; - BYTE* op = ostart; - - DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize); - HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); - - /* checks & inits */ - if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); - if (!srcSize) return 0; /* Uncompressed */ - if (!dstSize) return 0; /* cannot fit anything within dst budget */ - if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ - if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); - if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); - if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; - if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; - - /* Heuristic : If old table is valid, use it for small inputs */ - if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { - return HUF_compressCTable_internal(ostart, op, oend, - src, srcSize, - nbStreams, oldHufTable, flags); - } - - /* If uncompressible data is suspected, do a smaller sampling first */ - DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); - if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { - size_t largestTotal = 0; - DEBUGLOG(5, "input suspected incompressible : sampling to check"); - { unsigned maxSymbolValueBegin = maxSymbolValue; - CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); - largestTotal += largestBegin; - } - { unsigned maxSymbolValueEnd = maxSymbolValue; - CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); - largestTotal += largestEnd; - } - if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ - } - - /* Scan input and build symbol stats */ - { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); - if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ - if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ - } - DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1)); - - /* Check validity of previous table */ - if ( repeat - && *repeat == HUF_repeat_check - && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { - *repeat = HUF_repeat_none; - } - /* Heuristic : use existing table for small inputs */ - if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { - return HUF_compressCTable_internal(ostart, op, oend, - src, srcSize, - nbStreams, oldHufTable, flags); - } - - /* Build Huffman Tree */ - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); - { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, - maxSymbolValue, huffLog, - &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); - CHECK_F(maxBits); - huffLog = (U32)maxBits; - DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); - } - - /* 
Write table description header */ - { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, - &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) ); - /* Check if using previous huffman table is beneficial */ - if (repeat && *repeat != HUF_repeat_none) { - size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); - size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); - if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { - return HUF_compressCTable_internal(ostart, op, oend, - src, srcSize, - nbStreams, oldHufTable, flags); - } } - - /* Use the new huffman table */ - if (hSize + 12ul >= srcSize) { return 0; } - op += hSize; - if (repeat) { *repeat = HUF_repeat_none; } - if (oldHufTable) - ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ - } - return HUF_compressCTable_internal(ostart, op, oend, - src, srcSize, - nbStreams, table->CTable, flags); -} - -size_t HUF_compress1X_repeat (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int flags) -{ - DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, - workSpace, wkspSize, hufTable, - repeat, flags); -} - -/* HUF_compress4X_repeat(): - * compress input using 4 streams. - * consider skipping quickly - * reuse an existing huffman compression table */ -size_t HUF_compress4X_repeat (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int flags) -{ - DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, - workSpace, wkspSize, - hufTable, repeat, flags); -} diff --git a/zstandard_android/src/compress/zstd_compress.c b/zstandard_android/src/compress/zstd_compress.c deleted file mode 100644 index aad2504..0000000 --- a/zstandard_android/src/compress/zstd_compress.c +++ /dev/null @@ -1,7154 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -/*-************************************* -* Dependencies -***************************************/ -#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ -#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ -#include "../common/mem.h" -#include "hist.h" /* HIST_countFast_wksp */ -#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ -#include "../common/fse.h" -#include "../common/huf.h" -#include "zstd_compress_internal.h" -#include "zstd_compress_sequences.h" -#include "zstd_compress_literals.h" -#include "zstd_fast.h" -#include "zstd_double_fast.h" -#include "zstd_lazy.h" -#include "zstd_opt.h" -#include "zstd_ldm.h" -#include "zstd_compress_superblock.h" -#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */ - -/* *************************************************************** -* Tuning parameters -*****************************************************************/ -/*! - * COMPRESS_HEAPMODE : - * Select how the default compression function ZSTD_compress() allocates its context, - * on stack (0, default), or into heap (1). - * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected. - */ -#ifndef ZSTD_COMPRESS_HEAPMODE -# define ZSTD_COMPRESS_HEAPMODE 0 -#endif - -/*! - * ZSTD_HASHLOG3_MAX : - * Maximum size of the hash table dedicated to finding 3-byte matches, - * in log format, aka 17 => 1 << 17 == 128Ki positions. - * This structure is only used in zstd_opt. - * Since allocation is centralized for all strategies, it has to be known here. - * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, - * so that zstd_opt.c doesn't need to know about this constant. - */ -#ifndef ZSTD_HASHLOG3_MAX -# define ZSTD_HASHLOG3_MAX 17 -#endif - -/*-************************************* -* Helper functions -***************************************/ -/* ZSTD_compressBound() - * Note that the result from this function is only valid for - * the one-pass compression functions. - * When employing the streaming mode, - * if flushes are frequently altering the size of blocks, - * the overhead from block headers can make the compressed data larger - * than the return value of ZSTD_compressBound(). - */ -size_t ZSTD_compressBound(size_t srcSize) { - size_t const r = ZSTD_COMPRESSBOUND(srcSize); - if (r==0) return ERROR(srcSize_wrong); - return r; -} - - -/*-************************************* -* Context memory management -***************************************/ -struct ZSTD_CDict_s { - const void* dictContent; - size_t dictContentSize; - ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */ - U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ - ZSTD_cwksp workspace; - ZSTD_matchState_t matchState; - ZSTD_compressedBlockState_t cBlockState; - ZSTD_customMem customMem; - U32 dictID; - int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ - ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use - * row-based matchfinder. Unless the cdict is reloaded, we will use - * the same greedy/lazy matchfinder at compression time.
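Editor's note: given the ZSTD_compressBound() caveat above, the canonical caller pattern is one-pass: size the destination with the bound, compress once, check the result. A minimal sketch against the public API:

    #include <stdlib.h>
    #include <zstd.h>

    /* One-pass compression sized with ZSTD_compressBound(); as noted above,
     * the bound is not valid for streaming with frequent flushes.
     * Returns 1 on success and hands ownership of *outDst to the caller. */
    static int compress_once(const void* src, size_t srcSize, void** outDst, size_t* outSize)
    {
        size_t const bound = ZSTD_compressBound(srcSize);
        void* const dst = malloc(bound);
        size_t cSize;
        if (dst == NULL) return 0;
        cSize = ZSTD_compress(dst, bound, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) { free(dst); return 0; }
        *outDst = dst;
        *outSize = cSize;
        return 1;
    }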
- */ -}; /* typedef'd to ZSTD_CDict within "zstd.h" */ - -ZSTD_CCtx* ZSTD_createCCtx(void) -{ - return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); -} - -static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) -{ - assert(cctx != NULL); - ZSTD_memset(cctx, 0, sizeof(*cctx)); - cctx->customMem = memManager; - cctx->bmi2 = ZSTD_cpuSupportsBmi2(); - { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); - assert(!ZSTD_isError(err)); - (void)err; - } -} - -ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) -{ - ZSTD_STATIC_ASSERT(zcss_init==0); - ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); - if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem); - if (!cctx) return NULL; - ZSTD_initCCtx(cctx, customMem); - return cctx; - } -} - -ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) -{ - ZSTD_cwksp ws; - ZSTD_CCtx* cctx; - if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ - if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); - - cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx)); - if (cctx == NULL) return NULL; - - ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx)); - ZSTD_cwksp_move(&cctx->workspace, &ws); - cctx->staticSize = workspaceSize; - - /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ - if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; - cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); - cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); - cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE); - cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); - return cctx; -} - -/** - * Clears and frees all of the dictionaries in the CCtx. - */ -static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx) -{ - ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); - ZSTD_freeCDict(cctx->localDict.cdict); - ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict)); - ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); - cctx->cdict = NULL; -} - -static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict) -{ - size_t const bufferSize = dict.dictBuffer != NULL ? 
dict.dictSize : 0; - size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict); - return bufferSize + cdictSize; -} - -static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) -{ - assert(cctx != NULL); - assert(cctx->staticSize == 0); - ZSTD_clearAllDicts(cctx); -#ifdef ZSTD_MULTITHREAD - ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; -#endif - ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); -} - -size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) -{ - DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx); - if (cctx==NULL) return 0; /* support free on NULL */ - RETURN_ERROR_IF(cctx->staticSize, memory_allocation, - "not compatible with static CCtx"); - { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); - ZSTD_freeCCtxContent(cctx); - if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem); - } - return 0; -} - - -static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) -{ -#ifdef ZSTD_MULTITHREAD - return ZSTDMT_sizeof_CCtx(cctx->mtctx); -#else - (void)cctx; - return 0; -#endif -} - - -size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) -{ - if (cctx==NULL) return 0; /* support sizeof on NULL */ - /* cctx may be in the workspace */ - return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx)) - + ZSTD_cwksp_sizeof(&cctx->workspace) - + ZSTD_sizeof_localDict(cctx->localDict) - + ZSTD_sizeof_mtctx(cctx); -} - -size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) -{ - return ZSTD_sizeof_CCtx(zcs); /* same object */ -} - -/* private API call, for dictBuilder only */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } - -/* Returns true if the strategy supports using a row based matchfinder */ -static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { - return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2); -} - -/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder - * for this compression. - */ -static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { - assert(mode != ZSTD_ps_auto); - return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); -} - -/* Returns row matchfinder usage given an initial mode and cParams */ -static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, - const ZSTD_compressionParameters* const cParams) { -#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) - int const kHasSIMD128 = 1; -#else - int const kHasSIMD128 = 0; -#endif - if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ - mode = ZSTD_ps_disable; - if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; - if (kHasSIMD128) { - if (cParams->windowLog > 14) mode = ZSTD_ps_enable; - } else { - if (cParams->windowLog > 17) mode = ZSTD_ps_enable; - } - return mode; -} - -/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ -static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, - const ZSTD_compressionParameters* const cParams) { - if (mode != ZSTD_ps_auto) return mode; - return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? 
ZSTD_ps_enable : ZSTD_ps_disable; -} - -/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ -static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, - const ZSTD_paramSwitch_e useRowMatchFinder, - const U32 forDDSDict) { - assert(useRowMatchFinder != ZSTD_ps_auto); - /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. - * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder. - */ - return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); -} - -/* Returns ZSTD_ps_enable if compression parameters are such that we should - * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns ZSTD_ps_disable otherwise. - */ -static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, - const ZSTD_compressionParameters* const cParams) { - if (mode != ZSTD_ps_auto) return mode; - return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; -} - -static int ZSTD_resolveExternalSequenceValidation(int mode) { - return mode; -} - -/* Resolves maxBlockSize to the default if no value is present. */ -static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { - if (maxBlockSize == 0) { - return ZSTD_BLOCKSIZE_MAX; - } else { - return maxBlockSize; - } -} - -static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) { - if (value != ZSTD_ps_auto) return value; - if (cLevel < 10) { - return ZSTD_ps_disable; - } else { - return ZSTD_ps_enable; - } -} - -/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. - * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
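
/* Aside: a simplified mirror (not part of the original file) of the
 * ZSTD_ps_auto resolution above, for illustration only; the authoritative
 * logic is ZSTD_resolveRowMatchFinderMode(). The row-based matchfinder is
 * considered only for greedy..lazy2 strategies, with a lower windowLog
 * threshold when SIMD (SSE2/NEON) is compiled in. */
#include <zstd.h>

static int would_use_row_matchfinder(ZSTD_strategy strategy, unsigned windowLog, int hasSIMD128)
{
    if (strategy < ZSTD_greedy || strategy > ZSTD_lazy2) return 0;  /* strategy not supported */
    return windowLog > (hasSIMD128 ? 14u : 17u);  /* thresholds from ZSTD_resolveRowMatchFinderMode() */
}
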
*/ -static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { - return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast; -} - -static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( - ZSTD_compressionParameters cParams) -{ - ZSTD_CCtx_params cctxParams; - /* should not matter, as all cParams are presumed properly defined */ - ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT); - cctxParams.cParams = cParams; - - /* Adjust advanced params according to cParams */ - cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); - if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) { - ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); - assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); - assert(cctxParams.ldmParams.hashRateLog < 32); - } - cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); - cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); - cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); - cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); - cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, - cctxParams.compressionLevel); - assert(!ZSTD_checkCParams(cParams)); - return cctxParams; -} - -static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( - ZSTD_customMem customMem) -{ - ZSTD_CCtx_params* params; - if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - params = (ZSTD_CCtx_params*)ZSTD_customCalloc( - sizeof(ZSTD_CCtx_params), customMem); - if (!params) { return NULL; } - ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); - params->customMem = customMem; - return params; -} - -ZSTD_CCtx_params* ZSTD_createCCtxParams(void) -{ - return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); -} - -size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) -{ - if (params == NULL) { return 0; } - ZSTD_customFree(params, params->customMem); - return 0; -} - -size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) -{ - return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); -} - -size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { - RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); - ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); - cctxParams->compressionLevel = compressionLevel; - cctxParams->fParams.contentSizeFlag = 1; - return 0; -} - -#define ZSTD_NO_CLEVEL 0 - -/** - * Initializes `cctxParams` from `params` and `compressionLevel`. - * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. - */ -static void -ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, - const ZSTD_parameters* params, - int compressionLevel) -{ - assert(!ZSTD_checkCParams(params->cParams)); - ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); - cctxParams->cParams = params->cParams; - cctxParams->fParams = params->fParams; - /* Should not matter, as all cParams are presumed properly defined. - * But, set it for tracing anyway. 
- */
-    cctxParams->compressionLevel = compressionLevel;
-    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
-    cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
-    cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
-    cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
-    cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
-    cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
-    DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
-             cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
-}
-
-size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
-{
-    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
-    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
-    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
-    return 0;
-}
-
-/**
- * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
- * @param params Validated zstd parameters.
- */
-static void ZSTD_CCtxParams_setZstdParams(
-        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
-{
-    assert(!ZSTD_checkCParams(params->cParams));
-    cctxParams->cParams = params->cParams;
-    cctxParams->fParams = params->fParams;
-    /* Should not matter, as all cParams are presumed properly defined.
-     * But, set it for tracing anyway.
-     */
-    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
-}
-
-ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
-{
-    ZSTD_bounds bounds = { 0, 0, 0 };
-
-    switch(param)
-    {
-    case ZSTD_c_compressionLevel:
-        bounds.lowerBound = ZSTD_minCLevel();
-        bounds.upperBound = ZSTD_maxCLevel();
-        return bounds;
-
-    case ZSTD_c_windowLog:
-        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
-        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_hashLog:
-        bounds.lowerBound = ZSTD_HASHLOG_MIN;
-        bounds.upperBound = ZSTD_HASHLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_chainLog:
-        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
-        bounds.upperBound = ZSTD_CHAINLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_searchLog:
-        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
-        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_minMatch:
-        bounds.lowerBound = ZSTD_MINMATCH_MIN;
-        bounds.upperBound = ZSTD_MINMATCH_MAX;
-        return bounds;
-
-    case ZSTD_c_targetLength:
-        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
-        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
-        return bounds;
-
-    case ZSTD_c_strategy:
-        bounds.lowerBound = ZSTD_STRATEGY_MIN;
-        bounds.upperBound = ZSTD_STRATEGY_MAX;
-        return bounds;
-
-    case ZSTD_c_contentSizeFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_checksumFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_dictIDFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_nbWorkers:
-        bounds.lowerBound = 0;
-#ifdef ZSTD_MULTITHREAD
-        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
-#else
-        bounds.upperBound = 0;
-#endif
-        return bounds;
-
-    case ZSTD_c_jobSize:
-        bounds.lowerBound = 0;
-#ifdef ZSTD_MULTITHREAD
-        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
-#else
-        bounds.upperBound = 0;
-#endif
-        return bounds;
-
-    case ZSTD_c_overlapLog:
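
/* Aside: a minimal sketch (not part of the original file) of querying these
 * bounds from application code rather than hard-coding limits; it mirrors the
 * internal ZSTD_cParam_clampBounds() below. ZSTD_cParam_getBounds() is stable
 * public API. */
#include <zstd.h>

static int clamp_to_bounds(ZSTD_cParameter param, int value)
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(param);
    if (ZSTD_isError(b.error)) return value;  /* unknown parameter: leave value unchanged */
    if (value < b.lowerBound) return b.lowerBound;
    if (value > b.upperBound) return b.upperBound;
    return value;
}
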
-#ifdef ZSTD_MULTITHREAD - bounds.lowerBound = ZSTD_OVERLAPLOG_MIN; - bounds.upperBound = ZSTD_OVERLAPLOG_MAX; -#else - bounds.lowerBound = 0; - bounds.upperBound = 0; -#endif - return bounds; - - case ZSTD_c_enableDedicatedDictSearch: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_enableLongDistanceMatching: - bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - case ZSTD_c_ldmHashLog: - bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN; - bounds.upperBound = ZSTD_LDM_HASHLOG_MAX; - return bounds; - - case ZSTD_c_ldmMinMatch: - bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN; - bounds.upperBound = ZSTD_LDM_MINMATCH_MAX; - return bounds; - - case ZSTD_c_ldmBucketSizeLog: - bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN; - bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX; - return bounds; - - case ZSTD_c_ldmHashRateLog: - bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN; - bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX; - return bounds; - - /* experimental parameters */ - case ZSTD_c_rsyncable: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_forceMaxWindow : - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_format: - ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); - bounds.lowerBound = ZSTD_f_zstd1; - bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */ - return bounds; - - case ZSTD_c_forceAttachDict: - ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad); - bounds.lowerBound = ZSTD_dictDefaultAttach; - bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */ - return bounds; - - case ZSTD_c_literalCompressionMode: - ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable); - bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - case ZSTD_c_targetCBlockSize: - bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; - bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; - return bounds; - - case ZSTD_c_srcSizeHint: - bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN; - bounds.upperBound = ZSTD_SRCSIZEHINT_MAX; - return bounds; - - case ZSTD_c_stableInBuffer: - case ZSTD_c_stableOutBuffer: - bounds.lowerBound = (int)ZSTD_bm_buffered; - bounds.upperBound = (int)ZSTD_bm_stable; - return bounds; - - case ZSTD_c_blockDelimiters: - bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters; - bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters; - return bounds; - - case ZSTD_c_validateSequences: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_useBlockSplitter: - bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - case ZSTD_c_useRowMatchFinder: - bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - case ZSTD_c_deterministicRefPrefix: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_prefetchCDictTables: - bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - case ZSTD_c_enableSeqProducerFallback: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - - case ZSTD_c_maxBlockSize: - bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; - bounds.upperBound = ZSTD_BLOCKSIZE_MAX; - return bounds; - - case ZSTD_c_searchForExternalRepcodes: - 
bounds.lowerBound = (int)ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_ps_disable; - return bounds; - - default: - bounds.error = ERROR(parameter_unsupported); - return bounds; - } -} - -/* ZSTD_cParam_clampBounds: - * Clamps the value into the bounded range. - */ -static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) -{ - ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); - if (ZSTD_isError(bounds.error)) return bounds.error; - if (*value < bounds.lowerBound) *value = bounds.lowerBound; - if (*value > bounds.upperBound) *value = bounds.upperBound; - return 0; -} - -#define BOUNDCHECK(cParam, val) \ - do { \ - RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ - parameter_outOfBound, "Param out of bounds"); \ - } while (0) - - -static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) -{ - switch(param) - { - case ZSTD_c_compressionLevel: - case ZSTD_c_hashLog: - case ZSTD_c_chainLog: - case ZSTD_c_searchLog: - case ZSTD_c_minMatch: - case ZSTD_c_targetLength: - case ZSTD_c_strategy: - return 1; - - case ZSTD_c_format: - case ZSTD_c_windowLog: - case ZSTD_c_contentSizeFlag: - case ZSTD_c_checksumFlag: - case ZSTD_c_dictIDFlag: - case ZSTD_c_forceMaxWindow : - case ZSTD_c_nbWorkers: - case ZSTD_c_jobSize: - case ZSTD_c_overlapLog: - case ZSTD_c_rsyncable: - case ZSTD_c_enableDedicatedDictSearch: - case ZSTD_c_enableLongDistanceMatching: - case ZSTD_c_ldmHashLog: - case ZSTD_c_ldmMinMatch: - case ZSTD_c_ldmBucketSizeLog: - case ZSTD_c_ldmHashRateLog: - case ZSTD_c_forceAttachDict: - case ZSTD_c_literalCompressionMode: - case ZSTD_c_targetCBlockSize: - case ZSTD_c_srcSizeHint: - case ZSTD_c_stableInBuffer: - case ZSTD_c_stableOutBuffer: - case ZSTD_c_blockDelimiters: - case ZSTD_c_validateSequences: - case ZSTD_c_useBlockSplitter: - case ZSTD_c_useRowMatchFinder: - case ZSTD_c_deterministicRefPrefix: - case ZSTD_c_prefetchCDictTables: - case ZSTD_c_enableSeqProducerFallback: - case ZSTD_c_maxBlockSize: - case ZSTD_c_searchForExternalRepcodes: - default: - return 0; - } -} - -size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) -{ - DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value); - if (cctx->streamStage != zcss_init) { - if (ZSTD_isUpdateAuthorized(param)) { - cctx->cParamsChanged = 1; - } else { - RETURN_ERROR(stage_wrong, "can only set params in cctx init stage"); - } } - - switch(param) - { - case ZSTD_c_nbWorkers: - RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported, - "MT not compatible with static alloc"); - break; - - case ZSTD_c_compressionLevel: - case ZSTD_c_windowLog: - case ZSTD_c_hashLog: - case ZSTD_c_chainLog: - case ZSTD_c_searchLog: - case ZSTD_c_minMatch: - case ZSTD_c_targetLength: - case ZSTD_c_strategy: - case ZSTD_c_ldmHashRateLog: - case ZSTD_c_format: - case ZSTD_c_contentSizeFlag: - case ZSTD_c_checksumFlag: - case ZSTD_c_dictIDFlag: - case ZSTD_c_forceMaxWindow: - case ZSTD_c_forceAttachDict: - case ZSTD_c_literalCompressionMode: - case ZSTD_c_jobSize: - case ZSTD_c_overlapLog: - case ZSTD_c_rsyncable: - case ZSTD_c_enableDedicatedDictSearch: - case ZSTD_c_enableLongDistanceMatching: - case ZSTD_c_ldmHashLog: - case ZSTD_c_ldmMinMatch: - case ZSTD_c_ldmBucketSizeLog: - case ZSTD_c_targetCBlockSize: - case ZSTD_c_srcSizeHint: - case ZSTD_c_stableInBuffer: - case ZSTD_c_stableOutBuffer: - case ZSTD_c_blockDelimiters: - case ZSTD_c_validateSequences: - case ZSTD_c_useBlockSplitter: - case ZSTD_c_useRowMatchFinder: - case ZSTD_c_deterministicRefPrefix: - case 
ZSTD_c_prefetchCDictTables: - case ZSTD_c_enableSeqProducerFallback: - case ZSTD_c_maxBlockSize: - case ZSTD_c_searchForExternalRepcodes: - break; - - default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); - } - return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); -} - -size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, - ZSTD_cParameter param, int value) -{ - DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value); - switch(param) - { - case ZSTD_c_format : - BOUNDCHECK(ZSTD_c_format, value); - CCtxParams->format = (ZSTD_format_e)value; - return (size_t)CCtxParams->format; - - case ZSTD_c_compressionLevel : { - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); - if (value == 0) - CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ - else - CCtxParams->compressionLevel = value; - if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel; - return 0; /* return type (size_t) cannot represent negative values */ - } - - case ZSTD_c_windowLog : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_windowLog, value); - CCtxParams->cParams.windowLog = (U32)value; - return CCtxParams->cParams.windowLog; - - case ZSTD_c_hashLog : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_hashLog, value); - CCtxParams->cParams.hashLog = (U32)value; - return CCtxParams->cParams.hashLog; - - case ZSTD_c_chainLog : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_chainLog, value); - CCtxParams->cParams.chainLog = (U32)value; - return CCtxParams->cParams.chainLog; - - case ZSTD_c_searchLog : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_searchLog, value); - CCtxParams->cParams.searchLog = (U32)value; - return (size_t)value; - - case ZSTD_c_minMatch : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_minMatch, value); - CCtxParams->cParams.minMatch = (U32)value; - return CCtxParams->cParams.minMatch; - - case ZSTD_c_targetLength : - BOUNDCHECK(ZSTD_c_targetLength, value); - CCtxParams->cParams.targetLength = (U32)value; - return CCtxParams->cParams.targetLength; - - case ZSTD_c_strategy : - if (value!=0) /* 0 => use default */ - BOUNDCHECK(ZSTD_c_strategy, value); - CCtxParams->cParams.strategy = (ZSTD_strategy)value; - return (size_t)CCtxParams->cParams.strategy; - - case ZSTD_c_contentSizeFlag : - /* Content size written in frame header _when known_ (default:1) */ - DEBUGLOG(4, "set content size flag = %u", (value!=0)); - CCtxParams->fParams.contentSizeFlag = value != 0; - return (size_t)CCtxParams->fParams.contentSizeFlag; - - case ZSTD_c_checksumFlag : - /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ - CCtxParams->fParams.checksumFlag = value != 0; - return (size_t)CCtxParams->fParams.checksumFlag; - - case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ - DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); - CCtxParams->fParams.noDictIDFlag = !value; - return !CCtxParams->fParams.noDictIDFlag; - - case ZSTD_c_forceMaxWindow : - CCtxParams->forceWindow = (value != 0); - return (size_t)CCtxParams->forceWindow; - - case ZSTD_c_forceAttachDict : { - const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref); - CCtxParams->attachDictPref = pref; - return CCtxParams->attachDictPref; - } - - case ZSTD_c_literalCompressionMode : { - const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - 
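
/* Aside: a minimal sketch (not part of the original file) of typical
 * advanced-API configuration handled by the dispatcher above. Levels are
 * clamped to [ZSTD_minCLevel(), ZSTD_maxCLevel()], and 0 selects
 * ZSTD_CLEVEL_DEFAULT. */
#include <zstd.h>

static size_t configure_cctx(ZSTD_CCtx* cctx)
{
    size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);  /* 32-bit content checksum at end of frame */
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 24);   /* 16 MiB window */
}
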
BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); - CCtxParams->literalCompressionMode = lcm; - return CCtxParams->literalCompressionMode; - } - - case ZSTD_c_nbWorkers : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); - return 0; -#else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); - CCtxParams->nbWorkers = value; - return (size_t)(CCtxParams->nbWorkers); -#endif - - case ZSTD_c_jobSize : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); - return 0; -#else - /* Adjust to the minimum non-default value. */ - if (value != 0 && value < ZSTDMT_JOBSIZE_MIN) - value = ZSTDMT_JOBSIZE_MIN; - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); - assert(value >= 0); - CCtxParams->jobSize = value; - return CCtxParams->jobSize; -#endif - - case ZSTD_c_overlapLog : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); - return 0; -#else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); - CCtxParams->overlapLog = value; - return (size_t)CCtxParams->overlapLog; -#endif - - case ZSTD_c_rsyncable : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); - return 0; -#else - FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); - CCtxParams->rsyncable = value; - return (size_t)CCtxParams->rsyncable; -#endif - - case ZSTD_c_enableDedicatedDictSearch : - CCtxParams->enableDedicatedDictSearch = (value!=0); - return (size_t)CCtxParams->enableDedicatedDictSearch; - - case ZSTD_c_enableLongDistanceMatching : - BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); - CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; - return CCtxParams->ldmParams.enableLdm; - - case ZSTD_c_ldmHashLog : - if (value!=0) /* 0 ==> auto */ - BOUNDCHECK(ZSTD_c_ldmHashLog, value); - CCtxParams->ldmParams.hashLog = (U32)value; - return CCtxParams->ldmParams.hashLog; - - case ZSTD_c_ldmMinMatch : - if (value!=0) /* 0 ==> default */ - BOUNDCHECK(ZSTD_c_ldmMinMatch, value); - CCtxParams->ldmParams.minMatchLength = (U32)value; - return CCtxParams->ldmParams.minMatchLength; - - case ZSTD_c_ldmBucketSizeLog : - if (value!=0) /* 0 ==> default */ - BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); - CCtxParams->ldmParams.bucketSizeLog = (U32)value; - return CCtxParams->ldmParams.bucketSizeLog; - - case ZSTD_c_ldmHashRateLog : - if (value!=0) /* 0 ==> default */ - BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); - CCtxParams->ldmParams.hashRateLog = (U32)value; - return CCtxParams->ldmParams.hashRateLog; - - case ZSTD_c_targetCBlockSize : - if (value!=0) { /* 0 ==> default */ - value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN); - BOUNDCHECK(ZSTD_c_targetCBlockSize, value); - } - CCtxParams->targetCBlockSize = (U32)value; - return CCtxParams->targetCBlockSize; - - case ZSTD_c_srcSizeHint : - if (value!=0) /* 0 ==> default */ - BOUNDCHECK(ZSTD_c_srcSizeHint, value); - CCtxParams->srcSizeHint = value; - return (size_t)CCtxParams->srcSizeHint; - - case ZSTD_c_stableInBuffer: - BOUNDCHECK(ZSTD_c_stableInBuffer, value); - CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; - return CCtxParams->inBufferMode; - - case ZSTD_c_stableOutBuffer: - BOUNDCHECK(ZSTD_c_stableOutBuffer, value); - CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; - return CCtxParams->outBufferMode; - - case ZSTD_c_blockDelimiters: - 
BOUNDCHECK(ZSTD_c_blockDelimiters, value); - CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; - return CCtxParams->blockDelimiters; - - case ZSTD_c_validateSequences: - BOUNDCHECK(ZSTD_c_validateSequences, value); - CCtxParams->validateSequences = value; - return (size_t)CCtxParams->validateSequences; - - case ZSTD_c_useBlockSplitter: - BOUNDCHECK(ZSTD_c_useBlockSplitter, value); - CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; - return CCtxParams->useBlockSplitter; - - case ZSTD_c_useRowMatchFinder: - BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); - CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; - return CCtxParams->useRowMatchFinder; - - case ZSTD_c_deterministicRefPrefix: - BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value); - CCtxParams->deterministicRefPrefix = !!value; - return (size_t)CCtxParams->deterministicRefPrefix; - - case ZSTD_c_prefetchCDictTables: - BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); - CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; - return CCtxParams->prefetchCDictTables; - - case ZSTD_c_enableSeqProducerFallback: - BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); - CCtxParams->enableMatchFinderFallback = value; - return (size_t)CCtxParams->enableMatchFinderFallback; - - case ZSTD_c_maxBlockSize: - if (value!=0) /* 0 ==> default */ - BOUNDCHECK(ZSTD_c_maxBlockSize, value); - CCtxParams->maxBlockSize = value; - return CCtxParams->maxBlockSize; - - case ZSTD_c_searchForExternalRepcodes: - BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value); - CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; - return CCtxParams->searchForExternalRepcodes; - - default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); - } -} - -size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value) -{ - return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); -} - -size_t ZSTD_CCtxParams_getParameter( - ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value) -{ - switch(param) - { - case ZSTD_c_format : - *value = CCtxParams->format; - break; - case ZSTD_c_compressionLevel : - *value = CCtxParams->compressionLevel; - break; - case ZSTD_c_windowLog : - *value = (int)CCtxParams->cParams.windowLog; - break; - case ZSTD_c_hashLog : - *value = (int)CCtxParams->cParams.hashLog; - break; - case ZSTD_c_chainLog : - *value = (int)CCtxParams->cParams.chainLog; - break; - case ZSTD_c_searchLog : - *value = CCtxParams->cParams.searchLog; - break; - case ZSTD_c_minMatch : - *value = CCtxParams->cParams.minMatch; - break; - case ZSTD_c_targetLength : - *value = CCtxParams->cParams.targetLength; - break; - case ZSTD_c_strategy : - *value = (unsigned)CCtxParams->cParams.strategy; - break; - case ZSTD_c_contentSizeFlag : - *value = CCtxParams->fParams.contentSizeFlag; - break; - case ZSTD_c_checksumFlag : - *value = CCtxParams->fParams.checksumFlag; - break; - case ZSTD_c_dictIDFlag : - *value = !CCtxParams->fParams.noDictIDFlag; - break; - case ZSTD_c_forceMaxWindow : - *value = CCtxParams->forceWindow; - break; - case ZSTD_c_forceAttachDict : - *value = CCtxParams->attachDictPref; - break; - case ZSTD_c_literalCompressionMode : - *value = CCtxParams->literalCompressionMode; - break; - case ZSTD_c_nbWorkers : -#ifndef ZSTD_MULTITHREAD - assert(CCtxParams->nbWorkers == 0); -#endif - *value = CCtxParams->nbWorkers; - break; - case ZSTD_c_jobSize : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); -#else - 
assert(CCtxParams->jobSize <= INT_MAX); - *value = (int)CCtxParams->jobSize; - break; -#endif - case ZSTD_c_overlapLog : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); -#else - *value = CCtxParams->overlapLog; - break; -#endif - case ZSTD_c_rsyncable : -#ifndef ZSTD_MULTITHREAD - RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); -#else - *value = CCtxParams->rsyncable; - break; -#endif - case ZSTD_c_enableDedicatedDictSearch : - *value = CCtxParams->enableDedicatedDictSearch; - break; - case ZSTD_c_enableLongDistanceMatching : - *value = CCtxParams->ldmParams.enableLdm; - break; - case ZSTD_c_ldmHashLog : - *value = CCtxParams->ldmParams.hashLog; - break; - case ZSTD_c_ldmMinMatch : - *value = CCtxParams->ldmParams.minMatchLength; - break; - case ZSTD_c_ldmBucketSizeLog : - *value = CCtxParams->ldmParams.bucketSizeLog; - break; - case ZSTD_c_ldmHashRateLog : - *value = CCtxParams->ldmParams.hashRateLog; - break; - case ZSTD_c_targetCBlockSize : - *value = (int)CCtxParams->targetCBlockSize; - break; - case ZSTD_c_srcSizeHint : - *value = (int)CCtxParams->srcSizeHint; - break; - case ZSTD_c_stableInBuffer : - *value = (int)CCtxParams->inBufferMode; - break; - case ZSTD_c_stableOutBuffer : - *value = (int)CCtxParams->outBufferMode; - break; - case ZSTD_c_blockDelimiters : - *value = (int)CCtxParams->blockDelimiters; - break; - case ZSTD_c_validateSequences : - *value = (int)CCtxParams->validateSequences; - break; - case ZSTD_c_useBlockSplitter : - *value = (int)CCtxParams->useBlockSplitter; - break; - case ZSTD_c_useRowMatchFinder : - *value = (int)CCtxParams->useRowMatchFinder; - break; - case ZSTD_c_deterministicRefPrefix: - *value = (int)CCtxParams->deterministicRefPrefix; - break; - case ZSTD_c_prefetchCDictTables: - *value = (int)CCtxParams->prefetchCDictTables; - break; - case ZSTD_c_enableSeqProducerFallback: - *value = CCtxParams->enableMatchFinderFallback; - break; - case ZSTD_c_maxBlockSize: - *value = (int)CCtxParams->maxBlockSize; - break; - case ZSTD_c_searchForExternalRepcodes: - *value = (int)CCtxParams->searchForExternalRepcodes; - break; - default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); - } - return 0; -} - -/** ZSTD_CCtx_setParametersUsingCCtxParams() : - * just applies `params` into `cctx` - * no action is performed, parameters are merely stored. - * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. - * This is possible even if a compression is ongoing. - * In which case, new parameters will be applied on the fly, starting with next compression job. 
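
/* Aside: a minimal sketch (not part of the original file) of the set/get
 * round-trip: the getter reports the *requested* value, not the value as later
 * adjusted for a given source. ZSTD_CCtx_getParameter() may require
 * ZSTD_STATIC_LINKING_ONLY depending on the zstd version in use. */
#define ZSTD_STATIC_LINKING_ONLY
#include <assert.h>
#include <zstd.h>

static void roundtrip_level(ZSTD_CCtx* cctx)
{
    int level = 0;
    size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 7);
    assert(!ZSTD_isError(err));
    err = ZSTD_CCtx_getParameter(cctx, ZSTD_c_compressionLevel, &level);
    assert(!ZSTD_isError(err) && level == 7);
    (void)err;
}
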
- */ -size_t ZSTD_CCtx_setParametersUsingCCtxParams( - ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) -{ - DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "The context is in the wrong stage!"); - RETURN_ERROR_IF(cctx->cdict, stage_wrong, - "Can't override parameters with cdict attached (some must " - "be inherited from the cdict)."); - - cctx->requestedParams = *params; - return 0; -} - -size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) -{ - ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */); - DEBUGLOG(4, "ZSTD_CCtx_setCParams"); - /* only update if all parameters are valid */ - FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, cparams.windowLog), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, cparams.chainLog), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, cparams.hashLog), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, cparams.searchLog), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, cparams.minMatch), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, cparams.targetLength), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, cparams.strategy), ""); - return 0; -} - -size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams) -{ - ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */); - DEBUGLOG(4, "ZSTD_CCtx_setFParams"); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), ""); - FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), ""); - return 0; -} - -size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params) -{ - DEBUGLOG(4, "ZSTD_CCtx_setParams"); - /* First check cParams, because we want to update all or none. */ - FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); - /* Next set fParams, because this could fail if the cctx isn't in init stage. */ - FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), ""); - /* Finally set cParams, which should succeed. */ - FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), ""); - return 0; -} - -size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize); - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't set pledgedSrcSize when not in init stage."); - cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; - return 0; -} - -static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( - int const compressionLevel, - size_t const dictSize); -static int ZSTD_dedicatedDictSearch_isSupported( - const ZSTD_compressionParameters* cParams); -static void ZSTD_dedicatedDictSearch_revertCParams( - ZSTD_compressionParameters* cParams); - -/** - * Initializes the local dictionary using requested parameters. - * NOTE: Initialization does not employ the pledged src size, - * because the dictionary may be used for multiple compressions. - */ -static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) -{ - ZSTD_localDict* const dl = &cctx->localDict; - if (dl->dict == NULL) { - /* No local dictionary. 
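
/* Aside: a minimal sketch (not part of the original file) of pledging the
 * source size before a streaming compression, so the frame header can record
 * it (the contentSizeFlag handling above) and the library can verify it at
 * end of frame. Stable API only. */
#include <zstd.h>

static size_t compress_with_pledge(ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    size_t err = ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
    if (ZSTD_isError(err)) return err;
    /* assumes dstCapacity >= ZSTD_compressBound(srcSize), so one call finishes the frame */
    return out.pos;
}
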
*/ - assert(dl->dictBuffer == NULL); - assert(dl->cdict == NULL); - assert(dl->dictSize == 0); - return 0; - } - if (dl->cdict != NULL) { - /* Local dictionary already initialized. */ - assert(cctx->cdict == dl->cdict); - return 0; - } - assert(dl->dictSize > 0); - assert(cctx->cdict == NULL); - assert(cctx->prefixDict.dict == NULL); - - dl->cdict = ZSTD_createCDict_advanced2( - dl->dict, - dl->dictSize, - ZSTD_dlm_byRef, - dl->dictContentType, - &cctx->requestedParams, - cctx->customMem); - RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed"); - cctx->cdict = dl->cdict; - return 0; -} - -size_t ZSTD_CCtx_loadDictionary_advanced( - ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType) -{ - DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't load a dictionary when cctx is not in init stage."); - ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */ - if (dict == NULL || dictSize == 0) /* no dictionary */ - return 0; - if (dictLoadMethod == ZSTD_dlm_byRef) { - cctx->localDict.dict = dict; - } else { - /* copy dictionary content inside CCtx to own its lifetime */ - void* dictBuffer; - RETURN_ERROR_IF(cctx->staticSize, memory_allocation, - "static CCtx can't allocate for an internal copy of dictionary"); - dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); - RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation, - "allocation failed for dictionary content"); - ZSTD_memcpy(dictBuffer, dict, dictSize); - cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */ - cctx->localDict.dict = dictBuffer; /* read-only reference */ - } - cctx->localDict.dictSize = dictSize; - cctx->localDict.dictContentType = dictContentType; - return 0; -} - -size_t ZSTD_CCtx_loadDictionary_byReference( - ZSTD_CCtx* cctx, const void* dict, size_t dictSize) -{ - return ZSTD_CCtx_loadDictionary_advanced( - cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); -} - -size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) -{ - return ZSTD_CCtx_loadDictionary_advanced( - cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); -} - - -size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) -{ - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't ref a dict when ctx not in init stage."); - /* Free the existing local cdict (if any) to save memory. */ - ZSTD_clearAllDicts(cctx); - cctx->cdict = cdict; - return 0; -} - -size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool) -{ - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't ref a pool when ctx not in init stage."); - cctx->pool = pool; - return 0; -} - -size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) -{ - return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); -} - -size_t ZSTD_CCtx_refPrefix_advanced( - ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) -{ - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't ref a prefix when ctx not in init stage."); - ZSTD_clearAllDicts(cctx); - if (prefix != NULL && prefixSize > 0) { - cctx->prefixDict.dict = prefix; - cctx->prefixDict.dictSize = prefixSize; - cctx->prefixDict.dictContentType = dictContentType; - } - return 0; -} - -/*! 
ZSTD_CCtx_reset() :
- *  Also dumps dictionary */
-size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
-{
-    if ( (reset == ZSTD_reset_session_only)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        cctx->streamStage = zcss_init;
-        cctx->pledgedSrcSizePlusOne = 0;
-    }
-    if ( (reset == ZSTD_reset_parameters)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
-                        "Reset parameters is only possible during init stage.");
-        ZSTD_clearAllDicts(cctx);
-        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
-    }
-    return 0;
-}
-
-
-/** ZSTD_checkCParams() :
-    control CParam values remain within authorized range.
-    @return : 0, or an error code if one value is beyond authorized range */
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
-{
-    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
-    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
-    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
-    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
-    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
-    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
-    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
-    return 0;
-}
-
-/** ZSTD_clampCParams() :
- *  make CParam values within valid range.
- *  @return : valid CParams */
-static ZSTD_compressionParameters
-ZSTD_clampCParams(ZSTD_compressionParameters cParams)
-{
-#   define CLAMP_TYPE(cParam, val, type)                                 \
-        do {                                                             \
-            ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);    \
-            if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
-            if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
-        } while (0)
-#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
-    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
-    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
-    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
-    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
-    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
-    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
-    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
-    return cParams;
-}
-
-/** ZSTD_cycleLog() :
- *  condition for correct operation : hashLog > 1 */
-U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
-{
-    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
-    return hashLog - btScale;
-}
-
-/** ZSTD_dictAndWindowLog() :
- * Returns an adjusted window log that is large enough to fit the source and the dictionary.
- * The zstd format says that the entire dictionary is valid if one byte of the dictionary
- * is within the window. So the hashLog and chainLog should be large enough to reference both
- * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
- * the hashLog and windowLog.
- * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
- */
-static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
-{
-    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
-    /* No dictionary ==> No change */
-    if (dictSize == 0) {
-        return windowLog;
-    }
-    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
-    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
-    {
-        U64 const windowSize = 1ULL << windowLog;
-        U64 const dictAndWindowSize = dictSize + windowSize;
-        /* If the window size is already large enough to fit both the source and the dictionary
-         * then just use the window size. Otherwise adjust so that it fits the dictionary and
-         * the window.
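
/* Aside: a minimal sketch (not part of the original file) of dictionary
 * lifetime across resets, per ZSTD_CCtx_reset() above: a session-only reset
 * keeps the loaded dictionary and parameters for the next frame, while a
 * parameter reset also drops the dictionary (ZSTD_clearAllDicts). */
#include <zstd.h>

static size_t two_frames_same_dict(ZSTD_CCtx* cctx,
                                   const void* dict, size_t dictSize,
                                   void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    size_t err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  /* byCopy: cctx owns its copy */
    if (ZSTD_isError(err)) return err;
    err = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(err)) return err;
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);  /* dictionary survives for the next frame */
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}
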
- */ - if (windowSize >= dictSize + srcSize) { - return windowLog; /* Window size large enough already */ - } else if (dictAndWindowSize >= maxWindowSize) { - return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */ - } else { - return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1; - } - } -} - -/** ZSTD_adjustCParams_internal() : - * optimize `cPar` for a specified input (`srcSize` and `dictSize`). - * mostly downsize to reduce memory consumption and initialization latency. - * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. - * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`. - * note : `srcSize==0` means 0! - * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ -static ZSTD_compressionParameters -ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, - unsigned long long srcSize, - size_t dictSize, - ZSTD_cParamMode_e mode, - ZSTD_paramSwitch_e useRowMatchFinder) -{ - const U64 minSrcSize = 513; /* (1<<9) + 1 */ - const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); - assert(ZSTD_checkCParams(cPar)==0); - - /* Cascade the selected strategy down to the next-highest one built into - * this binary. */ -#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_btultra2) { - cPar.strategy = ZSTD_btultra; - } - if (cPar.strategy == ZSTD_btultra) { - cPar.strategy = ZSTD_btopt; - } -#endif -#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_btopt) { - cPar.strategy = ZSTD_btlazy2; - } -#endif -#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_btlazy2) { - cPar.strategy = ZSTD_lazy2; - } -#endif -#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_lazy2) { - cPar.strategy = ZSTD_lazy; - } -#endif -#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_lazy) { - cPar.strategy = ZSTD_greedy; - } -#endif -#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_greedy) { - cPar.strategy = ZSTD_dfast; - } -#endif -#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR - if (cPar.strategy == ZSTD_dfast) { - cPar.strategy = ZSTD_fast; - cPar.targetLength = 0; - } -#endif - - switch (mode) { - case ZSTD_cpm_unknown: - case ZSTD_cpm_noAttachDict: - /* If we don't know the source size, don't make any - * assumptions about it. We will already have selected - * smaller parameters if a dictionary is in use. - */ - break; - case ZSTD_cpm_createCDict: - /* Assume a small source size when creating a dictionary - * with an unknown source size. - */ - if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) - srcSize = minSrcSize; - break; - case ZSTD_cpm_attachDict: - /* Dictionary has its own dedicated parameters which have - * already been selected. We are selecting parameters - * for only the source. - */ - dictSize = 0; - break; - default: - assert(0); - break; - } - - /* resize windowLog if input is small enough, to use less memory */ - if ( (srcSize <= maxWindowResize) - && (dictSize <= maxWindowResize) ) { - U32 const tSize = (U32)(srcSize + dictSize); - static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; - U32 const srcLog = (tSize < hashSizeMin) ? 
ZSTD_HASHLOG_MIN : - ZSTD_highbit32(tSize-1) + 1; - if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; - } - if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) { - U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize); - U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); - if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1; - if (cycleLog > dictAndWindowLog) - cPar.chainLog -= (cycleLog - dictAndWindowLog); - } - - if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) - cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ - - /* We can't use more than 32 bits of hash in total, so that means that we require: - * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 - */ - if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { - U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; - if (cPar.hashLog > maxShortCacheHashLog) { - cPar.hashLog = maxShortCacheHashLog; - } - if (cPar.chainLog > maxShortCacheHashLog) { - cPar.chainLog = maxShortCacheHashLog; - } - } - - - /* At this point, we aren't 100% sure if we are using the row match finder. - * Unless it is explicitly disabled, conservatively assume that it is enabled. - * In this case it will only be disabled for small sources, so shrinking the - * hash log a little bit shouldn't result in any ratio loss. - */ - if (useRowMatchFinder == ZSTD_ps_auto) - useRowMatchFinder = ZSTD_ps_enable; - - /* We can't hash more than 32-bits in total. So that means that we require: - * (hashLog - rowLog + 8) <= 32 - */ - if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { - /* Switch to 32-entry rows if searchLog is 5 (or more) */ - U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); - U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; - U32 const maxHashLog = maxRowHashLog + rowLog; - assert(cPar.hashLog >= rowLog); - if (cPar.hashLog > maxHashLog) { - cPar.hashLog = maxHashLog; - } - } - - return cPar; -} - -ZSTD_compressionParameters -ZSTD_adjustCParams(ZSTD_compressionParameters cPar, - unsigned long long srcSize, - size_t dictSize) -{ - cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ - if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); -} - -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); - -static void ZSTD_overrideCParams( - ZSTD_compressionParameters* cParams, - const ZSTD_compressionParameters* overrides) -{ - if (overrides->windowLog) cParams->windowLog = overrides->windowLog; - if (overrides->hashLog) cParams->hashLog = overrides->hashLog; - if (overrides->chainLog) cParams->chainLog = overrides->chainLog; - if (overrides->searchLog) cParams->searchLog = overrides->searchLog; - if (overrides->minMatch) cParams->minMatch = overrides->minMatch; - if (overrides->targetLength) cParams->targetLength = overrides->targetLength; - if (overrides->strategy) cParams->strategy = overrides->strategy; -} - -ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) -{ - ZSTD_compressionParameters cParams; - if (srcSizeHint == 
ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
-        srcSizeHint = CCtxParams->srcSizeHint;
-    }
-    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
-    if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
-    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
-    assert(!ZSTD_checkCParams(cParams));
-    /* srcSizeHint == 0 means 0 */
-    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
-}
-
-static size_t
-ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
-                       const ZSTD_paramSwitch_e useRowMatchFinder,
-                       const U32 enableDedicatedDictSearch,
-                       const U32 forCCtx)
-{
-    /* chain table size should be 0 for fast or row-hash strategies */
-    size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
-                                 ? ((size_t)1 << cParams->chainLog)
-                                 : 0;
-    size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
-    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
-     * surrounded by redzones in ASAN. */
-    size_t const tableSpace = chainSize * sizeof(U32)
-                            + hSize * sizeof(U32)
-                            + h3Size * sizeof(U32);
-    size_t const optPotentialSpace =
-        ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
-      + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
-      + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
-    size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
-                                            ? ZSTD_cwksp_aligned_alloc_size(hSize)
-                                            : 0;
-    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
-                                ? optPotentialSpace
-                                : 0;
-    size_t const slackSpace = ZSTD_cwksp_slack_space_required();
-
-    /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
-    ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
-    assert(useRowMatchFinder != ZSTD_ps_auto);
-
-    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
-                (U32)chainSize, (U32)hSize, (U32)h3Size);
-    return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
-}
-
-/* Helper function for calculating memory requirements.
- * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
-static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
-    U32 const divider = (minMatch==3 || useSequenceProducer) ?
3 : 4; - return blockSize / divider; -} - -static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( - const ZSTD_compressionParameters* cParams, - const ldmParams_t* ldmParams, - const int isStatic, - const ZSTD_paramSwitch_e useRowMatchFinder, - const size_t buffInSize, - const size_t buffOutSize, - const U64 pledgedSrcSize, - int useSequenceProducer, - size_t maxBlockSize) -{ - size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); - size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); - size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); - size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) - + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) - + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); - size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE); - size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); - size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1); - - size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); - size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); - size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? - ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; - - - size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) - + ZSTD_cwksp_alloc_size(buffOutSize); - - size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; - - size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); - size_t const externalSeqSpace = useSequenceProducer - ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) - : 0; - - size_t const neededSpace = - cctxSpace + - entropySpace + - blockStateSpace + - ldmSpace + - ldmSeqSpace + - matchStateSize + - tokenSpace + - bufferSpace + - externalSeqSpace; - - DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); - return neededSpace; -} - -size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) -{ - ZSTD_compressionParameters const cParams = - ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, - &cParams); - - RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); - /* estimateCCtxSize is for one-shot compression. So no buffers should - * be needed. However, we still allocate two 0-sized buffers, which can - * take space under ASAN. 
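
/* Aside: a minimal sketch (not part of the original file) contrasting the
 * one-shot estimate computed here with the streaming estimate defined below,
 * which additionally budgets a window-plus-block input buffer and a
 * bound-sized output buffer. Both functions sit behind
 * ZSTD_STATIC_LINKING_ONLY. */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

static void print_estimates(int level)
{
    printf("level %d: one-shot cctx %zu B, streaming cctx %zu B\n",
           level, ZSTD_estimateCCtxSize(level), ZSTD_estimateCStreamSize(level));
}
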
 */
-    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
-        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0,
-        ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
-}
-
-size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
-{
-    ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
-    if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
-        /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
-        size_t noRowCCtxSize;
-        size_t rowCCtxSize;
-        initialParams.useRowMatchFinder = ZSTD_ps_disable;
-        noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
-        initialParams.useRowMatchFinder = ZSTD_ps_enable;
-        rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
-        return MAX(noRowCCtxSize, rowCCtxSize);
-    } else {
-        return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
-    }
-}
-
-static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
-{
-    int tier = 0;
-    size_t largestSize = 0;
-    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
-    for (; tier < 4; ++tier) {
-        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
-        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
-        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
-    }
-    return largestSize;
-}
-
-size_t ZSTD_estimateCCtxSize(int compressionLevel)
-{
-    int level;
-    size_t memBudget = 0;
-    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
-        /* Ensure monotonically increasing memory usage as compression level increases */
-        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
-        if (newMB > memBudget) memBudget = newMB;
-    }
-    return memBudget;
-}
-
-size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
-{
-    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
-    {   ZSTD_compressionParameters const cParams =
-                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
-        size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);
-        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
-                ? ((size_t)1 << cParams.windowLog) + blockSize
-                : 0;
-        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
-                ?
ZSTD_compressBound(blockSize) + 1 - : 0; - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams); - - return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, - ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize); - } -} - -size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) -{ - ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); - if (ZSTD_rowMatchFinderSupported(cParams.strategy)) { - /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ - size_t noRowCCtxSize; - size_t rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_ps_disable; - noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_ps_enable; - rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - return MAX(noRowCCtxSize, rowCCtxSize); - } else { - return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - } -} - -static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) -{ - ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - return ZSTD_estimateCStreamSize_usingCParams(cParams); -} - -size_t ZSTD_estimateCStreamSize(int compressionLevel) -{ - int level; - size_t memBudget = 0; - for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { - size_t const newMB = ZSTD_estimateCStreamSize_internal(level); - if (newMB > memBudget) memBudget = newMB; - } - return memBudget; -} - -/* ZSTD_getFrameProgression(): - * tells how much data has been consumed (input) and produced (output) for current frame. - * able to count progression inside worker threads (non-blocking mode). - */ -ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) -{ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers > 0) { - return ZSTDMT_getFrameProgression(cctx->mtctx); - } -#endif - { ZSTD_frameProgression fp; - size_t const buffered = (cctx->inBuff == NULL) ? 0 : - cctx->inBuffPos - cctx->inToCompress; - if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); - assert(buffered <= ZSTD_BLOCKSIZE_MAX); - fp.ingested = cctx->consumedSrcSize + buffered; - fp.consumed = cctx->consumedSrcSize; - fp.produced = cctx->producedCSize; - fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */ - fp.currentJobID = 0; - fp.nbActiveWorkers = 0; - return fp; -} } - -/*! ZSTD_toFlushNow() - * Only useful for multithreading scenarios currently (nbWorkers >= 1).
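/* Illustrative sketch: polling ZSTD_getFrameProgression() (defined above)
 * from a streaming loop. Field names are from ZSTD_frameProgression in
 * zstd.h; the printf reporting (and <stdio.h>) is an assumption made for
 * the example only. */
#if 0
static void report_progress(const ZSTD_CCtx* cctx)
{
    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    /* ingested >= consumed : some input may still sit in the input buffer */
    printf("ingested=%llu consumed=%llu produced=%llu flushed=%llu\n",
           (unsigned long long)fp.ingested, (unsigned long long)fp.consumed,
           (unsigned long long)fp.produced, (unsigned long long)fp.flushed);
}
#endif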
- */ -size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) -{ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers > 0) { - return ZSTDMT_toFlushNow(cctx->mtctx); - } -#endif - (void)cctx; - return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ -} - -static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, - ZSTD_compressionParameters cParams2) -{ - (void)cParams1; - (void)cParams2; - assert(cParams1.windowLog == cParams2.windowLog); - assert(cParams1.chainLog == cParams2.chainLog); - assert(cParams1.hashLog == cParams2.hashLog); - assert(cParams1.searchLog == cParams2.searchLog); - assert(cParams1.minMatch == cParams2.minMatch); - assert(cParams1.targetLength == cParams2.targetLength); - assert(cParams1.strategy == cParams2.strategy); -} - -void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) -{ - int i; - for (i = 0; i < ZSTD_REP_NUM; ++i) - bs->rep[i] = repStartValue[i]; - bs->entropy.huf.repeatMode = HUF_repeat_none; - bs->entropy.fse.offcode_repeatMode = FSE_repeat_none; - bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none; - bs->entropy.fse.litlength_repeatMode = FSE_repeat_none; -} - -/*! ZSTD_invalidateMatchState() - * Invalidate all the matches in the match finder tables. - * Requires nextSrc and base to be set (can be NULL). - */ -static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) -{ - ZSTD_window_clear(&ms->window); - - ms->nextToUpdate = ms->window.dictLimit; - ms->loadedDictEnd = 0; - ms->opt.litLengthSum = 0; /* force reset of btopt stats */ - ms->dictMatchState = NULL; -} - -/** - * Controls, for this matchState reset, whether the tables need to be cleared / - * prepared for the coming compression (ZSTDcrp_makeClean), or whether the - * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a - * subsequent operation will overwrite the table space anyways (e.g., copying - * the matchState contents in from a CDict). - */ -typedef enum { - ZSTDcrp_makeClean, - ZSTDcrp_leaveDirty -} ZSTD_compResetPolicy_e; - -/** - * Controls, for this matchState reset, whether indexing can continue where it - * left off (ZSTDirp_continue), or whether it needs to be restarted from zero - * (ZSTDirp_reset). 
*/ -typedef enum { - ZSTDirp_continue, - ZSTDirp_reset -} ZSTD_indexResetPolicy_e; - -typedef enum { - ZSTD_resetTarget_CDict, - ZSTD_resetTarget_CCtx -} ZSTD_resetTarget_e; - -/* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ -static U64 ZSTD_bitmix(U64 val, U64 len) { - val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24); - val *= 0x9FB21C651E98DF25ULL; - val ^= (val >> 35) + len ; - val *= 0x9FB21C651E98DF25ULL; - return val ^ (val >> 28); -} - -/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ -static void ZSTD_advanceHashSalt(ZSTD_matchState_t* ms) { - ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4); -} - -static size_t -ZSTD_reset_matchState(ZSTD_matchState_t* ms, - ZSTD_cwksp* ws, - const ZSTD_compressionParameters* cParams, - const ZSTD_paramSwitch_e useRowMatchFinder, - const ZSTD_compResetPolicy_e crp, - const ZSTD_indexResetPolicy_e forceResetIndex, - const ZSTD_resetTarget_e forWho) -{ - /* disable chain table allocation for fast or row-based strategies */ - size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, - ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict)) - ? ((size_t)1 << cParams->chainLog) - : 0; - size_t const hSize = ((size_t)1) << cParams->hashLog; - U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; - size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; - - DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); - assert(useRowMatchFinder != ZSTD_ps_auto); - if (forceResetIndex == ZSTDirp_reset) { - ZSTD_window_init(&ms->window); - ZSTD_cwksp_mark_tables_dirty(ws); - } - - ms->hashLog3 = hashLog3; - ms->lazySkipping = 0; - - ZSTD_invalidateMatchState(ms); - - assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */ - - ZSTD_cwksp_clear_tables(ws); - - DEBUGLOG(5, "reserving table space"); - /* table Space */ - ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32)); - ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32)); - ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32)); - RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, - "failed a workspace allocation in ZSTD_reset_matchState"); - - DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty); - if (crp!=ZSTDcrp_leaveDirty) { - /* reset tables only */ - ZSTD_cwksp_clean_tables(ws); - } - - if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) { - /* Row match finder needs an additional table of hashes ("tags") */ - size_t const tagTableSize = hSize; - /* We want to generate a new salt in case we reset a Cctx, but we always want to use - * 0 when we reset a Cdict */ - if(forWho == ZSTD_resetTarget_CCtx) { - ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); - ZSTD_advanceHashSalt(ms); - } else { - /* When we are not salting we want to always memset the memory */ - ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize); - ZSTD_memset(ms->tagTable, 0, tagTableSize); - ms->hashSalt = 0; - } - { /* Switch to 32-entry rows if searchLog is 5 (or more) */ - U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); - assert(cParams->hashLog >= rowLog); - ms->rowHashLog = cParams->hashLog - rowLog; - } - } - - /* opt parser space */ - if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { - DEBUGLOG(4, "reserving optimal parser space"); - ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned)); - ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); - ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); - ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); - ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)); - ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); - } - - ms->cParams = *cParams; - - RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, - "failed a workspace allocation in ZSTD_reset_matchState"); - return 0; -} - -/* ZSTD_indexTooCloseToMax() : - * minor optimization : prefer memset() rather than reduceIndex() - * which is measurably slow in some circumstances (reported for Visual Studio). - * Works when re-using a context for a lot of smallish inputs : - * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN, - * memset() will be triggered before reduceIndex(). - */ -#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB) -static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) -{ - return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); -} - -/** ZSTD_dictTooBig(): - * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in - * one go generically. So we ensure that in that case we reset the tables to zero, - * so that we can load as much of the dictionary as possible. - */ -static int ZSTD_dictTooBig(size_t const loadedDictSize) -{ - return loadedDictSize > ZSTD_CHUNKSIZE_MAX; -} - -/*! ZSTD_resetCCtx_internal() : - * @param loadedDictSize The size of the dictionary to be loaded - * into the context, if any. If no dictionary is used, or the - * dictionary is being attached / copied, then pass 0. - * note : `params` are assumed fully validated at this stage. - */ -static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, - ZSTD_CCtx_params const* params, - U64 const pledgedSrcSize, - size_t const loadedDictSize, - ZSTD_compResetPolicy_e const crp, - ZSTD_buffered_policy_e const zbuff) -{ - ZSTD_cwksp* const ws = &zc->workspace; - DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", - (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); - assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); - - zc->isFirstBlock = 1; - - /* Set applied params early so we can modify them for LDM, - * and point params at the applied params.
*/ - zc->appliedParams = *params; - params = &zc->appliedParams; - - assert(params->useRowMatchFinder != ZSTD_ps_auto); - assert(params->useBlockSplitter != ZSTD_ps_auto); - assert(params->ldmParams.enableLdm != ZSTD_ps_auto); - assert(params->maxBlockSize != 0); - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* Adjust long distance matching parameters */ - ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams); - assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog); - assert(params->ldmParams.hashRateLog < 32); - } - - { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(params->maxBlockSize, windowSize); - size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params)); - size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) - ? ZSTD_compressBound(blockSize) + 1 - : 0; - size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered) - ? windowSize + blockSize - : 0; - size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize); - - int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); - int const dictTooBig = ZSTD_dictTooBig(loadedDictSize); - ZSTD_indexResetPolicy_e needsIndexReset = - (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue; - - size_t const neededSpace = - ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, - buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize); - - FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); - - if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0); - - { /* Check if workspace is large enough, alloc a new one if needed */ - int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; - int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); - int resizeWorkspace = workspaceTooSmall || workspaceWasteful; - DEBUGLOG(4, "Need %zu B workspace", neededSpace); - DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); - - if (resizeWorkspace) { - DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB", - ZSTD_cwksp_sizeof(ws) >> 10, - neededSpace >> 10); - - RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize"); - - needsIndexReset = ZSTDirp_reset; - - ZSTD_cwksp_free(ws, zc->customMem); - FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), ""); - - DEBUGLOG(5, "reserving object space"); - /* Statically sized space.
- * entropyWorkspace never moves, - * though prev/next block swap places */ - assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); - zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); - RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); - zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); - RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); - zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); - RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); - } } - - ZSTD_cwksp_clear(ws); - - /* init params */ - zc->blockState.matchState.cParams = params->cParams; - zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable; - zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; - zc->consumedSrcSize = 0; - zc->producedCSize = 0; - if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) - zc->appliedParams.fParams.contentSizeFlag = 0; - DEBUGLOG(4, "pledged content size : %u ; flag : %u", - (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); - zc->blockSize = blockSize; - - XXH64_reset(&zc->xxhState, 0); - zc->stage = ZSTDcs_init; - zc->dictID = 0; - zc->dictContentSize = 0; - - ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); - - FORWARD_IF_ERROR(ZSTD_reset_matchState( - &zc->blockState.matchState, - ws, - &params->cParams, - params->useRowMatchFinder, - crp, - needsIndexReset, - ZSTD_resetTarget_CCtx), ""); - - zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); - - /* ldm hash table */ - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* TODO: avoid memset? */ - size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; - zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); - ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); - zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); - zc->maxNbLdmSequences = maxNbLdmSeq; - - ZSTD_window_init(&zc->ldmState.window); - zc->ldmState.loadedDictEnd = 0; - } - - /* reserve space for block-level external sequences */ - if (ZSTD_hasExtSeqProd(params)) { - size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); - zc->extSeqBufCapacity = maxNbExternalSeq; - zc->extSeqBuf = - (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); - } - - /* buffers */ - - /* ZSTD_wildcopy() is used to copy into the literals buffer, - * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. - */ - zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); - zc->seqStore.maxNbLit = blockSize; - - zc->bufferedPolicy = zbuff; - zc->inBuffSize = buffInSize; - zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); - zc->outBuffSize = buffOutSize; - zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); - - /* ldm bucketOffsets table */ - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* TODO: avoid memset?
*/ - size_t const numBuckets = - ((size_t)1) << (params->ldmParams.hashLog - - params->ldmParams.bucketSizeLog); - zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); - ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets); - } - - /* sequences storage */ - ZSTD_referenceExternalSequences(zc, NULL, 0); - zc->seqStore.maxNbSeq = maxNbSeq; - zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - - DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace)); - - zc->initialized = 1; - - return 0; - } -} - -/* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! */ -void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { - int i; - for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0; - assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); -} - -/* These are the approximate sizes for each strategy past which copying the - * dictionary tables into the working context is faster than using them - * in-place. - */ -static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = { - 8 KB, /* unused */ - 8 KB, /* ZSTD_fast */ - 16 KB, /* ZSTD_dfast */ - 32 KB, /* ZSTD_greedy */ - 32 KB, /* ZSTD_lazy */ - 32 KB, /* ZSTD_lazy2 */ - 32 KB, /* ZSTD_btlazy2 */ - 32 KB, /* ZSTD_btopt */ - 8 KB, /* ZSTD_btultra */ - 8 KB /* ZSTD_btultra2 */ -}; - -static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, - U64 pledgedSrcSize) -{ - size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy]; - int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; - return dedicatedDictSearch - || ( ( pledgedSrcSize <= cutoff - || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN - || params->attachDictPref == ZSTD_dictForceAttach ) - && params->attachDictPref != ZSTD_dictForceCopy - && !params->forceWindow ); /* dictMatchState isn't correctly - * handled in _enforceMaxDist */ -} - -static size_t -ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, - U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) -{ - DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu", - (unsigned long long)pledgedSrcSize); - { - ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; - unsigned const windowLog = params.cParams.windowLog; - assert(windowLog != 0); - /* Resize working context table params for input only, since the dict - * has its own tables. */ - /* pledgedSrcSize == 0 means 0!
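/* Illustrative sketch: the attach-vs-copy heuristic above can be overridden
 * by callers through the public (experimental, ZSTD_STATIC_LINKING_ONLY)
 * ZSTD_c_forceAttachDict parameter; the enum value used here is the same
 * one tested in ZSTD_shouldAttachDict(). */
#if 0
static void prefer_dict_copy(ZSTD_CCtx* cctx)
{
    /* bypass the attachDictSizeCutoffs table above */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);
}
#endif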
*/ - - if (cdict->matchState.dedicatedDictSearch) { - ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); - } - - params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, - cdict->dictContentSize, ZSTD_cpm_attachDict, - params.useRowMatchFinder); - params.cParams.windowLog = windowLog; - params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ - FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize, - /* loadedDictSize */ 0, - ZSTDcrp_makeClean, zbuff), ""); - assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy); - } - - { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc - - cdict->matchState.window.base); - const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit; - if (cdictLen == 0) { - /* don't even attach dictionaries with no contents */ - DEBUGLOG(4, "skipping attaching empty dictionary"); - } else { - DEBUGLOG(4, "attaching dictionary into context"); - cctx->blockState.matchState.dictMatchState = &cdict->matchState; - - /* prep working match state so dict matches never have negative indices - * when they are translated to the working context's index space. */ - if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { - cctx->blockState.matchState.window.nextSrc = - cctx->blockState.matchState.window.base + cdictEnd; - ZSTD_window_clear(&cctx->blockState.matchState.window); - } - /* loadedDictEnd is expressed within the referential of the active context */ - cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; - } } - - cctx->dictID = cdict->dictID; - cctx->dictContentSize = cdict->dictContentSize; - - /* copy block state */ - ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); - - return 0; -} - -static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize, - ZSTD_compressionParameters const* cParams) { - if (ZSTD_CDictIndicesAreTagged(cParams)){ - /* Remove tags from the CDict table if they are present. - * See docs on "short cache" in zstd_compress_internal.h for context. */ - size_t i; - for (i = 0; i < tableSize; i++) { - U32 const taggedIndex = src[i]; - U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS; - dst[i] = index; - } - } else { - ZSTD_memcpy(dst, src, tableSize * sizeof(U32)); - } -} - -static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, - U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) -{ - const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; - - assert(!cdict->matchState.dedicatedDictSearch); - DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu", - (unsigned long long)pledgedSrcSize); - - { unsigned const windowLog = params.cParams.windowLog; - assert(windowLog != 0); - /* Copy only compression parameters related to tables.
*/ - params.cParams = *cdict_cParams; - params.cParams.windowLog = windowLog; - params.useRowMatchFinder = cdict->useRowMatchFinder; - FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize, - /* loadedDictSize */ 0, - ZSTDcrp_leaveDirty, zbuff), ""); - assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); - assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); - assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); - } - - ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); - assert(params.useRowMatchFinder != ZSTD_ps_auto); - - /* copy tables */ - { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */) - ? ((size_t)1 << cdict_cParams->chainLog) - : 0; - size_t const hSize = (size_t)1 << cdict_cParams->hashLog; - - ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, - cdict->matchState.hashTable, - hSize, cdict_cParams); - - /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */ - if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) { - ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, - cdict->matchState.chainTable, - chainSize, cdict_cParams); - } - /* copy tag table */ - if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { - size_t const tagTableSize = hSize; - ZSTD_memcpy(cctx->blockState.matchState.tagTable, - cdict->matchState.tagTable, - tagTableSize); - cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; - } - } - - /* Zero the hashTable3, since the cdict never fills it */ - { int const h3log = cctx->blockState.matchState.hashLog3; - size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; - assert(cdict->matchState.hashLog3 == 0); - ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); - } - - ZSTD_cwksp_mark_tables_clean(&cctx->workspace); - - /* copy dictionary offsets */ - { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; - ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; - dstMatchState->window = srcMatchState->window; - dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; - } - - cctx->dictID = cdict->dictID; - cctx->dictContentSize = cdict->dictContentSize; - - /* copy block state */ - ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); - - return 0; -} - -/* We have a choice between copying the dictionary context into the working - * context, or referencing the dictionary context from the working context - * in-place. We decide here which strategy to use. */ -static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, - const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, - U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) -{ - - DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", - (unsigned)pledgedSrcSize); - - if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) { - return ZSTD_resetCCtx_byAttachingCDict( - cctx, cdict, *params, pledgedSrcSize, zbuff); - } else { - return ZSTD_resetCCtx_byCopyingCDict( - cctx, cdict, *params, pledgedSrcSize, zbuff); - } -} - -/*! ZSTD_copyCCtx_internal() : - * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. - * Only works during stage ZSTDcs_init (i.e.
after creation, but before first call to ZSTD_compressContinue()). - * The "context", in this case, refers to the hash and chain tables, - * entropy tables, and dictionary references. - * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. - * @return : 0, or an error code */ -static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, - const ZSTD_CCtx* srcCCtx, - ZSTD_frameParameters fParams, - U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) -{ - RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong, - "Can't copy a ctx that's not in init stage."); - DEBUGLOG(5, "ZSTD_copyCCtx_internal"); - ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); - { ZSTD_CCtx_params params = dstCCtx->requestedParams; - /* Copy only compression parameters related to tables. */ - params.cParams = srcCCtx->appliedParams.cParams; - assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); - assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); - assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); - params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; - params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; - params.ldmParams = srcCCtx->appliedParams.ldmParams; - params.fParams = fParams; - params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; - ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize, - /* loadedDictSize */ 0, - ZSTDcrp_leaveDirty, zbuff); - assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); - assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); - assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); - assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); - assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); - } - - ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); - - /* copy tables */ - { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy, - srcCCtx->appliedParams.useRowMatchFinder, - 0 /* forDDSDict */) - ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog) - : 0; - size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; - int const h3log = srcCCtx->blockState.matchState.hashLog3; - size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; - - ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, - srcCCtx->blockState.matchState.hashTable, - hSize * sizeof(U32)); - ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable, - srcCCtx->blockState.matchState.chainTable, - chainSize * sizeof(U32)); - ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3, - srcCCtx->blockState.matchState.hashTable3, - h3Size * sizeof(U32)); - } - - ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); - - /* copy dictionary offsets */ - { - const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; - ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; - dstMatchState->window = srcMatchState->window; - dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; - } - dstCCtx->dictID = srcCCtx->dictID; - dstCCtx->dictContentSize = srcCCtx->dictContentSize; - - /* copy block state */ - ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); - - return 0; -} - -/*!
ZSTD_copyCCtx() : - * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. - * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). - * pledgedSrcSize==0 means "unknown". -* @return : 0, or an error code */ -size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) -{ - ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy; - ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); - if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; - fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN); - - return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, - fParams, pledgedSrcSize, - zbuff); -} - - -#define ZSTD_ROWSIZE 16 -/*! ZSTD_reduceTable() : - * reduce table indexes by `reducerValue`, or squash to zero. - * PreserveMark preserves "unsorted mark" for btlazy2 strategy. - * It must be set to a clear 0/1 value, to remove branch during inlining. - * Presume table size is a multiple of ZSTD_ROWSIZE - * to help auto-vectorization */ -FORCE_INLINE_TEMPLATE void -ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark) -{ - int const nbRows = (int)size / ZSTD_ROWSIZE; - int cellNb = 0; - int rowNb; - /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ - U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; - assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ - assert(size < (1U<<31)); /* can be cast to int */ - -#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the table reuse logic is sound, and that we don't - * access table space that we haven't cleaned, we re-"poison" the table - * space every time we mark it dirty. - * - * This function however is intended to operate on those dirty tables and - * re-clean them. So when this function is used correctly, we can unpoison - * the memory it operated on. This introduces a blind spot though, since - * if we now try to operate on __actually__ poisoned memory, we will not - * detect that. 
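/* Illustrative sketch of the poison/unpoison discipline described above,
 * using the real MSan interface from <sanitizer/msan_interface.h>; the
 * helper names are hypothetical stand-ins for the ZSTD_cwksp helpers. */
#if 0
#include <sanitizer/msan_interface.h>
static void table_mark_dirty(void* table, size_t bytes)
{
    __msan_poison(table, bytes);     /* stray reads now trap under MSan */
}
static void table_mark_clean(void* table, size_t bytes)
{
    __msan_unpoison(table, bytes);   /* as done just below for `table` */
}
#endif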
*/ - __msan_unpoison(table, size * sizeof(U32)); -#endif - - for (rowNb=0 ; rowNb < nbRows ; rowNb++) { - int column; - for (column=0; column<ZSTD_ROWSIZE; column++) { - U32 newVal; - if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) { - /* This write is pointless, but is required(?) for the compiler - * to auto-vectorize the loop. */ - newVal = ZSTD_DUBT_UNSORTED_MARK; - } else if (table[cellNb] < reducerThreshold) { - newVal = 0; - } else { - newVal = table[cellNb] - reducerValue; - } - table[cellNb] = newVal; - cellNb++; - } } -} - -static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue) -{ - ZSTD_reduceTable_internal(table, size, reducerValue, 0); -} - -static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue) -{ - ZSTD_reduceTable_internal(table, size, reducerValue, 1); -} - -/*! ZSTD_reduceIndex() : -* rescale all indexes to avoid future overflow (indexes are U32) */ -static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) -{ - { U32 const hSize = (U32)1 << params->cParams.hashLog; - ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); - } - - if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) { - U32 const chainSize = (U32)1 << params->cParams.chainLog; - if (params->cParams.strategy == ZSTD_btlazy2) - ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); - else - ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); - } - - if (ms->hashLog3) { - U32 const h3Size = (U32)1 << ms->hashLog3; - ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); - } -} - - -/*-******************************************************* -* Block entropic compression -*********************************************************/ - -/* See doc/zstd_compression_format.md for detailed format description */ - -int ZSTD_seqToCodes(const seqStore_t* seqStorePtr) -{ - const seqDef* const sequences = seqStorePtr->sequencesStart; - BYTE* const llCodeTable = seqStorePtr->llCode; - BYTE* const ofCodeTable = seqStorePtr->ofCode; - BYTE* const mlCodeTable = seqStorePtr->mlCode; - U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - U32 u; - int longOffsets = 0; - assert(nbSeq <= seqStorePtr->maxNbSeq); - for (u=0; u<nbSeq; u++) { - U32 const llv = sequences[u].litLength; - U32 const ofCode = ZSTD_highbit32(sequences[u].offBase); - U32 const mlv = sequences[u].mlBase; - llCodeTable[u] = (BYTE)ZSTD_LLcode(llv); - ofCodeTable[u] = (BYTE)ofCode; - mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv); - assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN)); - if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN) - longOffsets = 1; - } - if (seqStorePtr->longLengthType==ZSTD_llt_literalLength) - llCodeTable[seqStorePtr->longLengthPos] = MaxLL; - if (seqStorePtr->longLengthType==ZSTD_llt_matchLength) - mlCodeTable[seqStorePtr->longLengthPos] = MaxML; - return longOffsets; -} - -/* ZSTD_useTargetCBlockSize(): - * Returns if target compressed block size param is being used. - * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize. - * Returns 1 if true, 0 otherwise. */ -static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) -{ - DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize); - return (cctxParams->targetCBlockSize != 0); -} - -/* ZSTD_blockSplitterEnabled(): - * Returns if block splitting param is being used - * If used, compression will do best effort to split a block in order to improve compression ratio. - * At the time this function is called, the parameter must be finalized. - * Returns 1 if true, 0 otherwise. */ -static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams) -{ - DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); - assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); - return (cctxParams->useBlockSplitter == ZSTD_ps_enable); -} - -/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types - * and size of the sequences statistics - */ -typedef struct { - U32 LLtype; - U32 Offtype; - U32 MLtype; - size_t size; - size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ - int longOffsets; -} ZSTD_symbolEncodingTypeStats_t; - -/* ZSTD_buildSequencesStatistics(): - * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field. - * Modifies `nextEntropy` to have the appropriate values as a side effect. - * nbSeq must be greater than 0.
- * - * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) - */ -static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildSequencesStatistics( - const seqStore_t* seqStorePtr, size_t nbSeq, - const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, - BYTE* dst, const BYTE* const dstEnd, - ZSTD_strategy strategy, unsigned* countWorkspace, - void* entropyWorkspace, size_t entropyWkspSize) -{ - BYTE* const ostart = dst; - const BYTE* const oend = dstEnd; - BYTE* op = ostart; - FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable; - FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable; - FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable; - const BYTE* const ofCodeTable = seqStorePtr->ofCode; - const BYTE* const llCodeTable = seqStorePtr->llCode; - const BYTE* const mlCodeTable = seqStorePtr->mlCode; - ZSTD_symbolEncodingTypeStats_t stats; - - stats.lastCountSize = 0; - /* convert length/distances into codes */ - stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); - assert(op <= oend); - assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */ - /* build CTable for Literal Lengths */ - { unsigned max = MaxLL; - size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ - DEBUGLOG(5, "Building LL table"); - nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; - stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, - countWorkspace, max, mostFrequent, nbSeq, - LLFSELog, prevEntropy->litlengthCTable, - LL_defaultNorm, LL_defaultNormLog, - ZSTD_defaultAllowed, strategy); - assert(set_basic < set_compressed && set_rle < set_compressed); - assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable( - op, (size_t)(oend - op), - CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype, - countWorkspace, max, llCodeTable, nbSeq, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - prevEntropy->litlengthCTable, - sizeof(prevEntropy->litlengthCTable), - entropyWorkspace, entropyWkspSize); - if (ZSTD_isError(countSize)) { - DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed"); - stats.size = countSize; - return stats; - } - if (stats.LLtype == set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); - } } - /* build CTable for Offsets */ - { unsigned max = MaxOff; - size_t const mostFrequent = HIST_countFast_wksp( - countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ - /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ - ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; - DEBUGLOG(5, "Building OF table"); - nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; - stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, - countWorkspace, max, mostFrequent, nbSeq, - OffFSELog, prevEntropy->offcodeCTable, - OF_defaultNorm, OF_defaultNormLog, - defaultPolicy, strategy); - assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable( - op, (size_t)(oend - op), - CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype, - countWorkspace, max, ofCodeTable, nbSeq, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - prevEntropy->offcodeCTable, - sizeof(prevEntropy->offcodeCTable), - entropyWorkspace, entropyWkspSize); - if (ZSTD_isError(countSize)) { - DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed"); - stats.size = countSize; - return stats; - } - if (stats.Offtype == set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); - } } - /* build CTable for MatchLengths */ - { unsigned max = MaxML; - size_t const mostFrequent = HIST_countFast_wksp( - countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ - DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); - nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; - stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, - countWorkspace, max, mostFrequent, nbSeq, - MLFSELog, prevEntropy->matchlengthCTable, - ML_defaultNorm, ML_defaultNormLog, - ZSTD_defaultAllowed, strategy); - assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ - { size_t const countSize = ZSTD_buildCTable( - op, (size_t)(oend - op), - CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype, - countWorkspace, max, mlCodeTable, nbSeq, - ML_defaultNorm, ML_defaultNormLog, MaxML, - prevEntropy->matchlengthCTable, - sizeof(prevEntropy->matchlengthCTable), - entropyWorkspace, entropyWkspSize); - if (ZSTD_isError(countSize)) { - DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed"); - stats.size = countSize; - return stats; - } - if (stats.MLtype == set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); - } } - stats.size = (size_t)(op-ostart); - return stats; -} - -/* ZSTD_entropyCompressSeqStore_internal(): - * compresses both literals and sequences - * Returns compressed size of block, or a zstd error. 
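/* Illustrative sketch: the sequences-section header emitted by the function
 * below, restated from a reader's point of view (layout per
 * doc/zstd_compression_format.md; MEM_readLE16 and LONGNBSEQ as used below). */
#if 0
/* the byte after this header packs the three table modes:
 * (LLtype<<6) + (Offtype<<4) + (MLtype<<2), cf. *seqHead below */
static size_t read_seq_section_header(const BYTE* ip, size_t* nbSeq)
{
    if (ip[0] < 128)  { *nbSeq = ip[0]; return 1; }                       /* 1 byte  */
    if (ip[0] < 0xFF) { *nbSeq = (size_t)(((ip[0] - 0x80) << 8) + ip[1]); return 2; }
    *nbSeq = (size_t)MEM_readLE16(ip + 1) + LONGNBSEQ; return 3;          /* 0xFF prefix */
}
#endif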
*/ -#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 -MEM_STATIC size_t -ZSTD_entropyCompressSeqStore_internal( - const seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - void* entropyWorkspace, size_t entropyWkspSize, - const int bmi2) -{ - ZSTD_strategy const strategy = cctxParams->cParams.strategy; - unsigned* count = (unsigned*)entropyWorkspace; - FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; - FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; - FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; - const seqDef* const sequences = seqStorePtr->sequencesStart; - const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - const BYTE* const ofCodeTable = seqStorePtr->ofCode; - const BYTE* const llCodeTable = seqStorePtr->llCode; - const BYTE* const mlCodeTable = seqStorePtr->mlCode; - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstCapacity; - BYTE* op = ostart; - size_t lastCountSize; - int longOffsets = 0; - - entropyWorkspace = count + (MaxSeq + 1); - entropyWkspSize -= (MaxSeq + 1) * sizeof(*count); - - DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity); - ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); - assert(entropyWkspSize >= HUF_WORKSPACE_SIZE); - - /* Compress literals */ - { const BYTE* const literals = seqStorePtr->litStart; - size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); - /* Base suspicion of uncompressibility on ratio of literals to sequences */ - unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); - size_t const litSize = (size_t)(seqStorePtr->lit - literals); - - size_t const cSize = ZSTD_compressLiterals( - op, dstCapacity, - literals, litSize, - entropyWorkspace, entropyWkspSize, - &prevEntropy->huf, &nextEntropy->huf, - cctxParams->cParams.strategy, - ZSTD_literalsCompressionIsDisabled(cctxParams), - suspectUncompressible, bmi2); - FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); - assert(cSize <= dstCapacity); - op += cSize; - } - - /* Sequences Header */ - RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, - dstSize_tooSmall, "Can't fit seq hdr in output buf!"); - if (nbSeq < 128) { - *op++ = (BYTE)nbSeq; - } else if (nbSeq < LONGNBSEQ) { - op[0] = (BYTE)((nbSeq>>8) + 0x80); - op[1] = (BYTE)nbSeq; - op+=2; - } else { - op[0]=0xFF; - MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); - op+=3; - } - assert(op <= oend); - if (nbSeq==0) { - /* Copy the old tables over as if we repeated them */ - ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); - return (size_t)(op - ostart); - } - { BYTE* const seqHead = op++; - /* build stats for sequences */ - const ZSTD_symbolEncodingTypeStats_t stats = - ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, - &prevEntropy->fse, &nextEntropy->fse, - op, oend, - strategy, count, - entropyWorkspace, entropyWkspSize); - FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); - *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); - lastCountSize = stats.lastCountSize; - op += stats.size; - longOffsets = stats.longOffsets; - } - - { size_t const bitstreamSize = ZSTD_encodeSequences( - op, (size_t)(oend - op), -
CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, - longOffsets, bmi2); - FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); - op += bitstreamSize; - assert(op <= oend); - /* zstd versions <= 1.3.4 mistakenly report corruption when - * FSE_readNCount() receives a buffer < 4 bytes. - * Fixed by https://github.com/facebook/zstd/pull/1146. - * This can happen when the last set_compressed table present is 2 - * bytes and the bitstream is only one byte. - * In this exceedingly rare case, we will simply emit an uncompressed - * block, since it isn't worth optimizing. - */ - if (lastCountSize && (lastCountSize + bitstreamSize) < 4) { - /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ - assert(lastCountSize + bitstreamSize == 3); - DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " - "emitting an uncompressed block."); - return 0; - } - } - - DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); - return (size_t)(op - ostart); -} - -MEM_STATIC size_t -ZSTD_entropyCompressSeqStore( - const seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - size_t srcSize, - void* entropyWorkspace, size_t entropyWkspSize, - int bmi2) -{ - size_t const cSize = ZSTD_entropyCompressSeqStore_internal( - seqStorePtr, prevEntropy, nextEntropy, cctxParams, - dst, dstCapacity, - entropyWorkspace, entropyWkspSize, bmi2); - if (cSize == 0) return 0; - /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. - * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. - */ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) { - DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); - return 0; /* block not compressed */ - } - FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); - - /* Check compressibility */ - { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); - if (cSize >= maxCSize) return 0; /* block not compressed */ - } - DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); - /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. - * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. 
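/* Illustrative note on the contract above: a return value of 0 is not an
 * error; it tells the caller to emit the block uncompressed. A minimal
 * caller-side restatement (the raw block keeps the content verbatim and
 * only adds the 3-byte block header, see writeBlockHeader() further below): */
#if 0
static int block_must_be_stored_raw(size_t cSize)
{
    return cSize == 0;   /* gain below ZSTD_minGain(), or dst too small */
}
#endif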
- */ - assert(cSize < ZSTD_BLOCKSIZE_MAX); - return cSize; -} - -/* ZSTD_selectBlockCompressor() : - * Not static, but internal use only (used by long distance matcher) - * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) -{ - static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { - { ZSTD_compressBlock_fast /* default for 0 */, - ZSTD_compressBlock_fast, - ZSTD_COMPRESSBLOCK_DOUBLEFAST, - ZSTD_COMPRESSBLOCK_GREEDY, - ZSTD_COMPRESSBLOCK_LAZY, - ZSTD_COMPRESSBLOCK_LAZY2, - ZSTD_COMPRESSBLOCK_BTLAZY2, - ZSTD_COMPRESSBLOCK_BTOPT, - ZSTD_COMPRESSBLOCK_BTULTRA, - ZSTD_COMPRESSBLOCK_BTULTRA2 - }, - { ZSTD_compressBlock_fast_extDict /* default for 0 */, - ZSTD_compressBlock_fast_extDict, - ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT, - ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT, - ZSTD_COMPRESSBLOCK_LAZY_EXTDICT, - ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT, - ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT, - ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT, - ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT, - ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT - }, - { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, - ZSTD_compressBlock_fast_dictMatchState, - ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE, - ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE - }, - { NULL /* default for 0 */, - NULL, - NULL, - ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH, - ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH, - ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH, - NULL, - NULL, - NULL, - NULL } - }; - ZSTD_blockCompressor selectedCompressor; - ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); - - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); - DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); - if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { - static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = { - { - ZSTD_COMPRESSBLOCK_GREEDY_ROW, - ZSTD_COMPRESSBLOCK_LAZY_ROW, - ZSTD_COMPRESSBLOCK_LAZY2_ROW - }, - { - ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW, - ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW, - ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW - }, - { - ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW, - ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW, - ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW - }, - { - ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW, - ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW, - ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW - } - }; - DEBUGLOG(4, "Selecting a row-based matchfinder"); - assert(useRowMatchFinder != ZSTD_ps_auto); - selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; - } else { - selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; - } - assert(selectedCompressor != NULL); - return selectedCompressor; -} - -static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, - const BYTE* anchor, size_t lastLLSize) -{ - ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; -} - -void ZSTD_resetSeqStore(seqStore_t* ssPtr) -{ - ssPtr->lit = ssPtr->litStart; - ssPtr->sequences = ssPtr->sequencesStart; - ssPtr->longLengthType = ZSTD_llt_none; -} - -/* 
ZSTD_postProcessSequenceProducerResult() : - * Validates and post-processes sequences obtained through the external matchfinder API: - * - Checks whether nbExternalSeqs represents an error condition. - * - Appends a block delimiter to outSeqs if one is not already present. - * See zstd.h for context regarding block delimiters. - * Returns the number of sequences after post-processing, or an error code. */ -static size_t ZSTD_postProcessSequenceProducerResult( - ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize -) { - RETURN_ERROR_IF( - nbExternalSeqs > outSeqsCapacity, - sequenceProducer_failed, - "External sequence producer returned error code %lu", - (unsigned long)nbExternalSeqs - ); - - RETURN_ERROR_IF( - nbExternalSeqs == 0 && srcSize > 0, - sequenceProducer_failed, - "Got zero sequences from external sequence producer for a non-empty src buffer!" - ); - - if (srcSize == 0) { - ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); - return 1; - } - - { - ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; - - /* We can return early if lastSeq is already a block delimiter. */ - if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { - return nbExternalSeqs; - } - - /* This error condition is only possible if the external matchfinder - * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ - RETURN_ERROR_IF( - nbExternalSeqs == outSeqsCapacity, - sequenceProducer_failed, - "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" - ); - - /* lastSeq is not a block delimiter, so we need to append one. */ - ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); - return nbExternalSeqs + 1; - } -} - -/* ZSTD_fastSequenceLengthSum() : - * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. - * Similar to another function in zstd_compress.c (determine_blockSize), - * except it doesn't check for a block delimiter to end summation. - * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). - * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */ -static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { - size_t matchLenSum, litLenSum, i; - matchLenSum = 0; - litLenSum = 0; - for (i = 0; i < seqBufSize; i++) { - litLenSum += seqBuf[i].litLength; - matchLenSum += seqBuf[i].matchLength; - } - return litLenSum + matchLenSum; -} - -typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; - -static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) -{ - ZSTD_matchState_t* const ms = &zc->blockState.matchState; - DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); - assert(srcSize <= ZSTD_BLOCKSIZE_MAX); - /* Assert that we have correctly flushed the ctx params into the ms's copy */ - ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); - /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding - * additional 1. 
We need to revisit and change this logic to be more consistent */ - if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { - if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { - ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); - } else { - ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); - } - return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */ - } - ZSTD_resetSeqStore(&(zc->seqStore)); - /* required for optimal parser to read stats from dictionary */ - ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; - /* tell the optimal parser how we expect to compress literals */ - ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; - /* a gap between an attached dict and the current window is not safe, - * they must remain adjacent, - * and when that stops being the case, the dict must be unset */ - assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit); - - /* limited update after a very long match */ - { const BYTE* const base = ms->window.base; - const BYTE* const istart = (const BYTE*)src; - const U32 curr = (U32)(istart-base); - if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ - if (curr > ms->nextToUpdate + 384) - ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384)); - } - - /* select and store sequences */ - { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms); - size_t lastLLSize; - { int i; - for (i = 0; i < ZSTD_REP_NUM; ++i) - zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; - } - if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); - - /* External matchfinder + LDM is technically possible, just not implemented yet. - * We need to revisit soon and implement it. */ - RETURN_ERROR_IF( - ZSTD_hasExtSeqProd(&zc->appliedParams), - parameter_combination_unsupported, - "Long-distance matching with external sequence producer enabled is not currently supported." - ); - - /* Updates ldmSeqStore.pos */ - lastLLSize = - ZSTD_ldm_blockCompress(&zc->externSeqStore, - ms, &zc->seqStore, - zc->blockState.nextCBlock->rep, - zc->appliedParams.useRowMatchFinder, - src, srcSize); - assert(zc->externSeqStore.pos <= zc->externSeqStore.size); - } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { - rawSeqStore_t ldmSeqStore = kNullRawSeqStore; - - /* External matchfinder + LDM is technically possible, just not implemented yet. - * We need to revisit soon and implement it. */ - RETURN_ERROR_IF( - ZSTD_hasExtSeqProd(&zc->appliedParams), - parameter_combination_unsupported, - "Long-distance matching with external sequence producer enabled is not currently supported." 
- ); - - ldmSeqStore.seq = zc->ldmSequences; - ldmSeqStore.capacity = zc->maxNbLdmSequences; - /* Updates ldmSeqStore.size */ - FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, - &zc->appliedParams.ldmParams, - src, srcSize), ""); - /* Updates ldmSeqStore.pos */ - lastLLSize = - ZSTD_ldm_blockCompress(&ldmSeqStore, - ms, &zc->seqStore, - zc->blockState.nextCBlock->rep, - zc->appliedParams.useRowMatchFinder, - src, srcSize); - assert(ldmSeqStore.pos == ldmSeqStore.size); - } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) { - assert( - zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize) - ); - assert(zc->appliedParams.extSeqProdFunc != NULL); - - { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; - - size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)( - zc->appliedParams.extSeqProdState, - zc->extSeqBuf, - zc->extSeqBufCapacity, - src, srcSize, - NULL, 0, /* dict and dictSize, currently not supported */ - zc->appliedParams.compressionLevel, - windowSize - ); - - size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( - zc->extSeqBuf, - nbExternalSeqs, - zc->extSeqBufCapacity, - srcSize - ); - - /* Return early if there is no error, since we don't need to worry about last literals */ - if (!ZSTD_isError(nbPostProcessedSeqs)) { - ZSTD_sequencePosition seqPos = {0,0,0}; - size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); - RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); - FORWARD_IF_ERROR( - ZSTD_copySequencesToSeqStoreExplicitBlockDelim( - zc, &seqPos, - zc->extSeqBuf, nbPostProcessedSeqs, - src, srcSize, - zc->appliedParams.searchForExternalRepcodes - ), - "Failed to copy external sequences to seqStore!" - ); - ms->ldmSeqStore = NULL; - DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); - return ZSTDbss_compress; - } - - /* Propagate the error if fallback is disabled */ - if (!zc->appliedParams.enableMatchFinderFallback) { - return nbPostProcessedSeqs; - } - - /* Fallback to software matchfinder */ - { ZSTD_blockCompressor const blockCompressor = - ZSTD_selectBlockCompressor( - zc->appliedParams.cParams.strategy, - zc->appliedParams.useRowMatchFinder, - dictMode); - ms->ldmSeqStore = NULL; - DEBUGLOG( - 5, - "External sequence producer returned error code %lu. Falling back to internal parser.", - (unsigned long)nbExternalSeqs - ); - lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); - } } - } else { /* not long range mode and no external matchfinder */ - ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor( - zc->appliedParams.cParams.strategy, - zc->appliedParams.useRowMatchFinder, - dictMode); - ms->ldmSeqStore = NULL; - lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); - } - { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; - ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); - } } - return ZSTDbss_compress; -} - -static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const seqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) -{ - const seqDef* inSeqs = seqStore->sequencesStart; - const size_t nbInSequences = seqStore->sequences - inSeqs; - const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); - - ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? 
seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; - const size_t nbOutSequences = nbInSequences + 1; - size_t nbOutLiterals = 0; - repcodes_t repcodes; - size_t i; - - /* Bounds check that we have enough space for every input sequence - * and the block delimiter - */ - assert(seqCollector->seqIndex <= seqCollector->maxSequences); - RETURN_ERROR_IF( - nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex), - dstSize_tooSmall, - "Not enough space to copy sequences"); - - ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes)); - for (i = 0; i < nbInSequences; ++i) { - U32 rawOffset; - outSeqs[i].litLength = inSeqs[i].litLength; - outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH; - outSeqs[i].rep = 0; - - /* Handle the possible single length >= 64K - * There can only be one because we add MINMATCH to every match length, - * and blocks are at most 128K. - */ - if (i == seqStore->longLengthPos) { - if (seqStore->longLengthType == ZSTD_llt_literalLength) { - outSeqs[i].litLength += 0x10000; - } else if (seqStore->longLengthType == ZSTD_llt_matchLength) { - outSeqs[i].matchLength += 0x10000; - } - } - - /* Determine the raw offset given the offBase, which may be a repcode. */ - if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) { - const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase); - assert(repcode > 0); - outSeqs[i].rep = repcode; - if (outSeqs[i].litLength != 0) { - rawOffset = repcodes.rep[repcode - 1]; - } else { - if (repcode == 3) { - assert(repcodes.rep[0] > 1); - rawOffset = repcodes.rep[0] - 1; - } else { - rawOffset = repcodes.rep[repcode]; - } - } - } else { - rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase); - } - outSeqs[i].offset = rawOffset; - - /* Update repcode history for the sequence */ - ZSTD_updateRep(repcodes.rep, - inSeqs[i].offBase, - inSeqs[i].litLength == 0); - - nbOutLiterals += outSeqs[i].litLength; - } - /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. - * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker - * for the block boundary, according to the API. - */ - assert(nbInLiterals >= nbOutLiterals); - { - const size_t lastLLSize = nbInLiterals - nbOutLiterals; - outSeqs[nbInSequences].litLength = (U32)lastLLSize; - outSeqs[nbInSequences].matchLength = 0; - outSeqs[nbInSequences].offset = 0; - assert(nbOutSequences == nbInSequences + 1); - } - seqCollector->seqIndex += nbOutSequences; - assert(seqCollector->seqIndex <= seqCollector->maxSequences); - - return 0; -} - -size_t ZSTD_sequenceBound(size_t srcSize) { - const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1; - const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1; - return maxNbSeq + maxNbDelims; -} - -size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, - size_t outSeqsSize, const void* src, size_t srcSize) -{ - const size_t dstCapacity = ZSTD_compressBound(srcSize); - void* dst; /* Make C90 happy. 
*/ - SeqCollector seqCollector; - { - int targetCBlockSize; - FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), ""); - RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0"); - } - { - int nbWorkers; - FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), ""); - RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0"); - } - - dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); - RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); - - seqCollector.collectSequences = 1; - seqCollector.seqStart = outSeqs; - seqCollector.seqIndex = 0; - seqCollector.maxSequences = outSeqsSize; - zc->seqCollector = seqCollector; - - { - const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); - ZSTD_customFree(dst, ZSTD_defaultCMem); - FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed"); - } - assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); - return zc->seqCollector.seqIndex; -} - -size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) { - size_t in = 0; - size_t out = 0; - for (; in < seqsSize; ++in) { - if (sequences[in].offset == 0 && sequences[in].matchLength == 0) { - if (in != seqsSize - 1) { - sequences[in+1].litLength += sequences[in].litLength; - } - } else { - sequences[out] = sequences[in]; - ++out; - } - } - return out; -} - -/* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */ -static int ZSTD_isRLE(const BYTE* src, size_t length) { - const BYTE* ip = src; - const BYTE value = ip[0]; - const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL); - const size_t unrollSize = sizeof(size_t) * 4; - const size_t unrollMask = unrollSize - 1; - const size_t prefixLength = length & unrollMask; - size_t i; - if (length == 1) return 1; - /* Check if prefix is RLE first before using unrolled loop */ - if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) { - return 0; - } - for (i = prefixLength; i != length; i += unrollSize) { - size_t u; - for (u = 0; u < unrollSize; u += sizeof(size_t)) { - if (MEM_readST(ip + i + u) != valueST) { - return 0; - } } } - return 1; -} - -/* Returns true if the given block may be RLE. - * This is just a heuristic based on the compressibility. - * It may return both false positives and false negatives. - */ -static int ZSTD_maybeRLE(seqStore_t const* seqStore) -{ - size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); - size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); - - return nbSeqs < 4 && nbLits < 10; -} - -static void -ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) -{ - ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; - bs->prevCBlock = bs->nextCBlock; - bs->nextCBlock = tmp; -} - -/* Writes the block header */ -static void -writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) -{ - U32 const cBlockHeader = cSize == 1 ? - lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : - lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); - MEM_writeLE24(op, cBlockHeader); - DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); -} - -/** ZSTD_buildBlockEntropyStats_literals() : - * Builds entropy for the literals. - * Stores literals block type (raw, rle, compressed, repeat) and - * huffman description table to hufMetadata. 
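 * Decision cascade, summarized with illustrative numbers taken from the body
 * below: a 1000-byte literal buffer holding a single byte value gives
 * largest == srcSize and selects set_rle; if the most frequent symbol occurs
 * at most (srcSize >> 7) + 4 == 11 times, the buffer is deemed incompressible
 * and set_basic is selected; otherwise a freshly built table competes against
 * reusing the previous one (set_compressed vs set_repeat) on estimated size.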
- * Requires ENTROPY_WORKSPACE_SIZE workspace - * @return : size of huffman description table, or an error code - */ -static size_t -ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, - const ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - const int literalsCompressionIsDisabled, - void* workspace, size_t wkspSize, - int hufFlags) -{ - BYTE* const wkspStart = (BYTE*)workspace; - BYTE* const wkspEnd = wkspStart + wkspSize; - BYTE* const countWkspStart = wkspStart; - unsigned* const countWksp = (unsigned*)workspace; - const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); - BYTE* const nodeWksp = countWkspStart + countWkspSize; - const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp); - unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; - unsigned huffLog = LitHufLog; - HUF_repeat repeat = prevHuf->repeatMode; - DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); - - /* Prepare nextEntropy assuming reusing the existing table */ - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - - if (literalsCompressionIsDisabled) { - DEBUGLOG(5, "set_basic - disabled"); - hufMetadata->hType = set_basic; - return 0; - } - - /* small ? don't even attempt compression (speed opt) */ -#ifndef COMPRESS_LITERALS_SIZE_MIN -# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ -#endif - { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; - if (srcSize <= minLitSize) { - DEBUGLOG(5, "set_basic - too small"); - hufMetadata->hType = set_basic; - return 0; - } } - - /* Scan input and build symbol stats */ - { size_t const largest = - HIST_count_wksp (countWksp, &maxSymbolValue, - (const BYTE*)src, srcSize, - workspace, wkspSize); - FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); - if (largest == srcSize) { - /* only one literal symbol */ - DEBUGLOG(5, "set_rle"); - hufMetadata->hType = set_rle; - return 0; - } - if (largest <= (srcSize >> 7)+4) { - /* heuristic: likely not compressible */ - DEBUGLOG(5, "set_basic - no gain"); - hufMetadata->hType = set_basic; - return 0; - } } - - /* Validate the previous Huffman table */ - if (repeat == HUF_repeat_check - && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { - repeat = HUF_repeat_none; - } - - /* Build Huffman Tree */ - ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); - assert(huffLog <= LitHufLog); - { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, - maxSymbolValue, huffLog, - nodeWksp, nodeWkspSize); - FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); - huffLog = (U32)maxBits; - } - { /* Build and write the CTable */ - size_t const newCSize = HUF_estimateCompressedSize( - (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); - size_t const hSize = HUF_writeCTable_wksp( - hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), - (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, - nodeWksp, nodeWkspSize); - /* Check against repeating the previous CTable */ - if (repeat != HUF_repeat_none) { - size_t const oldCSize = HUF_estimateCompressedSize( - (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); - if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { - DEBUGLOG(5, "set_repeat - smaller"); - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - 
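/* Illustrative numbers for the comparison just above (not from the diff):
 * with srcSize = 1000, reusing the previous table at oldCSize = 600 beats
 * re-emitting a new one costing newCSize = 550 plus an hSize = 60 description,
 * since 600 < 1000 and 600 <= 550 + 60; set_repeat is chosen and no table
 * header is written. */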
hufMetadata->hType = set_repeat; - return 0; - } } - if (newCSize + hSize >= srcSize) { - DEBUGLOG(5, "set_basic - no gains"); - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_basic; - return 0; - } - DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); - hufMetadata->hType = set_compressed; - nextHuf->repeatMode = HUF_repeat_check; - return hSize; - } -} - - -/* ZSTD_buildDummySequencesStatistics(): - * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, - * and updates nextEntropy to the appropriate repeatMode. - */ -static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) -{ - ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; - nextEntropy->litlength_repeatMode = FSE_repeat_none; - nextEntropy->offcode_repeatMode = FSE_repeat_none; - nextEntropy->matchlength_repeatMode = FSE_repeat_none; - return stats; -} - -/** ZSTD_buildBlockEntropyStats_sequences() : - * Builds entropy for the sequences. - * Stores symbol compression modes and fse table to fseMetadata. - * Requires ENTROPY_WORKSPACE_SIZE wksp. - * @return : size of fse tables or error code */ -static size_t -ZSTD_buildBlockEntropyStats_sequences( - const seqStore_t* seqStorePtr, - const ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize) -{ - ZSTD_strategy const strategy = cctxParams->cParams.strategy; - size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - BYTE* const ostart = fseMetadata->fseTablesBuffer; - BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); - BYTE* op = ostart; - unsigned* countWorkspace = (unsigned*)workspace; - unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1); - size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace); - ZSTD_symbolEncodingTypeStats_t stats; - - DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq); - stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, - prevEntropy, nextEntropy, op, oend, - strategy, countWorkspace, - entropyWorkspace, entropyWorkspaceSize) - : ZSTD_buildDummySequencesStatistics(nextEntropy); - FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); - fseMetadata->llType = (symbolEncodingType_e) stats.LLtype; - fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype; - fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype; - fseMetadata->lastCountSize = stats.lastCountSize; - return stats.size; -} - - -/** ZSTD_buildBlockEntropyStats() : - * Builds entropy for the block. - * Requires workspace size ENTROPY_WORKSPACE_SIZE - * @return : 0 on success, or an error code - * Note : also employed in superblock - */ -size_t ZSTD_buildBlockEntropyStats( - const seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize) -{ - size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); - int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); - int const hufFlags = huf_useOptDepth ? 
HUF_flags_optimalDepth : 0; - - entropyMetadata->hufMetadata.hufDesSize = - ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, - &prevEntropy->huf, &nextEntropy->huf, - &entropyMetadata->hufMetadata, - ZSTD_literalsCompressionIsDisabled(cctxParams), - workspace, wkspSize, hufFlags); - - FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); - entropyMetadata->fseMetadata.fseTablesSize = - ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, - &prevEntropy->fse, &nextEntropy->fse, - cctxParams, - &entropyMetadata->fseMetadata, - workspace, wkspSize); - FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed"); - return 0; -} - -/* Returns the size estimate for the literals section (header + content) of a block */ -static size_t -ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, - const ZSTD_hufCTables_t* huf, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) -{ - unsigned* const countWksp = (unsigned*)workspace; - unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; - size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB); - U32 singleStream = litSize < 256; - - if (hufMetadata->hType == set_basic) return litSize; - else if (hufMetadata->hType == set_rle) return 1; - else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { - size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); - if (ZSTD_isError(largest)) return litSize; - { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); - if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; - if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */ - return cLitSizeEstimate + literalSectionHeaderSize; - } } - assert(0); /* impossible */ - return 0; -} - -/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ -static size_t -ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, - const BYTE* codeTable, size_t nbSeq, unsigned maxCode, - const FSE_CTable* fseCTable, - const U8* additionalBits, - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) -{ - unsigned* const countWksp = (unsigned*)workspace; - const BYTE* ctp = codeTable; - const BYTE* const ctStart = ctp; - const BYTE* const ctEnd = ctStart + nbSeq; - size_t cSymbolTypeSizeEstimateInBits = 0; - unsigned max = maxCode; - - HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ - if (type == set_basic) { - /* We selected this encoding type, so it must be valid. 
*/ - assert(max <= defaultMax); - (void)defaultMax; - cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); - } else if (type == set_rle) { - cSymbolTypeSizeEstimateInBits = 0; - } else if (type == set_compressed || type == set_repeat) { - cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); - } - if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) { - return nbSeq * 10; - } - while (ctp < ctEnd) { - if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; - else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ - ctp++; - } - return cSymbolTypeSizeEstimateInBits >> 3; -} - -/* Returns the size estimate for the sequences section (header + content) of a block */ -static size_t -ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) -{ - size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); - size_t cSeqSizeEstimate = 0; - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, - fseTables->offcodeCTable, NULL, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, - fseTables->litlengthCTable, LL_bits, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, - fseTables->matchlengthCTable, ML_bits, - ML_defaultNorm, ML_defaultNormLog, MaxML, - workspace, wkspSize); - if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; - return cSeqSizeEstimate + sequencesSectionHeaderSize; -} - -/* Returns the size estimate for a given stream of literals, of, ll, ml */ -static size_t -ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, - const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) -{ - size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); - size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, - nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, - workspace, wkspSize, writeSeqEntropy); - return seqSize + literalsSize + ZSTD_blockHeaderSize; -} - -/* Builds entropy statistics and uses them for blocksize estimation. - * - * @return: estimated compressed size of the seqStore, or a zstd error. 
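 * The block splitter consumes these estimates directly: a candidate split at
 * sequence index mid is kept only when
 * estimate(first half) + estimate(second half) < estimate(whole chunk),
 * each estimate already including its own ZSTD_blockHeaderSize overhead.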
- */ -static size_t -ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) -{ - ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; - DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); - FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, - &zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), ""); - return ZSTD_estimateBlockSize( - seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), - seqStore->ofCode, seqStore->llCode, seqStore->mlCode, - (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, - entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, - (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); -} - -/* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) -{ - size_t literalsBytes = 0; - size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); - size_t i; - for (i = 0; i < nbSeqs; ++i) { - seqDef const seq = seqStore->sequencesStart[i]; - literalsBytes += seq.litLength; - if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { - literalsBytes += 0x10000; - } } - return literalsBytes; -} - -/* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) -{ - size_t matchBytes = 0; - size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); - size_t i; - for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; - matchBytes += seq.mlBase + MINMATCH; - if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { - matchBytes += 0x10000; - } } - return matchBytes; -} - -/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). - * Stores the result in resultSeqStore. 
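 * Illustration: splitting a 1000-sequence block at index 500 derives the
 * chunks [0, 500) and [500, 1000). The second chunk's litStart is advanced
 * past the first chunk's literals, its llCode/mlCode/ofCode tables are offset
 * by startIdx, and only a chunk reaching the end of the block keeps the
 * trailing last-literals.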
- */ -static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, - const seqStore_t* originalSeqStore, - size_t startIdx, size_t endIdx) -{ - *resultSeqStore = *originalSeqStore; - if (startIdx > 0) { - resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; - resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); - } - - /* Move longLengthPos into the correct position if necessary */ - if (originalSeqStore->longLengthType != ZSTD_llt_none) { - if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) { - resultSeqStore->longLengthType = ZSTD_llt_none; - } else { - resultSeqStore->longLengthPos -= (U32)startIdx; - } - } - resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; - resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; - if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { - /* This accounts for possible last literals if the derived chunk reaches the end of the block */ - assert(resultSeqStore->lit == originalSeqStore->lit); - } else { - size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); - resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; - } - resultSeqStore->llCode += startIdx; - resultSeqStore->mlCode += startIdx; - resultSeqStore->ofCode += startIdx; -} - -/** - * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. - * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). - */ -static U32 -ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) -{ - U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ - assert(OFFBASE_IS_REPCODE(offBase)); - if (adjustedRepCode == ZSTD_REP_NUM) { - assert(ll0); - /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 - * This is only valid if it results in a valid offset value, aka > 0. - * Note : it may happen that `rep[0]==1` in exceptional circumstances. - * In which case this function will return 0, which is an invalid offset. - * It's not an issue though, since this value will be - * compared and discarded within ZSTD_seqStore_resolveOffCodes(). - */ - return rep[0] - 1; - } - return rep[adjustedRepCode]; -} - -/** - * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise - * due to emission of RLE/raw blocks that disturb the offset history, - * and replaces any repcodes within the seqStore that may be invalid. - * - * dRepcodes are updated as would be on the decompression side. - * cRepcodes are updated exactly in accordance with the seqStore. - * - * Note : this function assumes seq->offBase respects the following numbering scheme : - * 0 : invalid - * 1-3 : repcode 1-3 - * 4+ : real_offset+3 - */ -static void -ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, - const seqStore_t* const seqStore, U32 const nbSeq) -{ - U32 idx = 0; - U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? 
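/* Illustrative divergence (values hypothetical): after a partition is emitted
 * as RLE, the decoder-side history dRep may still hold {8, 4, 2} while the
 * compression-side cRep has advanced to {16, 8, 4}. A later sequence coded as
 * repcode 1 with litLength > 0 resolves to dRawOffset = 8 but cRawOffset = 16;
 * the loop below detects the mismatch and rewrites the sequence to the
 * explicit offset 16 via OFFSET_TO_OFFBASE(). */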
seqStore->longLengthPos : nbSeq; - for (; idx < nbSeq; ++idx) { - seqDef* const seq = seqStore->sequencesStart + idx; - U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); - U32 const offBase = seq->offBase; - assert(offBase > 0); - if (OFFBASE_IS_REPCODE(offBase)) { - U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); - U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); - /* Adjust simulated decompression repcode history if we come across a mismatch. Replace - * the repcode with the offset it actually references, determined by the compression - * repcode history. - */ - if (dRawOffset != cRawOffset) { - seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); - } - } - /* Compression repcode history is always updated with values directly from the unmodified seqStore. - * Decompression repcode history may use modified seq->offset value taken from compression repcode history. - */ - ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); - ZSTD_updateRep(cRepcodes->rep, offBase, ll0); - } -} - -/* ZSTD_compressSeqStore_singleBlock(): - * Compresses a seqStore into a block with a block header, into the buffer dst. - * - * Returns the total size of that block (including header) or a ZSTD error code. - */ -static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, - const seqStore_t* const seqStore, - repcodes_t* const dRep, repcodes_t* const cRep, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 lastBlock, U32 isPartition) -{ - const U32 rleMaxLength = 25; - BYTE* op = (BYTE*)dst; - const BYTE* ip = (const BYTE*)src; - size_t cSize; - size_t cSeqsSize; - - /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ - repcodes_t const dRepOriginal = *dRep; - DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); - if (isPartition) - ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); - - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit"); - cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, - &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, - srcSize, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, - zc->bmi2); - FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); - - if (!zc->isFirstBlock && - cSeqsSize < rleMaxLength && - ZSTD_isRLE((BYTE const*)src, srcSize)) { - /* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
- * This is only an issue for zstd <= v1.4.3 - */ - cSeqsSize = 1; - } - - /* Sequence collection not supported when block splitting */ - if (zc->seqCollector.collectSequences) { - FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed"); - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return 0; - } - - if (cSeqsSize == 0) { - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); - FORWARD_IF_ERROR(cSize, "Nocompress block failed"); - DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize); - *dRep = dRepOriginal; /* reset simulated decompression repcode history */ - } else if (cSeqsSize == 1) { - cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); - FORWARD_IF_ERROR(cSize, "RLE compress block failed"); - DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize); - *dRep = dRepOriginal; /* reset simulated decompression repcode history */ - } else { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); - cSize = ZSTD_blockHeaderSize + cSeqsSize; - DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); - } - - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - - return cSize; -} - -/* Struct to keep track of where we are in our recursive calls. */ -typedef struct { - U32* splitLocations; /* Array of split indices */ - size_t idx; /* The current index within splitLocations being worked on */ -} seqStoreSplits; - -#define MIN_SEQUENCES_BLOCK_SPLITTING 300 - -/* Helper function to perform the recursive search for block splits. - * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. - * If not, or if an error occurred in estimation, then we do not recurse. - * - * Note: The recursion depth is capped by a heuristic minimum number of sequences, - * defined by MIN_SEQUENCES_BLOCK_SPLITTING. - * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). - * In practice, recursion depth usually doesn't go beyond 4. - * - * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. - * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize - * maximum of 128 KB, this value is actually impossible to reach. 
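 * Illustration: with nbSeq == 1000 and every candidate judged beneficial, the
 * in-order recursion records splits {250, 500, 750}; ZSTD_deriveBlockSplits()
 * then appends the sentinel 1000, and the partitions [0,250), [250,500),
 * [500,750), [750,1000) are compressed independently. Ranges with fewer than
 * MIN_SEQUENCES_BLOCK_SPLITTING (300) sequences are never split further.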
- */ -static void -ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, - ZSTD_CCtx* zc, const seqStore_t* origSeqStore) -{ - seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; - size_t estimatedOriginalSize; - size_t estimatedFirstHalfSize; - size_t estimatedSecondHalfSize; - size_t midIdx = (startIdx + endIdx)/2; - - DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); - assert(endIdx >= startIdx); - if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { - DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); - return; - } - ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); - ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); - ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); - estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); - estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); - estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); - DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", - estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); - if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { - return; - } - if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { - DEBUGLOG(5, "split decided at seqNb:%zu", midIdx); - ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); - splits->splitLocations[splits->idx] = (U32)midIdx; - splits->idx++; - ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore); - } -} - -/* Base recursive function. - * Populates a table with intra-block partition indices that can improve compression ratio. - * - * @return: number of splits made (which equals the size of the partition table - 1). - */ -static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) -{ - seqStoreSplits splits; - splits.splitLocations = partitions; - splits.idx = 0; - if (nbSeq <= 4) { - DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); - /* Refuse to try and split anything with less than 4 sequences */ - return 0; - } - ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore); - splits.splitLocations[splits.idx] = nbSeq; - DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1); - return splits.idx; -} - -/* ZSTD_compressBlock_splitBlock(): - * Attempts to split a given block into multiple blocks to improve compression ratio. - * - * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
- */ -static size_t -ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t blockSize, - U32 lastBlock, U32 nbSeq) -{ - size_t cSize = 0; - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; - size_t i = 0; - size_t srcBytesTotal = 0; - U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ - seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; - seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; - size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); - - /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history - * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two - * separate repcode histories that simulate repcode history on compression and decompression side, - * and use the histories to determine whether we must replace a particular repcode with its raw offset. - * - * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed - * or RLE. This allows us to retrieve the offset value that an invalid repcode references within - * a nocompress/RLE block. - * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use - * the replacement offset value rather than the original repcode to update the repcode history. - * dRep also will be the final repcode history sent to the next block. - * - * See ZSTD_seqStore_resolveOffCodes() for more details. - */ - repcodes_t dRep; - repcodes_t cRep; - ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); - - DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", - (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, - (unsigned)zc->blockState.matchState.nextToUpdate); - - if (numSplits == 0) { - size_t cSizeSingleBlock = - ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, - &dRep, &cRep, - op, dstCapacity, - ip, blockSize, - lastBlock, 0 /* isPartition */); - FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); - DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); - assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX); - assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize); - return cSizeSingleBlock; - } - - ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); - for (i = 0; i <= numSplits; ++i) { - size_t cSizeChunk; - U32 const lastPartition = (i == numSplits); - U32 lastBlockEntireSrc = 0; - - size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); - srcBytesTotal += srcBytes; - if (lastPartition) { - /* This is the final partition, need to account for possible last literals */ - srcBytes += blockSize - srcBytesTotal; - lastBlockEntireSrc = lastBlock; - } else { - ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); - } - - cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, - &dRep, &cRep, - op, dstCapacity, - ip, srcBytes, - lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", - ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); - FORWARD_IF_ERROR(cSizeChunk, 
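/* Bookkeeping example (hypothetical sizes): for a 100 KB block split into two
 * partitions whose sequences cover 60 KB and 35 KB of source, the final
 * partition's srcBytes is topped up by blockSize - srcBytesTotal = 5 KB of
 * last-literals, so every input byte is attributed to exactly one partition. */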
"Compressing chunk failed!"); - - ip += srcBytes; - op += cSizeChunk; - dstCapacity -= cSizeChunk; - cSize += cSizeChunk; - *currSeqStore = *nextSeqStore; - assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize); - } - /* cRep and dRep may have diverged during the compression. - * If so, we use the dRep repcodes for the next block. - */ - ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); - return cSize; -} - -static size_t -ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, U32 lastBlock) -{ - U32 nbSeq; - size_t cSize; - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); - assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); - - { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); - FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); - if (bss == ZSTDbss_noCompress) { - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); - cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); - return cSize; - } - nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); - } - - cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); - FORWARD_IF_ERROR(cSize, "Splitting blocks failed!"); - return cSize; -} - -static size_t -ZSTD_compressBlock_internal(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, U32 frame) -{ - /* This is an estimated upper bound for the length of an rle block. - * This isn't the actual upper bound. - * Finding the real threshold needs further investigation. - */ - const U32 rleMaxLength = 25; - size_t cSize; - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; - DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", - (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, - (unsigned)zc->blockState.matchState.nextToUpdate); - - { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); - FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); - if (bss == ZSTDbss_noCompress) { - RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); - cSize = 0; - goto out; - } - } - - if (zc->seqCollector.collectSequences) { - FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed"); - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return 0; - } - - /* encode sequences and literals */ - cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore, - &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - dst, dstCapacity, - srcSize, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, - zc->bmi2); - - if (frame && - /* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
- * This is only an issue for zstd <= v1.4.3 - */ - !zc->isFirstBlock && - cSize < rleMaxLength && - ZSTD_isRLE(ip, srcSize)) - { - cSize = 1; - op[0] = ip[0]; - } - -out: - if (!ZSTD_isError(cSize) && cSize > 1) { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - } - /* We check that dictionaries have offset codes available for the first - * block. After the first block, the offcode table might not have large - * enough codes to represent the offsets in the data. - */ - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - - return cSize; -} - -static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const size_t bss, U32 lastBlock) -{ - DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()"); - if (bss == ZSTDbss_compress) { - if (/* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." - * This is only an issue for zstd <= v1.4.3 - */ - !zc->isFirstBlock && - ZSTD_maybeRLE(&zc->seqStore) && - ZSTD_isRLE((BYTE const*)src, srcSize)) - { - return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock); - } - /* Attempt superblock compression. - * - * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the - * standard ZSTD_compressBound(). This is a problem, because even if we have - * space now, taking an extra byte now could cause us to run out of space later - * and violate ZSTD_compressBound(). - * - * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize. - * - * In order to respect ZSTD_compressBound() we must attempt to emit a raw - * uncompressed block in these cases: - * * cSize == 0: Return code for an uncompressed block. - * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize). - * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of - * output space. - * * cSize >= blockBound(srcSize): We have expanded the block too much so - * emit an uncompressed block. - */ - { size_t const cSize = - ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); - if (cSize != ERROR(dstSize_tooSmall)) { - size_t const maxCSize = - srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); - FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); - if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return cSize; - } - } - } - } /* if (bss == ZSTDbss_compress)*/ - - DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); - /* Superblock compression failed, attempt to emit a single no compress block. - * The decoder will be able to stream this block since it is uncompressed. 
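 * (Usage note, assuming the public parameter from zstd.h: this path only runs
 * when a target is configured, e.g.
 * ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1440);
 * with legal values bounded by ZSTD_TARGETCBLOCKSIZE_MIN and
 * ZSTD_TARGETCBLOCKSIZE_MAX.)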
- */ - return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); -} - -static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 lastBlock) -{ - size_t cSize = 0; - const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); - DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)", - (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize); - FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); - - cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed"); - - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - - return cSize; -} - -static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_CCtx_params const* params, - void const* ip, - void const* iend) -{ - U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy); - U32 const maxDist = (U32)1 << params->cParams.windowLog; - if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) { - U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); - ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); - ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); - ZSTD_cwksp_mark_tables_dirty(ws); - ZSTD_reduceIndex(ms, params, correction); - ZSTD_cwksp_mark_tables_clean(ws); - if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; - else ms->nextToUpdate -= correction; - /* invalidate dictionaries on overflow correction */ - ms->loadedDictEnd = 0; - ms->dictMatchState = NULL; - } -} - -/*! ZSTD_compress_frameChunk() : -* Compress a chunk of data into one or multiple blocks. -* All blocks will be terminated, all input will be consumed. -* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. -* Frame is supposed already started (header already produced) -* @return : compressed size, or an error code -*/ -static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 lastFrameChunk) -{ - size_t blockSize = cctx->blockSize; - size_t remaining = srcSize; - const BYTE* ip = (const BYTE*)src; - BYTE* const ostart = (BYTE*)dst; - BYTE* op = ostart; - U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; - - assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); - - DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); - if (cctx->appliedParams.fParams.checksumFlag && srcSize) - XXH64_update(&cctx->xxhState, src, srcSize); - - while (remaining) { - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; - U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); - - /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding - * additional 1. 
We need to revisit and change this logic to be more consistent */ - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, - dstSize_tooSmall, - "not enough space to store compressed block"); - if (remaining < blockSize) blockSize = remaining; - - ZSTD_overflowCorrectIfNeeded( - ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); - ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); - ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); - - /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ - if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; - - { size_t cSize; - if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) { - cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed"); - assert(cSize > 0); - assert(cSize <= blockSize + ZSTD_blockHeaderSize); - } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) { - cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed"); - assert(cSize > 0 || cctx->seqCollector.collectSequences == 1); - } else { - cSize = ZSTD_compressBlock_internal(cctx, - op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, - ip, blockSize, 1 /* frame */); - FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed"); - - if (cSize == 0) { /* block is not compressible */ - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); - } else { - U32 const cBlockHeader = cSize == 1 ? - lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : - lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); - MEM_writeLE24(op, cBlockHeader); - cSize += ZSTD_blockHeaderSize; - } - } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ - - - ip += blockSize; - assert(remaining >= blockSize); - remaining -= blockSize; - op += cSize; - assert(dstCapacity >= cSize); - dstCapacity -= cSize; - cctx->isFirstBlock = 0; - DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", - (unsigned)cSize); - } } - - if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; - return (size_t)(op-ostart); -} - - -static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, - const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) -{ BYTE* const op = (BYTE*)dst; - U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ - U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ - U32 const checksumFlag = params->fParams.checksumFlag>0; - U32 const windowSize = (U32)1 << params->cParams.windowLog; - U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); - BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); - U32 const fcsCode = params->fParams.contentSizeFlag ? 
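/* Worked example (illustrative): pledgedSrcSize = 1000, windowLog = 20, no
 * checksum, no dictID. windowSize (1 MB) >= pledgedSrcSize, so singleSegment
 * == 1 and the windowLog byte below is skipped; fcsCode == 1 because
 * 256 <= 1000 < 65536+256. The descriptor byte is (1<<5) + (1<<6) = 0x60 and
 * the content size is stored as LE16(1000 - 256) = LE16(744), giving
 * 4 (magic) + 1 (descriptor) + 2 (FCS) = 7 header bytes. */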
- (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */ - BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); - size_t pos=0; - - assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)); - RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall, - "dst buf is too small to fit worst-case frame header size."); - DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u", - !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode); - if (params->format == ZSTD_f_zstd1) { - MEM_writeLE32(dst, ZSTD_MAGICNUMBER); - pos = 4; - } - op[pos++] = frameHeaderDescriptionByte; - if (!singleSegment) op[pos++] = windowLogByte; - switch(dictIDSizeCode) - { - default: - assert(0); /* impossible */ - ZSTD_FALLTHROUGH; - case 0 : break; - case 1 : op[pos] = (BYTE)(dictID); pos++; break; - case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; - case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; - } - switch(fcsCode) - { - default: - assert(0); /* impossible */ - ZSTD_FALLTHROUGH; - case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; - case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; - case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; - case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; - } - return pos; -} - -/* ZSTD_writeSkippableFrame_advanced() : - * Writes out a skippable frame with the specified magic number variant (16 are supported), - * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data. - * - * Returns the total number of bytes written, or a ZSTD error code. 
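 * Layout recap with an example: magicVariant 5 produces the 8-byte header
 * LE32(0x184D2A50 + 5) followed by LE32(srcSize), then the raw payload.
 * A typical call (buffer names hypothetical):
 *   size_t const written = ZSTD_writeSkippableFrame(out, outCap, meta, metaSize, 5);
 * Decoders that do not recognize the variant skip the whole frame.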
- */
-size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
-                                const void* src, size_t srcSize, unsigned magicVariant) {
-    BYTE* op = (BYTE*)dst;
-    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
-                    dstSize_tooSmall, "Not enough room for skippable frame");
-    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
-    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
-
-    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
-    MEM_writeLE32(op+4, (U32)srcSize);
-    ZSTD_memcpy(op+8, src, srcSize);
-    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
-}
-
-/* ZSTD_writeLastEmptyBlock() :
- * output an empty Block with end-of-frame mark to complete a frame
- * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
- */
-size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
-{
-    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
-                    "dst buf is too small to write frame trailer empty block.");
-    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
-        MEM_writeLE24(dst, cBlockHeader24);
-        return ZSTD_blockHeaderSize;
-    }
-}
-
-void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
-{
-    assert(cctx->stage == ZSTDcs_init);
-    assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
-    cctx->externSeqStore.seq = seq;
-    cctx->externSeqStore.size = nbSeq;
-    cctx->externSeqStore.capacity = nbSeq;
-    cctx->externSeqStore.pos = 0;
-    cctx->externSeqStore.posInSequence = 0;
-}
-
-
-static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
-                              void* dst, size_t dstCapacity,
-                              const void* src, size_t srcSize,
-                              U32 frame, U32 lastFrameChunk)
-{
-    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
-    size_t fhSize = 0;
-
-    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
-                cctx->stage, (unsigned)srcSize);
-    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
-                    "missing init (ZSTD_compressBegin)");
-
-    if (frame && (cctx->stage==ZSTDcs_init)) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
-                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
-        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
-        assert(fhSize <= dstCapacity);
-        dstCapacity -= fhSize;
-        dst = (char*)dst + fhSize;
-        cctx->stage = ZSTDcs_ongoing;
-    }
-
-    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
-
-    if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
-        ms->forceNonContiguous = 0;
-        ms->nextToUpdate = ms->window.dictLimit;
-    }
-    if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
-        ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
-    }
-
-    if (!frame) {
-        /* overflow check and correction for block mode */
-        ZSTD_overflowCorrectIfNeeded(
-            ms, &cctx->workspace, &cctx->appliedParams,
-            src, (BYTE const*)src + srcSize);
-    }
-
-    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
-    {   size_t const cSize = frame ?
-                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
-                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
-        FORWARD_IF_ERROR(cSize, "%s", frame ?
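/* Header math for ZSTD_writeLastEmptyBlock() above: the 24-bit block header
 * packs lastBlock(1) | blockType(bt_raw == 0) << 1 | blockSize(0) << 3, so
 * cBlockHeader24 == 0x000001, written little-endian as the bytes 01 00 00. */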
"ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); - cctx->consumedSrcSize += srcSize; - cctx->producedCSize += (cSize + fhSize); - assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); - if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ - ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); - RETURN_ERROR_IF( - cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, - srcSize_wrong, - "error : pledgedSrcSize = %u, while realSrcSize >= %u", - (unsigned)cctx->pledgedSrcSizePlusOne-1, - (unsigned)cctx->consumedSrcSize); - } - return cSize + fhSize; - } -} - -size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); -} - -/* NOTE: Must just wrap ZSTD_compressContinue_public() */ -size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); -} - -static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx) -{ - ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; - assert(!ZSTD_checkCParams(cParams)); - return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog); -} - -/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ -size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) -{ - return ZSTD_getBlockSize_deprecated(cctx); -} - -/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ -size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); - { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); - RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } - - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); -} - -/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ -size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); -} - -/*! ZSTD_loadDictionaryContent() : - * @return : 0, or an error code - */ -static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, - ldmState_t* ls, - ZSTD_cwksp* ws, - ZSTD_CCtx_params const* params, - const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp) -{ - const BYTE* ip = (const BYTE*) src; - const BYTE* const iend = ip + srcSize; - int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; - - /* Assert that the ms params match the params we're being given */ - ZSTD_assertEqualCParams(params->cParams, ms->cParams); - - { /* Ensure large dictionaries can't cause index overflow */ - - /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. - * Dictionaries right at the edge will immediately trigger overflow - * correction, but I don't want to insert extra constraints here. 
- */ - U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX; - - int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams); - if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) { - /* Some dictionary matchfinders in zstd use "short cache", - * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each - * CDict hashtable entry as a tag rather than as part of an index. - * When short cache is used, we need to truncate the dictionary - * so that its indices don't overlap with the tag. */ - U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX; - maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize); - assert(!loadLdmDict); - } - - /* If the dictionary is too large, only load the suffix of the dictionary. */ - if (srcSize > maxDictSize) { - ip = iend - maxDictSize; - src = ip; - srcSize = maxDictSize; - } - } - - if (srcSize > ZSTD_CHUNKSIZE_MAX) { - /* We must have cleared our windows when our source is this large. */ - assert(ZSTD_window_isEmpty(ms->window)); - if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); - } - ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0); - - DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); - - if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */ - ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); - ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); - ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams); - } - - /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */ - if (params->cParams.strategy < ZSTD_btultra) { - U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28); - if (srcSize > maxDictSize) { - ip = iend - maxDictSize; - src = ip; - srcSize = maxDictSize; - } - } - - ms->nextToUpdate = (U32)(ip - ms->window.base); - ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); - ms->forceNonContiguous = params->deterministicRefPrefix; - - if (srcSize <= HASH_READ_SIZE) return 0; - - ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend); - - switch(params->cParams.strategy) - { - case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, dtlm, tfp); - break; - case ZSTD_dfast: -#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR - ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); -#else - assert(0); /* shouldn't be called: cparams should've been adjusted. */ -#endif - break; - - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: -#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) - assert(srcSize >= HASH_READ_SIZE); - if (ms->dedicatedDictSearch) { - assert(ms->chainTable != NULL); - ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE); - } else { - assert(params->useRowMatchFinder != ZSTD_ps_auto); - if (params->useRowMatchFinder == ZSTD_ps_enable) { - size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog); - ZSTD_memset(ms->tagTable, 0, tagTableSize); - ZSTD_row_update(ms, iend-HASH_READ_SIZE); - DEBUGLOG(4, "Using row-based hash table for lazy dict"); - } else { - ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE); - DEBUGLOG(4, "Using chain-based hash table for lazy dict"); - } - } -#else - assert(0); /* shouldn't be called: cparams should've been adjusted. 
*/ -#endif - break; - - case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ - case ZSTD_btopt: - case ZSTD_btultra: - case ZSTD_btultra2: -#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) - assert(srcSize >= HASH_READ_SIZE); - ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); -#else - assert(0); /* shouldn't be called: cparams should've been adjusted. */ -#endif - break; - - default: - assert(0); /* not possible : not a valid strategy id */ - } - - ms->nextToUpdate = (U32)(iend - ms->window.base); - return 0; -} - - -/* Dictionaries that assign zero probability to symbols that show up causes problems - * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check - * and only dictionaries with 100% valid symbols can be assumed valid. - */ -static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) -{ - U32 s; - if (dictMaxSymbolValue < maxSymbolValue) { - return FSE_repeat_check; - } - for (s = 0; s <= maxSymbolValue; ++s) { - if (normalizedCounter[s] == 0) { - return FSE_repeat_check; - } - } - return FSE_repeat_valid; -} - -size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, - const void* const dict, size_t dictSize) -{ - short offcodeNCount[MaxOff+1]; - unsigned offcodeMaxValue = MaxOff; - const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */ - const BYTE* const dictEnd = dictPtr + dictSize; - dictPtr += 8; - bs->entropy.huf.repeatMode = HUF_repeat_check; - - { unsigned maxSymbolValue = 255; - unsigned hasZeroWeights = 1; - size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, - dictEnd-dictPtr, &hasZeroWeights); - - /* We only set the loaded table as valid if it contains all non-zero - * weights. 
Otherwise, we set it to check */ - if (!hasZeroWeights && maxSymbolValue == 255) - bs->entropy.huf.repeatMode = HUF_repeat_valid; - - RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); - dictPtr += hufHeaderSize; - } - - { unsigned offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); - /* fill all offset symbols to avoid garbage at end of table */ - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.offcodeCTable, - offcodeNCount, MaxOff, offcodeLog, - workspace, HUF_WORKSPACE_SIZE)), - dictionary_corrupted, ""); - /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ - dictPtr += offcodeHeaderSize; - } - - { short matchlengthNCount[MaxML+1]; - unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.matchlengthCTable, - matchlengthNCount, matchlengthMaxValue, matchlengthLog, - workspace, HUF_WORKSPACE_SIZE)), - dictionary_corrupted, ""); - bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML); - dictPtr += matchlengthHeaderSize; - } - - { short litlengthNCount[MaxLL+1]; - unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); - RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); - RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( - bs->entropy.fse.litlengthCTable, - litlengthNCount, litlengthMaxValue, litlengthLog, - workspace, HUF_WORKSPACE_SIZE)), - dictionary_corrupted, ""); - bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL); - dictPtr += litlengthHeaderSize; - } - - RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); - bs->rep[0] = MEM_readLE32(dictPtr+0); - bs->rep[1] = MEM_readLE32(dictPtr+4); - bs->rep[2] = MEM_readLE32(dictPtr+8); - dictPtr += 12; - - { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); - U32 offcodeMax = MaxOff; - if (dictContentSize <= ((U32)-1) - 128 KB) { - U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ - offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ - } - /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */ - bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)); - - /* All repCodes must be <= dictContentSize and != 0 */ - { U32 u; - for (u=0; u<3; u++) { - RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, ""); - RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); - } } } - - return dictPtr - (const BYTE*)dict; -} - -/* Dictionary format : - * See : - * 
https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format - */ -/*! ZSTD_loadZstdDictionary() : - * @return : dictID, or an error code - * assumptions : magic number supposed already checked - * dictSize supposed >= 8 - */ -static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_CCtx_params const* params, - const void* dict, size_t dictSize, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp, - void* workspace) -{ - const BYTE* dictPtr = (const BYTE*)dict; - const BYTE* const dictEnd = dictPtr + dictSize; - size_t dictID; - size_t eSize; - ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); - assert(dictSize >= 8); - assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY); - - dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ ); - eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize); - FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed"); - dictPtr += eSize; - - { - size_t const dictContentSize = (size_t)(dictEnd - dictPtr); - FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( - ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), ""); - } - return dictID; -} - -/** ZSTD_compress_insertDictionary() : -* @return : dictID, or an error code */ -static size_t -ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, - ZSTD_matchState_t* ms, - ldmState_t* ls, - ZSTD_cwksp* ws, - const ZSTD_CCtx_params* params, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp, - void* workspace) -{ - DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); - if ((dict==NULL) || (dictSize<8)) { - RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); - return 0; - } - - ZSTD_reset_compressedBlockState(bs); - - /* dict restricted modes */ - if (dictContentType == ZSTD_dct_rawContent) - return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp); - - if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { - if (dictContentType == ZSTD_dct_auto) { - DEBUGLOG(4, "raw content dictionary detected"); - return ZSTD_loadDictionaryContent( - ms, ls, ws, params, dict, dictSize, dtlm, tfp); - } - RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); - assert(0); /* impossible */ - } - - /* dict as full zstd dictionary */ - return ZSTD_loadZstdDictionary( - bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace); -} - -#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) -#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL) - -/*! ZSTD_compressBegin_internal() : - * Assumption : either @dict OR @cdict (or none) is non-NULL, never both - * @return : 0, or an error code */ -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, U64 pledgedSrcSize, - ZSTD_buffered_policy_e zbuff) -{ - size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize; -#if ZSTD_TRACE - cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? 
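/* Worked example of the two macros above (sketch; sizes are hypothetical):
 * with a 16 KB dictionary, the decision a few lines below keeps the cdict's
 * own parameters whenever pledgedSrcSize is below
 * MAX(ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF, 6 * 16 KB) = 128 KB, or when the
 * source size is unknown; with a 64 KB dictionary the dominant bound becomes
 * 6 * 64 KB = 384 KB. Beyond that, parameters are re-derived for the source.
 */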
ZSTD_trace_compress_begin(cctx) : 0; -#endif - DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog); - /* params are supposed to be fully validated at this point */ - assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ - if ( (cdict) - && (cdict->dictContentSize > 0) - && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF - || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER - || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN - || cdict->compressionLevel == 0) - && (params->attachDictPref != ZSTD_dictForceLoad) ) { - return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); - } - - FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, - dictContentSize, - ZSTDcrp_makeClean, zbuff) , ""); - { size_t const dictID = cdict ? - ZSTD_compress_insertDictionary( - cctx->blockState.prevCBlock, &cctx->blockState.matchState, - &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, - cdict->dictContentSize, cdict->dictContentType, dtlm, - ZSTD_tfp_forCCtx, cctx->entropyWorkspace) - : ZSTD_compress_insertDictionary( - cctx->blockState.prevCBlock, &cctx->blockState.matchState, - &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, - dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace); - FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); - assert(dictID <= UINT_MAX); - cctx->dictID = (U32)dictID; - cctx->dictContentSize = dictContentSize; - } - return 0; -} - -size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog); - /* compression parameters verification and optimization */ - FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , ""); - return ZSTD_compressBegin_internal(cctx, - dict, dictSize, dictContentType, dtlm, - cdict, - params, pledgedSrcSize, - ZSTDb_not_buffered); -} - -/*! ZSTD_compressBegin_advanced() : -* @return : 0, or an error code */ -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pledgedSrcSize) -{ - ZSTD_CCtx_params cctxParams; - ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL); - return ZSTD_compressBegin_advanced_internal(cctx, - dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, - NULL /*cdict*/, - &cctxParams, pledgedSrcSize); -} - -static size_t -ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) -{ - ZSTD_CCtx_params cctxParams; - { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); - ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? 
ZSTD_CLEVEL_DEFAULT : compressionLevel); - } - DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); - return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, - &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); -} - -size_t -ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) -{ - return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); -} - -size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) -{ - return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel); -} - - -/*! ZSTD_writeEpilogue() : -* Ends a frame. -* @return : nb of bytes written into dst (or an error code) */ -static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) -{ - BYTE* const ostart = (BYTE*)dst; - BYTE* op = ostart; - - DEBUGLOG(4, "ZSTD_writeEpilogue"); - RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); - - /* special case : empty frame */ - if (cctx->stage == ZSTDcs_init) { - size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); - FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); - dstCapacity -= fhSize; - op += fhSize; - cctx->stage = ZSTDcs_ongoing; - } - - if (cctx->stage != ZSTDcs_ending) { - /* write one last empty block, make it the "last" block */ - U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; - ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3); - RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue"); - MEM_writeLE24(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - } - - if (cctx->appliedParams.fParams.checksumFlag) { - U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); - DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); - MEM_writeLE32(op, checksum); - op += 4; - } - - cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ - return op-ostart; -} - -void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) -{ -#if ZSTD_TRACE - if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) { - int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0; - ZSTD_Trace trace; - ZSTD_memset(&trace, 0, sizeof(trace)); - trace.version = ZSTD_VERSION_NUMBER; - trace.streaming = streaming; - trace.dictionaryID = cctx->dictID; - trace.dictionarySize = cctx->dictContentSize; - trace.uncompressedSize = cctx->consumedSrcSize; - trace.compressedSize = cctx->producedCSize + extraCSize; - trace.params = &cctx->appliedParams; - trace.cctx = cctx; - ZSTD_trace_compress_end(cctx->traceCtx, &trace); - } - cctx->traceCtx = 0; -#else - (void)cctx; - (void)extraCSize; -#endif -} - -size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - size_t endResult; - size_t const cSize = ZSTD_compressContinue_internal(cctx, - dst, dstCapacity, src, srcSize, - 1 /* frame mode */, 1 /* last chunk */); - FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed"); - endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); - FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed"); - assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); - if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ 
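/* Sketch of the contract enforced by this check (hypothetical caller): once
 * a size is pledged, the frame must receive exactly that many input bytes or
 * compression fails with srcSize_wrong:
 *
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *     ZSTD_CCtx_setPledgedSrcSize(cctx, 100);
 *     (feed exactly 100 bytes before the final ZSTD_e_end call)
 *
 * ZSTD_CONTENTSIZE_UNKNOWN is stored as pledgedSrcSizePlusOne == 0, which
 * disables the check entirely.
 */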
- ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); - DEBUGLOG(4, "end of frame : controlling src size"); - RETURN_ERROR_IF( - cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, - srcSize_wrong, - "error : pledgedSrcSize = %u, while realSrcSize = %u", - (unsigned)cctx->pledgedSrcSizePlusOne-1, - (unsigned)cctx->consumedSrcSize); - } - ZSTD_CCtx_trace(cctx, endResult); - return cSize + endResult; -} - -/* NOTE: Must just wrap ZSTD_compressEnd_public() */ -size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); -} - -size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params) -{ - DEBUGLOG(4, "ZSTD_compress_advanced"); - FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); - ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL); - return ZSTD_compress_advanced_internal(cctx, - dst, dstCapacity, - src, srcSize, - dict, dictSize, - &cctx->simpleApiParams); -} - -/* Internal */ -size_t ZSTD_compress_advanced_internal( - ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - const ZSTD_CCtx_params* params) -{ - DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize); - FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, - dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, - params, srcSize, ZSTDb_not_buffered) , ""); - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); -} - -size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict, size_t dictSize, - int compressionLevel) -{ - { - ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict); - assert(params.fParams.contentSizeFlag == 1); - ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel); - } - DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize); - return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); -} - -size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel) -{ - DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); - assert(cctx != NULL); - return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); -} - -size_t ZSTD_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel) -{ - size_t result; -#if ZSTD_COMPRESS_HEAPMODE - ZSTD_CCtx* cctx = ZSTD_createCCtx(); - RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed"); - result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel); - ZSTD_freeCCtx(cctx); -#else - ZSTD_CCtx ctxBody; - ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); - result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); - ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ -#endif - return result; -} - - -/* ===== Dictionary API ===== */ - -/*! 
ZSTD_estimateCDictSize_advanced() : - * Estimate amount of memory that will be needed to create a dictionary with following arguments */ -size_t ZSTD_estimateCDictSize_advanced( - size_t dictSize, ZSTD_compressionParameters cParams, - ZSTD_dictLoadMethod_e dictLoadMethod) -{ - DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); - return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) - + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) - /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small - * in case we are using DDS with row-hash. */ - + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams), - /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0) - + (dictLoadMethod == ZSTD_dlm_byRef ? 0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); -} - -size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) -{ - ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); - return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); -} - -size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) -{ - if (cdict==NULL) return 0; /* support sizeof on NULL */ - DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); - /* cdict may be in the workspace */ - return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict)) - + ZSTD_cwksp_sizeof(&cdict->workspace); -} - -static size_t ZSTD_initCDict_internal( - ZSTD_CDict* cdict, - const void* dictBuffer, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_CCtx_params params) -{ - DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType); - assert(!ZSTD_checkCParams(params.cParams)); - cdict->matchState.cParams = params.cParams; - cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch; - if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { - cdict->dictContent = dictBuffer; - } else { - void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*))); - RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!"); - cdict->dictContent = internalBuffer; - ZSTD_memcpy(internalBuffer, dictBuffer, dictSize); - } - cdict->dictContentSize = dictSize; - cdict->dictContentType = dictContentType; - - cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE); - - - /* Reset the state to no dictionary */ - ZSTD_reset_compressedBlockState(&cdict->cBlockState); - FORWARD_IF_ERROR(ZSTD_reset_matchState( - &cdict->matchState, - &cdict->workspace, - &params.cParams, - params.useRowMatchFinder, - ZSTDcrp_makeClean, - ZSTDirp_reset, - ZSTD_resetTarget_CDict), ""); - /* (Maybe) load the dictionary - * Skips loading the dictionary if it is < 8 bytes. 
- */ - { params.compressionLevel = ZSTD_CLEVEL_DEFAULT; - params.fParams.contentSizeFlag = 1; - { size_t const dictID = ZSTD_compress_insertDictionary( - &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, - &params, cdict->dictContent, cdict->dictContentSize, - dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace); - FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); - assert(dictID <= (size_t)(U32)-1); - cdict->dictID = (U32)dictID; - } - } - - return 0; -} - -static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_compressionParameters cParams, - ZSTD_paramSwitch_e useRowMatchFinder, - U32 enableDedicatedDictSearch, - ZSTD_customMem customMem) -{ - if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - - { size_t const workspaceSize = - ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + - ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + - ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) + - (dictLoadMethod == ZSTD_dlm_byRef ? 0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))); - void* const workspace = ZSTD_customMalloc(workspaceSize, customMem); - ZSTD_cwksp ws; - ZSTD_CDict* cdict; - - if (!workspace) { - ZSTD_customFree(workspace, customMem); - return NULL; - } - - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc); - - cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); - assert(cdict != NULL); - ZSTD_cwksp_move(&cdict->workspace, &ws); - cdict->customMem = customMem; - cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */ - cdict->useRowMatchFinder = useRowMatchFinder; - return cdict; - } -} - -ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams, - ZSTD_customMem customMem) -{ - ZSTD_CCtx_params cctxParams; - ZSTD_memset(&cctxParams, 0, sizeof(cctxParams)); - ZSTD_CCtxParams_init(&cctxParams, 0); - cctxParams.cParams = cParams; - cctxParams.customMem = customMem; - return ZSTD_createCDict_advanced2( - dictBuffer, dictSize, - dictLoadMethod, dictContentType, - &cctxParams, customMem); -} - -ZSTD_CDict* ZSTD_createCDict_advanced2( - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - const ZSTD_CCtx_params* originalCctxParams, - ZSTD_customMem customMem) -{ - ZSTD_CCtx_params cctxParams = *originalCctxParams; - ZSTD_compressionParameters cParams; - ZSTD_CDict* cdict; - - DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType); - if (!customMem.customAlloc ^ !customMem.customFree) return NULL; - - if (cctxParams.enableDedicatedDictSearch) { - cParams = ZSTD_dedicatedDictSearch_getCParams( - cctxParams.compressionLevel, dictSize); - ZSTD_overrideCParams(&cParams, &cctxParams.cParams); - } else { - cParams = ZSTD_getCParamsFromCCtxParams( - &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); - } - - if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) { - /* Fall back to non-DDSS params */ - cctxParams.enableDedicatedDictSearch = 0; - cParams = ZSTD_getCParamsFromCCtxParams( - &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); - } - - DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch); - cctxParams.cParams = cParams; - 
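/* Typical CDict lifecycle through the simple API (sketch; dictBuf, dictSize
 * and cctx are hypothetical, error handling elided):
 *
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
 *                                                   src, srcSize, cdict);
 *     ZSTD_freeCDict(cdict);
 *
 * Reusing one CDict across many frames amortizes the table loading performed
 * by ZSTD_initCDict_internal() above.
 */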
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); - - cdict = ZSTD_createCDict_advanced_internal(dictSize, - dictLoadMethod, cctxParams.cParams, - cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch, - customMem); - - if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict, - dict, dictSize, - dictLoadMethod, dictContentType, - cctxParams) )) { - ZSTD_freeCDict(cdict); - return NULL; - } - - return cdict; -} - -ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) -{ - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); - ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize, - ZSTD_dlm_byCopy, ZSTD_dct_auto, - cParams, ZSTD_defaultCMem); - if (cdict) - cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel; - return cdict; -} - -ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) -{ - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); - ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize, - ZSTD_dlm_byRef, ZSTD_dct_auto, - cParams, ZSTD_defaultCMem); - if (cdict) - cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel; - return cdict; -} - -size_t ZSTD_freeCDict(ZSTD_CDict* cdict) -{ - if (cdict==NULL) return 0; /* support free on NULL */ - { ZSTD_customMem const cMem = cdict->customMem; - int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); - ZSTD_cwksp_free(&cdict->workspace, cMem); - if (!cdictInWorkspace) { - ZSTD_customFree(cdict, cMem); - } - return 0; - } -} - -/*! ZSTD_initStaticCDict_advanced() : - * Generate a digested dictionary in provided memory area. - * workspace: The memory area to emplace the dictionary into. - * Provided pointer must 8-bytes aligned. - * It must outlive dictionary usage. - * workspaceSize: Use ZSTD_estimateCDictSize() - * to determine how large workspace must be. - * cParams : use ZSTD_getCParams() to transform a compression level - * into its relevant cParams. - * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) - * Note : there is no corresponding "free" function. - * Since workspace was allocated externally, it must be freed externally. - */ -const ZSTD_CDict* ZSTD_initStaticCDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams) -{ - ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); - /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ - size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); - size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) - + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))) - + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) - + matchStateSize; - ZSTD_CDict* cdict; - ZSTD_CCtx_params params; - - if ((size_t)workspace & 7) return NULL; /* 8-aligned */ - - { - ZSTD_cwksp ws; - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); - cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); - if (cdict == NULL) return NULL; - ZSTD_cwksp_move(&cdict->workspace, &ws); - } - - DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", - (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); - if (workspaceSize < neededSize) return NULL; - - ZSTD_CCtxParams_init(&params, 0); - params.cParams = cParams; - params.useRowMatchFinder = useRowMatchFinder; - cdict->useRowMatchFinder = useRowMatchFinder; - cdict->compressionLevel = ZSTD_NO_CLEVEL; - - if (ZSTD_isError( ZSTD_initCDict_internal(cdict, - dict, dictSize, - dictLoadMethod, dictContentType, - params) )) - return NULL; - - return cdict; -} - -ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) -{ - assert(cdict != NULL); - return cdict->matchState.cParams; -} - -/*! ZSTD_getDictID_fromCDict() : - * Provides the dictID of the dictionary loaded into `cdict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict) -{ - if (cdict==NULL) return 0; - return cdict->dictID; -} - -/* ZSTD_compressBegin_usingCDict_internal() : - * Implementation of various ZSTD_compressBegin_usingCDict* functions. - */ -static size_t ZSTD_compressBegin_usingCDict_internal( - ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, - ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) -{ - ZSTD_CCtx_params cctxParams; - DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal"); - RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!"); - /* Initialize the cctxParams from the cdict */ - { - ZSTD_parameters params; - params.fParams = fParams; - params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF - || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER - || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN - || cdict->compressionLevel == 0 ) ? - ZSTD_getCParamsFromCDict(cdict) - : ZSTD_getCParams(cdict->compressionLevel, - pledgedSrcSize, - cdict->dictContentSize); - ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel); - } - /* Increase window log to fit the entire dictionary and source if the - * source size is known. Limit the increase to 19, which is the - * window log for compression level 1 with the largest source size. - */ - if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { - U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); - U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; - cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog); - } - return ZSTD_compressBegin_internal(cctx, - NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, - cdict, - &cctxParams, pledgedSrcSize, - ZSTDb_not_buffered); -} - - -/* ZSTD_compressBegin_usingCDict_advanced() : - * This function is DEPRECATED. 
- * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict_advanced( - ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, - ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) -{ - return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); -} - -/* ZSTD_compressBegin_usingCDict() : - * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) -{ - ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); -} - -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) -{ - return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); -} - -/*! ZSTD_compress_usingCDict_internal(): - * Implementation of various ZSTD_compress_usingCDict* functions. - */ -static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) -{ - FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); -} - -/*! ZSTD_compress_usingCDict_advanced(): - * This function is DEPRECATED. - */ -size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) -{ - return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); -} - -/*! ZSTD_compress_usingCDict() : - * Compression using a digested Dictionary. - * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
- * Note that compression parameters are decided at CDict creation time - * while frame parameters are hardcoded */ -size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict) -{ - ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; - return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); -} - - - -/* ****************************************************************** -* Streaming -********************************************************************/ - -ZSTD_CStream* ZSTD_createCStream(void) -{ - DEBUGLOG(3, "ZSTD_createCStream"); - return ZSTD_createCStream_advanced(ZSTD_defaultCMem); -} - -ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) -{ - return ZSTD_initStaticCCtx(workspace, workspaceSize); -} - -ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) -{ /* CStream and CCtx are now same object */ - return ZSTD_createCCtx_advanced(customMem); -} - -size_t ZSTD_freeCStream(ZSTD_CStream* zcs) -{ - return ZSTD_freeCCtx(zcs); /* same object */ -} - - - -/*====== Initialization ======*/ - -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } - -size_t ZSTD_CStreamOutSize(void) -{ - return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; -} - -static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) -{ - if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) - return ZSTD_cpm_attachDict; - else - return ZSTD_cpm_noAttachDict; -} - -/* ZSTD_resetCStream(): - * pledgedSrcSize == 0 means "unknown" */ -size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) -{ - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. - * 0 will be interpreted as "empty" in the future. - */ - U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); - return 0; -} - -/*! ZSTD_initCStream_internal() : - * Note : for lib/compress only. Used by zstdmt_compress.c. 
- * Assumption 1 : params are valid - * Assumption 2 : either dict, or cdict, is defined, not both */ -size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_initCStream_internal"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); - assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); - zcs->requestedParams = *params; - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ - if (dict) { - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); - } else { - /* Dictionary is cleared if !cdict */ - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); - } - return 0; -} - -/* ZSTD_initCStream_usingCDict_advanced() : - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ -size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); - zcs->requestedParams.fParams = fParams; - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); - return 0; -} - -/* note : cdict must outlive compression session */ -size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); - return 0; -} - - -/* ZSTD_initCStream_advanced() : - * pledgedSrcSize must be exact. - * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ -size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - ZSTD_parameters params, unsigned long long pss) -{ - /* for compatibility with older programs relying on this behavior. - * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. - * This line will be removed in the future. - */ - U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_initCStream_advanced"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); - FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); - ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params); - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); - return 0; -} - -size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) -{ - DEBUGLOG(4, "ZSTD_initCStream_usingDict"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); - return 0; -} - -size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) -{ - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. 
- * 0 will be interpreted as "empty" in the future. - */ - U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; - DEBUGLOG(4, "ZSTD_initCStream_srcSize"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); - return 0; -} - -size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) -{ - DEBUGLOG(4, "ZSTD_initCStream"); - FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); - FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); - return 0; -} - -/*====== Compression ======*/ - -static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) -{ - if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { - return cctx->blockSize - cctx->stableIn_notConsumed; - } - assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); - { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize==0) hintInSize = cctx->blockSize; - return hintInSize; - } -} - -/** ZSTD_compressStream_generic(): - * internal function for all *compressStream*() variants - * @return : hint size for next input to complete ongoing block */ -static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective const flushMode) -{ - const char* const istart = (assert(input != NULL), (const char*)input->src); - const char* const iend = (istart != NULL) ? istart + input->size : istart; - const char* ip = (istart != NULL) ? istart + input->pos : istart; - char* const ostart = (assert(output != NULL), (char*)output->dst); - char* const oend = (ostart != NULL) ? ostart + output->size : ostart; - char* op = (ostart != NULL) ? 
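/* Canonical buffered streaming loop that ends up in this function (sketch;
 * inBuf, readSize and outBuf are hypothetical):
 *
 *     ZSTD_inBuffer in = { inBuf, readSize, 0 };
 *     ZSTD_outBuffer out = { outBuf, ZSTD_CStreamOutSize(), 0 };
 *     while (in.pos < in.size) {
 *         size_t const rc = ZSTD_compressStream2(zcs, &out, &in, ZSTD_e_continue);
 *         if (ZSTD_isError(rc)) return rc;
 *     }
 *
 * ZSTD_CStreamInSize() and ZSTD_CStreamOutSize() above return buffer sizes
 * intended to guarantee forward progress on every call.
 */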
ostart + output->pos : ostart; - U32 someMoreWork = 1; - - /* check expectations */ - DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); - assert(zcs != NULL); - if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { - assert(input->pos >= zcs->stableIn_notConsumed); - input->pos -= zcs->stableIn_notConsumed; - if (ip) ip -= zcs->stableIn_notConsumed; - zcs->stableIn_notConsumed = 0; - } - if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { - assert(zcs->inBuff != NULL); - assert(zcs->inBuffSize > 0); - } - if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) { - assert(zcs->outBuff != NULL); - assert(zcs->outBuffSize > 0); - } - if (input->src == NULL) assert(input->size == 0); - assert(input->pos <= input->size); - if (output->dst == NULL) assert(output->size == 0); - assert(output->pos <= output->size); - assert((U32)flushMode <= (U32)ZSTD_e_end); - - while (someMoreWork) { - switch(zcs->streamStage) - { - case zcss_init: - RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); - - case zcss_load: - if ( (flushMode == ZSTD_e_end) - && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */ - || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ - && (zcs->inBuffPos == 0) ) { - /* shortcut to compression pass directly into output buffer */ - size_t const cSize = ZSTD_compressEnd_public(zcs, - op, oend-op, ip, iend-ip); - DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); - FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); - ip = iend; - op += cSize; - zcs->frameEnded = 1; - ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - someMoreWork = 0; break; - } - /* complete loading into inBuffer in buffered mode */ - if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { - size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; - size_t const loaded = ZSTD_limitCopy( - zcs->inBuff + zcs->inBuffPos, toLoad, - ip, iend-ip); - zcs->inBuffPos += loaded; - if (ip) ip += loaded; - if ( (flushMode == ZSTD_e_continue) - && (zcs->inBuffPos < zcs->inBuffTarget) ) { - /* not enough input to fill full block : stop here */ - someMoreWork = 0; break; - } - if ( (flushMode == ZSTD_e_flush) - && (zcs->inBuffPos == zcs->inToCompress) ) { - /* empty */ - someMoreWork = 0; break; - } - } else { - assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); - if ( (flushMode == ZSTD_e_continue) - && ( (size_t)(iend - ip) < zcs->blockSize) ) { - /* can't compress a full block : stop here */ - zcs->stableIn_notConsumed = (size_t)(iend - ip); - ip = iend; /* pretend to have consumed input */ - someMoreWork = 0; break; - } - if ( (flushMode == ZSTD_e_flush) - && (ip == iend) ) { - /* empty */ - someMoreWork = 0; break; - } - } - /* compress current block (note : this stage cannot be stopped in the middle) */ - DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); - { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); - void* cDst; - size_t cSize; - size_t oSize = oend-op; - size_t const iSize = inputBuffered ? 
zcs->inBuffPos - zcs->inToCompress - : MIN((size_t)(iend - ip), zcs->blockSize); - if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) - cDst = op; /* compress into output buffer, to skip flush stage */ - else - cDst = zcs->outBuff, oSize = zcs->outBuffSize; - if (inputBuffered) { - unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); - cSize = lastBlock ? - ZSTD_compressEnd_public(zcs, cDst, oSize, - zcs->inBuff + zcs->inToCompress, iSize) : - ZSTD_compressContinue_public(zcs, cDst, oSize, - zcs->inBuff + zcs->inToCompress, iSize); - FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); - zcs->frameEnded = lastBlock; - /* prepare next block */ - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; - if (zcs->inBuffTarget > zcs->inBuffSize) - zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; - DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", - (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); - if (!lastBlock) - assert(zcs->inBuffTarget <= zcs->inBuffSize); - zcs->inToCompress = zcs->inBuffPos; - } else { /* !inputBuffered, hence ZSTD_bm_stable */ - unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); - cSize = lastBlock ? - ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : - ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); - /* Consume the input prior to error checking to mirror buffered mode. */ - if (ip) ip += iSize; - FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); - zcs->frameEnded = lastBlock; - if (lastBlock) assert(ip == iend); - } - if (cDst == op) { /* no need to flush */ - op += cSize; - if (zcs->frameEnded) { - DEBUGLOG(5, "Frame completed directly in outBuffer"); - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - } - break; - } - zcs->outBuffContentSize = cSize; - zcs->outBuffFlushedSize = 0; - zcs->streamStage = zcss_flush; /* pass-through to flush stage */ - } - ZSTD_FALLTHROUGH; - case zcss_flush: - DEBUGLOG(5, "flush stage"); - assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); - { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), - zcs->outBuff + zcs->outBuffFlushedSize, toFlush); - DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", - (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); - if (flushed) - op += flushed; - zcs->outBuffFlushedSize += flushed; - if (toFlush!=flushed) { - /* flush not fully completed, presumably because dst is too small */ - assert(op==oend); - someMoreWork = 0; - break; - } - zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; - if (zcs->frameEnded) { - DEBUGLOG(5, "Frame completed on flush"); - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - break; - } - zcs->streamStage = zcss_load; - break; - } - - default: /* impossible */ - assert(0); - } - } - - input->pos = ip - istart; - output->pos = op - ostart; - if (zcs->frameEnded) return 0; - return ZSTD_nextInputSizeHint(zcs); -} - -static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) -{ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers >= 1) { - assert(cctx->mtctx != NULL); - return ZSTDMT_nextInputSizeHint(cctx->mtctx); - } -#endif - return ZSTD_nextInputSizeHint(cctx); - -} - -size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) -{ - FORWARD_IF_ERROR( 
ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); - return ZSTD_nextInputSizeHint_MTorST(zcs); -} - -/* After a compression call set the expected input/output buffer. - * This is validated at the start of the next compression call. - */ -static void -ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input) -{ - DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)"); - if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { - cctx->expectedInBuffer = *input; - } - if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { - cctx->expectedOutBufferSize = output->size - output->pos; - } -} - -/* Validate that the input/output buffers match the expectations set by - * ZSTD_setBufferExpectations. - */ -static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, - ZSTD_outBuffer const* output, - ZSTD_inBuffer const* input, - ZSTD_EndDirective endOp) -{ - if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { - ZSTD_inBuffer const expect = cctx->expectedInBuffer; - if (expect.src != input->src || expect.pos != input->pos) - RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!"); - } - (void)endOp; - if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { - size_t const outBufferSize = output->size - output->pos; - if (cctx->expectedOutBufferSize != outBufferSize) - RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!"); - } - return 0; -} - -static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, - ZSTD_EndDirective endOp, - size_t inSize) -{ - ZSTD_CCtx_params params = cctx->requestedParams; - ZSTD_prefixDict const prefixDict = cctx->prefixDict; - FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ - ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ - assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ - if (cctx->cdict && !cctx->localDict.cdict) { - /* Let the cdict's compression level take priority over the requested params. - * But do not take the cdict's compression level if the "cdict" is actually a localDict - * generated from ZSTD_initLocalDict(). - */ - params.compressionLevel = cctx->cdict->compressionLevel; - } - DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); - if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ - - { size_t const dictSize = prefixDict.dict - ? prefixDict.dictSize - : (cctx->cdict ? 
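/* Sketch of the stable-buffer contract validated by the two helpers above
 * (the parameters are real; the calls shown are a hypothetical setup):
 *
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1);
 *
 * The caller then promises that input.src and input.pos are exactly where
 * the previous call left them, and that the free output region keeps the
 * same size; any deviation fails with stabilityCondition_notRespected.
 */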
cctx->cdict->dictContentSize : 0); - ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1); - params.cParams = ZSTD_getCParamsFromCCtxParams( - &params, cctx->pledgedSrcSizePlusOne-1, - dictSize, mode); - } - - params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams); - params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams); - params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams); - params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); - params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); - params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); - -#ifdef ZSTD_MULTITHREAD - /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */ - RETURN_ERROR_IF( - ZSTD_hasExtSeqProd(&params) && params.nbWorkers >= 1, - parameter_combination_unsupported, - "External sequence producer isn't supported with nbWorkers >= 1" - ); - - if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { - params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ - } - if (params.nbWorkers > 0) { -#if ZSTD_TRACE - cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0; -#endif - /* mt context creation */ - if (cctx->mtctx == NULL) { - DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", - params.nbWorkers); - cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool); - RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!"); - } - /* mt compression */ - DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); - FORWARD_IF_ERROR( ZSTDMT_initCStream_internal( - cctx->mtctx, - prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, - cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , ""); - cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0; - cctx->dictContentSize = cctx->cdict ? 
cctx->cdict->dictContentSize : prefixDict.dictSize; - cctx->consumedSrcSize = 0; - cctx->producedCSize = 0; - cctx->streamStage = zcss_load; - cctx->appliedParams = params; - } else -#endif /* ZSTD_MULTITHREAD */ - { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, - prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast, - cctx->cdict, - &params, pledgedSrcSize, - ZSTDb_buffered) , ""); - assert(cctx->appliedParams.nbWorkers == 0); - cctx->inToCompress = 0; - cctx->inBuffPos = 0; - if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) { - /* for small input: avoid automatic flush on reaching end of block, since - * it would require to add a 3-bytes null block to end frame - */ - cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize); - } else { - cctx->inBuffTarget = 0; - } - cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; - cctx->streamStage = zcss_load; - cctx->frameEnded = 0; - } - return 0; -} - -/* @return provides a minimum amount of data remaining to be flushed from internal buffers - */ -size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp) -{ - DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); - /* check conditions */ - RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer"); - RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer"); - RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective"); - assert(cctx != NULL); - - /* transparent initialization stage */ - if (cctx->streamStage == zcss_init) { - size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */ - size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; - if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ - && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */ - && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */ - if (cctx->stableIn_notConsumed) { /* not the first time */ - /* check stable source guarantees */ - RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); - RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); - } - /* pretend input was consumed, to give a sense forward progress */ - input->pos = input->size; - /* save stable inBuffer, for later control, and flush/end */ - cctx->expectedInBuffer = *input; - /* but actually input wasn't consumed, so keep track of position from where compression shall resume */ - cctx->stableIn_notConsumed += inputSize; - /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ - return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */ - } - FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed"); - ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ - } - /* end of transparent initialization stage */ - - FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, 
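/* Sketch of the return-value contract (hypothetical driver loop): a nonzero
 * return from ZSTD_compressStream2() is a minimum count of bytes still held
 * in internal buffers, so ending a frame is typically written as:
 *
 *     size_t remaining;
 *     do {
 *         remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
 *         if (ZSTD_isError(remaining)) return remaining;
 *     } while (remaining != 0);
 *
 * draining `out` between iterations whenever it fills up.
 */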
input, endOp), "invalid buffers"); - /* compression stage */ -#ifdef ZSTD_MULTITHREAD - if (cctx->appliedParams.nbWorkers > 0) { - size_t flushMin; - if (cctx->cParamsChanged) { - ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); - cctx->cParamsChanged = 0; - } - if (cctx->stableIn_notConsumed) { - assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable); - /* some early data was skipped - make it available for consumption */ - assert(input->pos >= cctx->stableIn_notConsumed); - input->pos -= cctx->stableIn_notConsumed; - cctx->stableIn_notConsumed = 0; - } - for (;;) { - size_t const ipos = input->pos; - size_t const opos = output->pos; - flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); - cctx->consumedSrcSize += (U64)(input->pos - ipos); - cctx->producedCSize += (U64)(output->pos - opos); - if ( ZSTD_isError(flushMin) - || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ - if (flushMin == 0) - ZSTD_CCtx_trace(cctx, 0); - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); - } - FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed"); - - if (endOp == ZSTD_e_continue) { - /* We only require some progress with ZSTD_e_continue, not maximal progress. - * We're done if we've consumed or produced any bytes, or either buffer is - * full. - */ - if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size) - break; - } else { - assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end); - /* We require maximal progress. We're done when the flush is complete or the - * output buffer is full. - */ - if (flushMin == 0 || output->pos == output->size) - break; - } - } - DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); - /* Either we don't require maximum forward progress, we've finished the - * flush, or we are out of output space. - */ - assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size); - ZSTD_setBufferExpectations(cctx, output, input); - return flushMin; - } -#endif /* ZSTD_MULTITHREAD */ - FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); - DEBUGLOG(5, "completed ZSTD_compressStream2"); - ZSTD_setBufferExpectations(cctx, output, input); - return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ -} - -size_t ZSTD_compressStream2_simpleArgs ( - ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos, - ZSTD_EndDirective endOp) -{ - ZSTD_outBuffer output; - ZSTD_inBuffer input; - output.dst = dst; - output.size = dstCapacity; - output.pos = *dstPos; - input.src = src; - input.size = srcSize; - input.pos = *srcPos; - /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ - { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; - } -} - -size_t ZSTD_compress2(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode; - ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode; - DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); - ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); - /* Enable stable input/output buffers. 
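 * Annotation, not upstream code: because this single-shot entry point hands the
 * entire src and dst to the streaming core at once, stable buffer mode lets it
 * read and write them in place, skipping the internal staging buffers.
 * Illustrative caller-side sketch (error handling elided):
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     size_t const cSize = ZSTD_compress2(cctx,
 *                                         dst, ZSTD_compressBound(srcSize),
 *                                         src, srcSize);
 *     if (ZSTD_isError(cSize)) { ...report ZSTD_getErrorName(cSize)... }
 *     ZSTD_freeCCtx(cctx);
 *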
*/ - cctx->requestedParams.inBufferMode = ZSTD_bm_stable; - cctx->requestedParams.outBufferMode = ZSTD_bm_stable; - { size_t oPos = 0; - size_t iPos = 0; - size_t const result = ZSTD_compressStream2_simpleArgs(cctx, - dst, dstCapacity, &oPos, - src, srcSize, &iPos, - ZSTD_e_end); - /* Reset to the original values. */ - cctx->requestedParams.inBufferMode = originalInBufferMode; - cctx->requestedParams.outBufferMode = originalOutBufferMode; - - FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); - if (result != 0) { /* compression not completed, due to lack of output space */ - assert(oPos == dstCapacity); - RETURN_ERROR(dstSize_tooSmall, ""); - } - assert(iPos == srcSize); /* all input is expected consumed */ - return oPos; - } -} - -/* ZSTD_validateSequence() : - * @offCode : is presumed to follow format required by ZSTD_storeSeq() - * @returns a ZSTD error code if sequence is not valid - */ -static size_t -ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch, - size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) -{ - U32 const windowSize = 1u << windowLog; - /* posInSrc represents the amount of data the decoder would decode up to this point. - * As long as the amount of data decoded is less than or equal to window size, offsets may be - * larger than the total length of output decoded in order to reference the dict, even larger than - * window size. After output surpasses windowSize, we're limited to windowSize offsets again. - */ - size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; - size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; - RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); - /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ - RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); - return 0; -} - -/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ -static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) -{ - U32 offBase = OFFSET_TO_OFFBASE(rawOffset); - - if (!ll0 && rawOffset == rep[0]) { - offBase = REPCODE1_TO_OFFBASE; - } else if (rawOffset == rep[1]) { - offBase = REPCODE_TO_OFFBASE(2 - ll0); - } else if (rawOffset == rep[2]) { - offBase = REPCODE_TO_OFFBASE(3 - ll0); - } else if (ll0 && rawOffset == rep[0] - 1) { - offBase = REPCODE3_TO_OFFBASE; - } - return offBase; -} - -size_t -ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, - ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize, - ZSTD_paramSwitch_e externalRepSearch) -{ - U32 idx = seqPos->idx; - U32 const startIdx = idx; - BYTE const* ip = (BYTE const*)(src); - const BYTE* const iend = ip + blockSize; - repcodes_t updatedRepcodes; - U32 dictSize; - - DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize); - - if (cctx->cdict) { - dictSize = (U32)cctx->cdict->dictContentSize; - } else if (cctx->prefixDict.dict) { - dictSize = (U32)cctx->prefixDict.dictSize; - } else { - dictSize = 0; - } - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); - for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { - U32 const litLength = inSeqs[idx].litLength; - U32 
const matchLength = inSeqs[idx].matchLength; - U32 offBase; - - if (externalRepSearch == ZSTD_ps_disable) { - offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); - } else { - U32 const ll0 = (litLength == 0); - offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); - } - - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); - if (cctx->appliedParams.validateSequences) { - seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), - "Sequence validation failed"); - } - RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, - "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); - ip += matchLength + litLength; - } - - /* If we skipped repcode search while parsing, we need to update repcodes now */ - assert(externalRepSearch != ZSTD_ps_auto); - assert(idx >= startIdx); - if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { - U32* const rep = updatedRepcodes.rep; - U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ - - if (lastSeqIdx >= startIdx + 2) { - rep[2] = inSeqs[lastSeqIdx - 2].offset; - rep[1] = inSeqs[lastSeqIdx - 1].offset; - rep[0] = inSeqs[lastSeqIdx].offset; - } else if (lastSeqIdx == startIdx + 1) { - rep[2] = rep[0]; - rep[1] = inSeqs[lastSeqIdx - 1].offset; - rep[0] = inSeqs[lastSeqIdx].offset; - } else { - assert(lastSeqIdx == startIdx); - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = inSeqs[lastSeqIdx].offset; - } - } - - ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); - - if (inSeqs[idx].litLength) { - DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); - ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength); - ip += inSeqs[idx].litLength; - seqPos->posInSrc += inSeqs[idx].litLength; - } - RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); - seqPos->idx = idx+1; - return 0; -} - -size_t -ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) -{ - U32 idx = seqPos->idx; - U32 startPosInSequence = seqPos->posInSequence; - U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; - size_t dictSize; - BYTE const* ip = (BYTE const*)(src); - BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ - repcodes_t updatedRepcodes; - U32 bytesAdjustment = 0; - U32 finalMatchSplit = 0; - - /* TODO(embg) support fast parsing mode in noBlockDelim mode */ - (void)externalRepSearch; - - if (cctx->cdict) { - dictSize = cctx->cdict->dictContentSize; - } else if (cctx->prefixDict.dict) { - dictSize = cctx->prefixDict.dictSize; - } else { - dictSize = 0; - } - DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); - DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, 
sizeof(repcodes_t)); - while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { - const ZSTD_Sequence currSeq = inSeqs[idx]; - U32 litLength = currSeq.litLength; - U32 matchLength = currSeq.matchLength; - U32 const rawOffset = currSeq.offset; - U32 offBase; - - /* Modify the sequence depending on where endPosInSequence lies */ - if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { - if (startPosInSequence >= litLength) { - startPosInSequence -= litLength; - litLength = 0; - matchLength -= startPosInSequence; - } else { - litLength -= startPosInSequence; - } - /* Move to the next sequence */ - endPosInSequence -= currSeq.litLength + currSeq.matchLength; - startPosInSequence = 0; - } else { - /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence - does not reach the end of the match. So, we have to split the sequence */ - DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u", - currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence); - if (endPosInSequence > litLength) { - U32 firstHalfMatchLength; - litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence; - firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; - if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) { - /* Only ever split the match if it is larger than the block size */ - U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence; - if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { - /* Move the endPosInSequence backward so that it creates match of minMatch length */ - endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; - bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; - firstHalfMatchLength -= bytesAdjustment; - } - matchLength = firstHalfMatchLength; - /* Flag that we split the last match - after storing the sequence, exit the loop, - but keep the value of endPosInSequence */ - finalMatchSplit = 1; - } else { - /* Move the position in sequence backwards so that we don't split match, and break to store - * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence - * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so - * would cause the first half of the match to be too small - */ - bytesAdjustment = endPosInSequence - currSeq.litLength; - endPosInSequence = currSeq.litLength; - break; - } - } else { - /* This sequence ends inside the literals, break to store the last literals */ - break; - } - } - /* Check if this offset can be represented with a repcode */ - { U32 const ll0 = (litLength == 0); - offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); - } - - if (cctx->appliedParams.validateSequences) { - seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), - "Sequence validation failed"); - } - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); - RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, - "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); - ip += matchLength + litLength; - if (!finalMatchSplit) - idx++; /* Next Sequence */ - } - DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); - assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); - seqPos->idx = idx; - seqPos->posInSequence = endPosInSequence; - ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); - - iend -= bytesAdjustment; - if (ip != iend) { - /* Store any last literals */ - U32 lastLLSize = (U32)(iend - ip); - assert(ip <= iend); - DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); - ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); - seqPos->posInSrc += lastLLSize; - } - - return bytesAdjustment; -} - -typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); -static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) -{ - ZSTD_sequenceCopier sequenceCopier = NULL; - assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); - if (mode == ZSTD_sf_explicitBlockDelimiters) { - return ZSTD_copySequencesToSeqStoreExplicitBlockDelim; - } else if (mode == ZSTD_sf_noBlockDelimiters) { - return ZSTD_copySequencesToSeqStoreNoBlockDelim; - } - assert(sequenceCopier != NULL); - return sequenceCopier; -} - -/* Discover the size of next block by searching for the delimiter. - * Note that a block delimiter **must** exist in this mode, - * otherwise it's an input error. - * The block size retrieved will be later compared to ensure it remains within bounds */ -static size_t -blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) -{ - int end = 0; - size_t blockSize = 0; - size_t spos = seqPos.idx; - DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); - assert(spos <= inSeqsSize); - while (spos < inSeqsSize) { - end = (inSeqs[spos].offset == 0); - blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; - if (end) { - if (inSeqs[spos].matchLength != 0) - RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); - break; - } - spos++; - } - if (!end) - RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); - return blockSize; -} - -/* More a "target" block size */ -static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining) -{ - int const lastBlock = (remaining <= blockSize); - return lastBlock ? 
remaining : blockSize; -} - -static size_t determine_blockSize(ZSTD_sequenceFormat_e mode, - size_t blockSize, size_t remaining, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) -{ - DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); - if (mode == ZSTD_sf_noBlockDelimiters) - return blockSize_noDelimiter(blockSize, remaining); - { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); - FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); - if (explicitBlockSize > blockSize) - RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); - if (explicitBlockSize > remaining) - RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); - return explicitBlockSize; - } -} - -/* Compress, block-by-block, all of the sequences given. - * - * Returns the cumulative size of all compressed blocks (including their headers), - * otherwise a ZSTD error. - */ -static size_t -ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize) -{ - size_t cSize = 0; - size_t remaining = srcSize; - ZSTD_sequencePosition seqPos = {0, 0, 0}; - - BYTE const* ip = (BYTE const*)src; - BYTE* op = (BYTE*)dst; - ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); - - DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); - /* Special case: empty frame */ - if (remaining == 0) { - U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header"); - MEM_writeLE32(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - cSize += ZSTD_blockHeaderSize; - } - - while (remaining) { - size_t compressedSeqsSize; - size_t cBlockSize; - size_t additionalByteAdjustment; - size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, - cctx->blockSize, remaining, - inSeqs, inSeqsSize, seqPos); - U32 const lastBlock = (blockSize == remaining); - FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); - assert(blockSize <= remaining); - ZSTD_resetSeqStore(&cctx->seqStore); - DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize); - - additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes); - FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); - blockSize -= additionalByteAdjustment; - - /* If blocks are too small, emit as a nocompress block */ - /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding - * additional 1. 
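 * (Annotation, not upstream text: with MIN_CBLOCK_SIZE == 2 and
 * ZSTD_blockHeaderSize == 3, the check that follows evaluates to
 * 2 + 3 + 1 + 1 = 7, i.e. blocks shorter than 7 bytes are emitted as
 * uncompressed blocks.)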
We need to revisit and change this logic to be more consistent */ - if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { - cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); - cSize += cBlockSize; - ip += blockSize; - op += cBlockSize; - remaining -= blockSize; - dstCapacity -= cBlockSize; - continue; - } - - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); - compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, - &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, - &cctx->appliedParams, - op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, - blockSize, - cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, - cctx->bmi2); - FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); - DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); - - if (!cctx->isFirstBlock && - ZSTD_maybeRLE(&cctx->seqStore) && - ZSTD_isRLE(ip, blockSize)) { - /* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." - * This is only an issue for zstd <= v1.4.3 - */ - compressedSeqsSize = 1; - } - - if (compressedSeqsSize == 0) { - /* ZSTD_noCompressBlock writes the block header as well */ - cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); - DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize); - } else if (compressedSeqsSize == 1) { - cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); - DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize); - } else { - U32 cBlockHeader; - /* Error checking and repcodes update */ - ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); - if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - - /* Write block header into beginning of block*/ - cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); - MEM_writeLE24(op, cBlockHeader); - cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; - DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); - } - - cSize += cBlockSize; - - if (lastBlock) { - break; - } else { - ip += blockSize; - op += cBlockSize; - remaining -= blockSize; - dstCapacity -= cBlockSize; - cctx->isFirstBlock = 0; - } - DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); - } - - DEBUGLOG(4, "cSize final total: %zu", cSize); - return cSize; -} - -size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize) -{ - BYTE* op = (BYTE*)dst; - size_t cSize = 0; - size_t compressedBlocksSize = 0; - size_t frameHeaderSize = 0; - - /* Transparent initialization stage, same as compressStream2() */ - DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity); - assert(cctx != NULL); - FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, 
ZSTD_e_end, srcSize), "CCtx initialization failed"); - /* Begin writing output, starting with frame header */ - frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID); - op += frameHeaderSize; - dstCapacity -= frameHeaderSize; - cSize += frameHeaderSize; - if (cctx->appliedParams.fParams.checksumFlag && srcSize) { - XXH64_update(&cctx->xxhState, src, srcSize); - } - /* cSize includes block header size and compressed sequences size */ - compressedBlocksSize = ZSTD_compressSequences_internal(cctx, - op, dstCapacity, - inSeqs, inSeqsSize, - src, srcSize); - FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!"); - cSize += compressedBlocksSize; - dstCapacity -= compressedBlocksSize; - - if (cctx->appliedParams.fParams.checksumFlag) { - U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); - RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); - DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum); - MEM_writeLE32((char*)dst + cSize, checksum); - cSize += 4; - } - - DEBUGLOG(4, "Final compressed size: %zu", cSize); - return cSize; -} - -/*====== Finalize ======*/ - -static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) -{ - const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; - const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); - return stableInput ? zcs->expectedInBuffer : nullInput; -} - -/*! ZSTD_flushStream() : - * @return : amount of data remaining to flush */ -size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) -{ - ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); - input.size = input.pos; /* do not ingest more input during flush */ - return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); -} - - -size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) -{ - ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); - size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); - FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed"); - if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ - /* single thread mode : attempt to calculate remaining to flush more precisely */ - { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; - size_t const checksumSize = (size_t)(zcs->frameEnded ? 
0 : zcs->appliedParams.fParams.checksumFlag * 4); - size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; - DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); - return toFlush; - } -} - - -/*-===== Pre-defined compression levels =====-*/ -#include "clevels.h" - -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } -int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } -int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; } - -static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) -{ - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); - switch (cParams.strategy) { - case ZSTD_fast: - case ZSTD_dfast: - break; - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: - cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG; - break; - case ZSTD_btlazy2: - case ZSTD_btopt: - case ZSTD_btultra: - case ZSTD_btultra2: - break; - } - return cParams; -} - -static int ZSTD_dedicatedDictSearch_isSupported( - ZSTD_compressionParameters const* cParams) -{ - return (cParams->strategy >= ZSTD_greedy) - && (cParams->strategy <= ZSTD_lazy2) - && (cParams->hashLog > cParams->chainLog) - && (cParams->chainLog <= 24); -} - -/** - * Reverses the adjustment applied to cparams when enabling dedicated dict - * search. This is used to recover the params set to be used in the working - * context. (Otherwise, those tables would also grow.) - */ -static void ZSTD_dedicatedDictSearch_revertCParams( - ZSTD_compressionParameters* cParams) { - switch (cParams->strategy) { - case ZSTD_fast: - case ZSTD_dfast: - break; - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: - cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG; - if (cParams->hashLog < ZSTD_HASHLOG_MIN) { - cParams->hashLog = ZSTD_HASHLOG_MIN; - } - break; - case ZSTD_btlazy2: - case ZSTD_btopt: - case ZSTD_btultra: - case ZSTD_btultra2: - break; - } -} - -static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) -{ - switch (mode) { - case ZSTD_cpm_unknown: - case ZSTD_cpm_noAttachDict: - case ZSTD_cpm_createCDict: - break; - case ZSTD_cpm_attachDict: - dictSize = 0; - break; - default: - assert(0); - break; - } - { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN; - size_t const addedSize = unknown && dictSize > 0 ? 500 : 0; - return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize; - } -} - -/*! ZSTD_getCParams_internal() : - * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. - * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. - * Use dictSize == 0 for unknown or unused. - * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. 
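 * Annotation, illustrative only: through the public wrapper defined below, a
 * caller might request parameters as
 *
 *     ZSTD_compressionParameters const cp = ZSTD_getCParams(3, fileSize, 0);
 *
 * where `fileSize` is a stand-in for a known source size (pass 0 when unknown).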
*/ -static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) -{ - U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); - U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); - int row; - DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel); - - /* row */ - if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ - else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ - else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; - else row = compressionLevel; - - { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; - DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy); - /* acceleration factor */ - if (compressionLevel < 0) { - int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel); - cp.targetLength = (unsigned)(-clampedCompressionLevel); - } - /* refine parameters based on srcSize & dictSize */ - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto); - } -} - -/*! ZSTD_getCParams() : - * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. - * Size values are optional, provide 0 if not known or unused */ -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) -{ - if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); -} - -/*! ZSTD_getParams() : - * same idea as ZSTD_getCParams() - * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). - * Fields of `ZSTD_frameParameters` are set to default values */ -static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { - ZSTD_parameters params; - ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); - DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); - ZSTD_memset(&params, 0, sizeof(params)); - params.cParams = cParams; - params.fParams.contentSizeFlag = 1; - return params; -} - -/*! ZSTD_getParams() : - * same idea as ZSTD_getCParams() - * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
- * Fields of `ZSTD_frameParameters` are set to default values */ -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { - if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); -} - -void ZSTD_registerSequenceProducer( - ZSTD_CCtx* zc, - void* extSeqProdState, - ZSTD_sequenceProducer_F extSeqProdFunc -) { - assert(zc != NULL); - ZSTD_CCtxParams_registerSequenceProducer( - &zc->requestedParams, extSeqProdState, extSeqProdFunc - ); -} - -void ZSTD_CCtxParams_registerSequenceProducer( - ZSTD_CCtx_params* params, - void* extSeqProdState, - ZSTD_sequenceProducer_F extSeqProdFunc -) { - assert(params != NULL); - if (extSeqProdFunc != NULL) { - params->extSeqProdFunc = extSeqProdFunc; - params->extSeqProdState = extSeqProdState; - } else { - params->extSeqProdFunc = NULL; - params->extSeqProdState = NULL; - } -} diff --git a/zstandard_android/src/compress/zstd_compress_internal.h b/zstandard_android/src/compress/zstd_compress_internal.h deleted file mode 100644 index 6229033..0000000 --- a/zstandard_android/src/compress/zstd_compress_internal.h +++ /dev/null @@ -1,1562 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -/* This header contains definitions - * that shall **only** be used by modules within lib/compress. - */ - -#ifndef ZSTD_COMPRESS_H -#define ZSTD_COMPRESS_H - -/*-************************************* -* Dependencies -***************************************/ -#include "../common/zstd_internal.h" -#include "zstd_cwksp.h" -#ifdef ZSTD_MULTITHREAD -# include "zstdmt_compress.h" -#endif -#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ - -#if defined (__cplusplus) -extern "C" { -#endif - -/*-************************************* -* Constants -***************************************/ -#define kSearchStrength 8 -#define HASH_READ_SIZE 8 -#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted". - It could be confused for a real successor at index "1", if sorted as larger than its predecessor. - It's not a big deal though : candidate will just be sorted again. - Additionally, candidate position 1 will be lost. - But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. - The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy. 
- This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ - - -/*-************************************* -* Context memory management -***************************************/ -typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; -typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; - -typedef struct ZSTD_prefixDict_s { - const void* dict; - size_t dictSize; - ZSTD_dictContentType_e dictContentType; -} ZSTD_prefixDict; - -typedef struct { - void* dictBuffer; - void const* dict; - size_t dictSize; - ZSTD_dictContentType_e dictContentType; - ZSTD_CDict* cdict; -} ZSTD_localDict; - -typedef struct { - HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)]; - HUF_repeat repeatMode; -} ZSTD_hufCTables_t; - -typedef struct { - FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; - FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; - FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; - FSE_repeat offcode_repeatMode; - FSE_repeat matchlength_repeatMode; - FSE_repeat litlength_repeatMode; -} ZSTD_fseCTables_t; - -typedef struct { - ZSTD_hufCTables_t huf; - ZSTD_fseCTables_t fse; -} ZSTD_entropyCTables_t; - -/*********************************************** -* Entropy buffer statistics structs and funcs * -***********************************************/ -/** ZSTD_hufCTablesMetadata_t : - * Stores Literals Block Type for a super-block in hType, and - * huffman tree description in hufDesBuffer. - * hufDesSize refers to the size of huffman tree description in bytes. - * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ -typedef struct { - symbolEncodingType_e hType; - BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE]; - size_t hufDesSize; -} ZSTD_hufCTablesMetadata_t; - -/** ZSTD_fseCTablesMetadata_t : - * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and - * fse tables in fseTablesBuffer. - * fseTablesSize refers to the size of fse tables in bytes. - * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ -typedef struct { - symbolEncodingType_e llType; - symbolEncodingType_e ofType; - symbolEncodingType_e mlType; - BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE]; - size_t fseTablesSize; - size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ -} ZSTD_fseCTablesMetadata_t; - -typedef struct { - ZSTD_hufCTablesMetadata_t hufMetadata; - ZSTD_fseCTablesMetadata_t fseMetadata; -} ZSTD_entropyCTablesMetadata_t; - -/** ZSTD_buildBlockEntropyStats() : - * Builds entropy for the block. - * @return : 0 on success or error code */ -size_t ZSTD_buildBlockEntropyStats( - const seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize); - -/********************************* -* Compression internals structs * -*********************************/ - -typedef struct { - U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ - U32 len; /* Raw length of match */ -} ZSTD_match_t; - -typedef struct { - U32 offset; /* Offset of sequence */ - U32 litLength; /* Length of literals prior to match */ - U32 matchLength; /* Raw length of match */ -} rawSeq; - -typedef struct { - rawSeq* seq; /* The start of the sequences */ - size_t pos; /* The index in seq where reading stopped. 
pos <= size. */ - size_t posInSequence; /* The position within the sequence at seq[pos] where reading - stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ - size_t size; /* The number of sequences. <= capacity. */ - size_t capacity; /* The capacity starting from `seq` pointer */ -} rawSeqStore_t; - -typedef struct { - U32 idx; /* Index in array of ZSTD_Sequence */ - U32 posInSequence; /* Position within sequence at idx */ - size_t posInSrc; /* Number of bytes given by sequences provided so far */ -} ZSTD_sequencePosition; - -UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; - -typedef struct { - int price; /* price from beginning of segment to this position */ - U32 off; /* offset of previous match */ - U32 mlen; /* length of previous match */ - U32 litlen; /* nb of literals since previous match */ - U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */ -} ZSTD_optimal_t; - -typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; - -#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3) -typedef struct { - /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ - unsigned* litFreq; /* table of literals statistics, of size 256 */ - unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ - unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ - unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ - ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */ - ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ - - U32 litSum; /* nb of literals */ - U32 litLengthSum; /* nb of litLength codes */ - U32 matchLengthSum; /* nb of matchLength codes */ - U32 offCodeSum; /* nb of offset codes */ - U32 litSumBasePrice; /* to compare to log2(litfreq) */ - U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */ - U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */ - U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ - ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ - const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ - ZSTD_paramSwitch_e literalCompressionMode; -} optState_t; - -typedef struct { - ZSTD_entropyCTables_t entropy; - U32 rep[ZSTD_REP_NUM]; -} ZSTD_compressedBlockState_t; - -typedef struct { - BYTE const* nextSrc; /* next block here to continue on current prefix */ - BYTE const* base; /* All regular indexes relative to this position */ - BYTE const* dictBase; /* extDict indexes relative to this position */ - U32 dictLimit; /* below that point, need extDict */ - U32 lowLimit; /* below that point, no more valid data */ - U32 nbOverflowCorrections; /* Number of times overflow correction has run since - * ZSTD_window_init(). Useful for debugging coredumps - * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. - */ -} ZSTD_window_t; - -#define ZSTD_WINDOW_START_INDEX 2 - -typedef struct ZSTD_matchState_t ZSTD_matchState_t; - -#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ - -struct ZSTD_matchState_t { - ZSTD_window_t window; /* State for window round buffer management */ - U32 loadedDictEnd; /* index of end of dictionary, within context's referential. - * When loadedDictEnd != 0, a dictionary is in use, and still valid. - * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance. 
- * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity(). - * When dict referential is copied into active context (i.e. not attached), - * loadedDictEnd == dictSize, since referential starts from zero. - */ - U32 nextToUpdate; /* index from which to continue table update */ - U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ - - U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/ - BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ - U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */ - U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */ - U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */ - - U32* hashTable; - U32* hashTable3; - U32* chainTable; - - U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ - - int dedicatedDictSearch; /* Indicates whether this matchState is using the - * dedicated dictionary search structure. - */ - optState_t opt; /* optimal parser state */ - const ZSTD_matchState_t* dictMatchState; - ZSTD_compressionParameters cParams; - const rawSeqStore_t* ldmSeqStore; - - /* Controls prefetching in some dictMatchState matchfinders. - * This behavior is controlled from the cctx ms. - * This parameter has no effect in the cdict ms. */ - int prefetchCDictTables; - - /* When == 0, lazy match finders insert every position. - * When != 0, lazy match finders only insert positions they search. - * This allows them to skip much faster over incompressible data, - * at a small cost to compression ratio. - */ - int lazySkipping; -}; - -typedef struct { - ZSTD_compressedBlockState_t* prevCBlock; - ZSTD_compressedBlockState_t* nextCBlock; - ZSTD_matchState_t matchState; -} ZSTD_blockState_t; - -typedef struct { - U32 offset; - U32 checksum; -} ldmEntry_t; - -typedef struct { - BYTE const* split; - U32 hash; - U32 checksum; - ldmEntry_t* bucket; -} ldmMatchCandidate_t; - -#define LDM_BATCH_SIZE 64 - -typedef struct { - ZSTD_window_t window; /* State for the window round buffer management */ - ldmEntry_t* hashTable; - U32 loadedDictEnd; - BYTE* bucketOffsets; /* Next position in bucket to insert entry */ - size_t splitIndices[LDM_BATCH_SIZE]; - ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE]; -} ldmState_t; - -typedef struct { - ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ - U32 hashLog; /* Log size of hashTable */ - U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ - U32 minMatchLength; /* Minimum match length */ - U32 hashRateLog; /* Log number of entries to skip */ - U32 windowLog; /* Window log for the LDM */ -} ldmParams_t; - -typedef struct { - int collectSequences; - ZSTD_Sequence* seqStart; - size_t seqIndex; - size_t maxSequences; -} SeqCollector; - -struct ZSTD_CCtx_params_s { - ZSTD_format_e format; - ZSTD_compressionParameters cParams; - ZSTD_frameParameters fParams; - - int compressionLevel; - int forceWindow; /* force back-references to respect limit of - * 1< 63) ? 
ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; -} - -/* ZSTD_MLcode() : - * note : mlBase = matchLength - MINMATCH; - * because it's the format it's stored in seqStore->sequences */ -MEM_STATIC U32 ZSTD_MLcode(U32 mlBase) -{ - static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; - static const U32 ML_deltaCode = 36; - return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; -} - -/* ZSTD_cParam_withinBounds: - * @return 1 if value is within cParam bounds, - * 0 otherwise */ -MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) -{ - ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); - if (ZSTD_isError(bounds.error)) return 0; - if (value < bounds.lowerBound) return 0; - if (value > bounds.upperBound) return 0; - return 1; -} - -/* ZSTD_selectAddr: - * @return index >= lowLimit ? candidate : backup, - * tries to force branchless codegen. */ -MEM_STATIC const BYTE* -ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup) -{ -#if defined(__GNUC__) && defined(__x86_64__) - __asm__ ( - "cmp %1, %2\n" - "cmova %3, %0\n" - : "+r"(candidate) - : "r"(index), "r"(lowLimit), "r"(backup) - ); - return candidate; -#else - return index >= lowLimit ? candidate : backup; -#endif -} - -/* ZSTD_noCompressBlock() : - * Writes uncompressed block to dst buffer from given src. - * Returns the size of the block */ -MEM_STATIC size_t -ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) -{ - U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); - DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity); - RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, - dstSize_tooSmall, "dst buf too small for uncompressed block"); - MEM_writeLE24(dst, cBlockHeader24); - ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); - return ZSTD_blockHeaderSize + srcSize; -} - -MEM_STATIC size_t -ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) -{ - BYTE* const op = (BYTE*)dst; - U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); - RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, ""); - MEM_writeLE24(op, cBlockHeader); - op[3] = src; - return 4; -} - - -/* ZSTD_minGain() : - * minimum compression required - * to generate a compress block or a compressed literals section. - * note : use same formula for both situations */ -MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) -{ - U32 const minlog = (strat>=ZSTD_btultra) ? 
(U32)(strat) - 1 : 6; - ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); - return (srcSize >> minlog) + 2; -} - -MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams) -{ - switch (cctxParams->literalCompressionMode) { - case ZSTD_ps_enable: - return 0; - case ZSTD_ps_disable: - return 1; - default: - assert(0 /* impossible: pre-validated */); - ZSTD_FALLTHROUGH; - case ZSTD_ps_auto: - return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); - } -} - -/*! ZSTD_safecopyLiterals() : - * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. - * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single - * large copies. - */ -static void -ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) -{ - assert(iend > ilimit_w); - if (ip <= ilimit_w) { - ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); - op += ilimit_w - ip; - ip = ilimit_w; - } - while (ip < iend) *op++ = *ip++; -} - - -#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) -#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) -#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) -#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ -#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) -#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) -#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) -#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) -#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ - -/*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. - * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). - * @matchLength : must be >= MINMATCH - * Allowed to over-read literals up to litLimit. -*/ -HINT_INLINE UNUSED_ATTR void -ZSTD_storeSeq(seqStore_t* seqStorePtr, - size_t litLength, const BYTE* literals, const BYTE* litLimit, - U32 offBase, - size_t matchLength) -{ - BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; - BYTE const* const litEnd = literals + litLength; -#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6) - static const BYTE* g_start = NULL; - if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ - { U32 const pos = (U32)((const BYTE*)literals - g_start); - DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u", - pos, (U32)litLength, (U32)matchLength, (U32)offBase); - } -#endif - assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); - /* copy Literals */ - assert(seqStorePtr->maxNbLit <= 128 KB); - assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); - assert(literals + litLength <= litLimit); - if (litEnd <= litLimit_w) { - /* Common case we can use wildcopy. - * First copy 16 bytes, because literals are likely short. 
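 * (Annotation: a literal run of, say, 10 bytes therefore costs a single
 * 16-byte copy; only runs longer than 16 bytes fall through to the
 * ZSTD_wildcopy() call below.)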
- */ - ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); - ZSTD_copy16(seqStorePtr->lit, literals); - if (litLength > 16) { - ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); - } - } else { - ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); - } - seqStorePtr->lit += litLength; - - /* literal Length */ - if (litLength>0xFFFF) { - assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ - seqStorePtr->longLengthType = ZSTD_llt_literalLength; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].litLength = (U16)litLength; - - /* match offset */ - seqStorePtr->sequences[0].offBase = offBase; - - /* match Length */ - assert(matchLength >= MINMATCH); - { size_t const mlBase = matchLength - MINMATCH; - if (mlBase>0xFFFF) { - assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ - seqStorePtr->longLengthType = ZSTD_llt_matchLength; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].mlBase = (U16)mlBase; - } - - seqStorePtr->sequences++; -} - -/* ZSTD_updateRep() : - * updates in-place @rep (array of repeat offsets) - * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() - */ -MEM_STATIC void -ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) -{ - if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */ - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = OFFBASE_TO_OFFSET(offBase); - } else { /* repcode */ - U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; - if (repCode > 0) { /* note : if repCode==0, no change */ - U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; - rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; - rep[1] = rep[0]; - rep[0] = currentOffset; - } else { /* repCode == 0 */ - /* nothing to do */ - } - } -} - -typedef struct repcodes_s { - U32 rep[3]; -} repcodes_t; - -MEM_STATIC repcodes_t -ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) -{ - repcodes_t newReps; - ZSTD_memcpy(&newReps, rep, sizeof(newReps)); - ZSTD_updateRep(newReps.rep, offBase, ll0); - return newReps; -} - - -/*-************************************* -* Match length counter -***************************************/ -MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) -{ - const BYTE* const pStart = pIn; - const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); - - if (pIn < pInLoopLimit) { - { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); - if (diff) return ZSTD_NbCommonBytes(diff); } - pIn+=sizeof(size_t); pMatch+=sizeof(size_t); - while (pIn < pInLoopLimit) { - size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); - if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } - pIn += ZSTD_NbCommonBytes(diff); - return (size_t)(pIn - pStart); - } } - if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } - if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } - if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++; - return (size_t)(pIn - pStart); -} - -/** ZSTD_count_2segments() : - * can count match length with `ip` & `match` in 2 different segments. - * convention : on reaching mEnd, match count continue starting from iStart - */ -MEM_STATIC size_t -ZSTD_count_2segments(const BYTE* ip, const BYTE* match, - const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart) -{ - const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd); - size_t const matchLength = ZSTD_count(ip, match, vEnd); - if (match + matchLength != mEnd) return matchLength; - DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength); - DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match); - DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip); - DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart); - DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd)); - return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); -} - - -/*-************************************* -* Hashes -***************************************/ -static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; } -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */ -MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); } - -static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); } -static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); } - -static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; } -static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); } -static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); } - -static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); } -static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); } - -static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; } -static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); } -static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); } - -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); } -static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); } - - -MEM_STATIC
FORCE_INLINE_ATTR -size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) -{ - /* Although some of these hashes do support hBits up to 64, some do not. - * To be on the safe side, always avoid hBits > 32. */ - assert(hBits <= 32); - - switch(mls) - { - default: - case 4: return ZSTD_hash4Ptr(p, hBits); - case 5: return ZSTD_hash5Ptr(p, hBits); - case 6: return ZSTD_hash6Ptr(p, hBits); - case 7: return ZSTD_hash7Ptr(p, hBits); - case 8: return ZSTD_hash8Ptr(p, hBits); - } -} - -MEM_STATIC FORCE_INLINE_ATTR -size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) { - /* Although some of these hashes do support hBits up to 64, some do not. - * To be on the safe side, always avoid hBits > 32. */ - assert(hBits <= 32); - - switch(mls) - { - default: - case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt); - case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt); - case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt); - case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt); - case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt); - } -} - - -/** ZSTD_ipow() : - * Return base^exponent. - */ -static U64 ZSTD_ipow(U64 base, U64 exponent) -{ - U64 power = 1; - while (exponent) { - if (exponent & 1) power *= base; - exponent >>= 1; - base *= base; - } - return power; -} - -#define ZSTD_ROLL_HASH_CHAR_OFFSET 10 - -/** ZSTD_rollingHash_append() : - * Add the buffer to the hash value. - */ -static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size) -{ - BYTE const* istart = (BYTE const*)buf; - size_t pos; - for (pos = 0; pos < size; ++pos) { - hash *= prime8bytes; - hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET; - } - return hash; -} - -/** ZSTD_rollingHash_compute() : - * Compute the rolling hash value of the buffer. - */ -MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size) -{ - return ZSTD_rollingHash_append(0, buf, size); -} - -/** ZSTD_rollingHash_primePower() : - * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash - * over a window of length bytes. - */ -MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length) -{ - return ZSTD_ipow(prime8bytes, length - 1); -} - -/** ZSTD_rollingHash_rotate() : - * Rotate the rolling hash by one byte. - */ -MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower) -{ - hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower; - hash *= prime8bytes; - hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET; - return hash; -} - -/*-************************************* -* Round buffer management -***************************************/ -/* Max @current value allowed: - * In 32-bit mode: we want to avoid crossing the 2 GB limit, - * reducing risks of side effects in case of signed operations on indexes. - * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB) - * doesn't overflow U32 index capacity (4 GB) */ -#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB) -/* Maximum chunk size before overflow correction needs to be called again */ -#define ZSTD_CHUNKSIZE_MAX \ - ( ((U32)-1) /* Maximum ending current index */ \ - - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */ - -/** - * ZSTD_window_clear(): - * Clears the window containing the history by simply setting it to empty. 
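 * (Annotation: "empty" means lowLimit and dictLimit are advanced to the
 * current end index; nothing is rewound. Indices stay monotonic, and the
 * prior history simply becomes unreferenceable.)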
- */ -MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) -{ - size_t const endT = (size_t)(window->nextSrc - window->base); - U32 const end = (U32)endT; - - window->lowLimit = end; - window->dictLimit = end; -} - -MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window) -{ - return window.dictLimit == ZSTD_WINDOW_START_INDEX && - window.lowLimit == ZSTD_WINDOW_START_INDEX && - (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX; -} - -/** - * ZSTD_window_hasExtDict(): - * Returns non-zero if the window has a non-empty extDict. - */ -MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) -{ - return window.lowLimit < window.dictLimit; -} - -/** - * ZSTD_matchState_dictMode(): - * Inspects the provided matchState and figures out what dictMode should be - * passed to the compressor. - */ -MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) -{ - return ZSTD_window_hasExtDict(ms->window) ? - ZSTD_extDict : - ms->dictMatchState != NULL ? - (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) : - ZSTD_noDict; -} - -/* Defining this macro to non-zero tells zstd to run the overflow correction - * code much more frequently. This is very inefficient, and should only be - * used for tests and fuzzers. - */ -#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY -# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1 -# else -# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0 -# endif -#endif - -/** - * ZSTD_window_canOverflowCorrect(): - * Returns non-zero if the indices are large enough for overflow correction - * to work correctly without impacting compression ratio. - */ -MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window, - U32 cycleLog, - U32 maxDist, - U32 loadedDictEnd, - void const* src) -{ - U32 const cycleSize = 1u << cycleLog; - U32 const curr = (U32)((BYTE const*)src - window.base); - U32 const minIndexToOverflowCorrect = cycleSize - + MAX(maxDist, cycleSize) - + ZSTD_WINDOW_START_INDEX; - - /* Adjust the min index to backoff the overflow correction frequency, - * so we don't waste too much CPU in overflow correction. If this - * computation overflows we don't really care, we just need to make - * sure it is at least minIndexToOverflowCorrect. - */ - U32 const adjustment = window.nbOverflowCorrections + 1; - U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment, - minIndexToOverflowCorrect); - U32 const indexLargeEnough = curr > adjustedIndex; - - /* Only overflow correct early if the dictionary is invalidated already, - * so we don't hurt compression ratio. - */ - U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd; - - return indexLargeEnough && dictionaryInvalidated; -} - -/** - * ZSTD_window_needOverflowCorrection(): - * Returns non-zero if the indices are getting too large and need overflow - * protection. - */ -MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, - U32 cycleLog, - U32 maxDist, - U32 loadedDictEnd, - void const* src, - void const* srcEnd) -{ - U32 const curr = (U32)((BYTE const*)srcEnd - window.base); - if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) { - if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) { - return 1; - } - } - return curr > ZSTD_CURRENT_MAX; -} - -/** - * ZSTD_window_correctOverflow(): - * Reduces the indices to protect from index overflow. 
- * Returns the correction made to the indices, which must be applied to every - * stored index. - * - * The least significant cycleLog bits of the indices must remain the same, - * which may be 0. Every index up to maxDist in the past must be valid. - */ -MEM_STATIC -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, - U32 maxDist, void const* src) -{ - /* preemptive overflow correction: - * 1. correction is large enough: - * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog - * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog - * - * current - newCurrent - * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog) - * > (3<<29) - (1<<chainLog) - * > (3<<29) - (1<<30) (NOTE: chainLog <= 30) - * > 1<<29 - * - * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: - * After correction, current is less than (1<<chainLog + 1<<windowLog). - * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t. - * In 32-bit mode we are safe, because (chainLog <= 29), so - * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32. - * 3. (cctx->lowLimit + 1<<chainLog) < 1<<32: - * windowLog <= 31 ==> 3<<29 + 1<<chainLog < 7<<29 < 1<<32. - */ - U32 const cycleSize = 1u << cycleLog; - U32 const cycleMask = cycleSize - 1; - U32 const curr = (U32)((BYTE const*)src - window->base); - U32 const currentCycle = curr & cycleMask; - /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ - U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX - ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX) - : 0; - U32 const newCurrent = currentCycle - + currentCycleCorrection - + MAX(maxDist, cycleSize); - U32 const correction = curr - newCurrent; - /* maxDist must be a power of two so that: - * (newCurrent & cycleMask) == (curr & cycleMask) - * This is required to not corrupt the chains / binary tree. - */ - assert((maxDist & (maxDist - 1)) == 0); - assert((curr & cycleMask) == (newCurrent & cycleMask)); - assert(curr > newCurrent); - if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) { - /* Loose bound, should be around 1<<29 (see above) */ - assert(correction > 1<<28); - } - - window->base += correction; - window->dictBase += correction; - if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) { - window->lowLimit = ZSTD_WINDOW_START_INDEX; - } else { - window->lowLimit -= correction; - } - if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) { - window->dictLimit = ZSTD_WINDOW_START_INDEX; - } else { - window->dictLimit -= correction; - } - - /* Ensure we can still reference the full window. */ - assert(newCurrent >= maxDist); - assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX); - /* Ensure that lowLimit and dictLimit didn't underflow. */ - assert(window->lowLimit <= newCurrent); - assert(window->dictLimit <= newCurrent); - - ++window->nbOverflowCorrections; - - DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, - window->lowLimit); - return correction; -} - -/** - * ZSTD_window_enforceMaxDist(): - * Updates lowLimit so that: - * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd - * - * It ensures index is valid as long as index >= lowLimit. - * This must be called before a block compression call. - * - * loadedDictEnd is only defined if a dictionary is in use for current compression. - * As the name implies, loadedDictEnd represents the index at end of dictionary. - * The value lies within context's referential, it can be directly compared to blockEndIdx. - * - * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. - * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. - * This is because dictionaries are allowed to be referenced fully - * as long as the last byte of the dictionary is in the window. - * Once input has progressed beyond window size, dictionary cannot be referenced anymore. - * - * In normal dict mode, the dictionary lies between lowLimit and dictLimit. - * In dictMatchState mode, lowLimit and dictLimit are the same, - * and the dictionary is below them. 
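A worked example of the ZSTD_window_correctOverflow() index arithmetic in isolation, on illustrative values (the cycleLog/maxDist pair stands in for a typical chainLog/windowLog configuration; this is a sketch, not the real window struct):

#include <assert.h>
#include <stdio.h>

/* Re-derives the newCurrent/correction arithmetic from
 * ZSTD_window_correctOverflow() on sample values. */
int main(void)
{
    unsigned const windowStartIndex = 2;          /* ZSTD_WINDOW_START_INDEX */
    unsigned const cycleLog = 20;                 /* e.g. chainLog 20 */
    unsigned const maxDist  = 1u << 22;           /* windowLog 22 */
    unsigned const cycleSize = 1u << cycleLog;
    unsigned const cycleMask = cycleSize - 1;
    unsigned const curr = 0xE0000123;             /* large current index */

    unsigned const currentCycle = curr & cycleMask;
    unsigned const currentCycleCorrection =
        currentCycle < windowStartIndex ? cycleSize : 0;  /* MAX(cycleSize, 2) == cycleSize */
    unsigned const newCurrent = currentCycle + currentCycleCorrection
                              + (maxDist > cycleSize ? maxDist : cycleSize);
    unsigned const correction = curr - newCurrent;

    /* the low cycleLog bits are preserved, so chain/tree links stay valid */
    assert((curr & cycleMask) == (newCurrent & cycleMask));
    /* the full window [newCurrent - maxDist, newCurrent) remains addressable */
    assert(newCurrent - maxDist >= windowStartIndex);
    printf("curr=0x%x -> newCurrent=0x%x (correction=0x%x)\n",
           curr, newCurrent, correction);
    return 0;
}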
- * forceWindow and dictMatchState are therefore incompatible. - */ -MEM_STATIC void -ZSTD_window_enforceMaxDist(ZSTD_window_t* window, - const void* blockEnd, - U32 maxDist, - U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) -{ - U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); - U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; - DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", - (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); - - /* - When there is no dictionary : loadedDictEnd == 0. - In which case, the test (blockEndIdx > maxDist) is merely to avoid - overflowing next operation `newLowLimit = blockEndIdx - maxDist`. - - When there is a standard dictionary : - Index referential is copied from the dictionary, - which means it starts from 0. - In which case, loadedDictEnd == dictSize, - and it makes sense to compare `blockEndIdx > maxDist + dictSize` - since `blockEndIdx` also starts from zero. - - When there is an attached dictionary : - loadedDictEnd is expressed within the referential of the context, - so it can be directly compared against blockEndIdx. - */ - if (blockEndIdx > maxDist + loadedDictEnd) { - U32 const newLowLimit = blockEndIdx - maxDist; - if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit; - if (window->dictLimit < window->lowLimit) { - DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u", - (unsigned)window->dictLimit, (unsigned)window->lowLimit); - window->dictLimit = window->lowLimit; - } - /* On reaching window size, dictionaries are invalidated */ - if (loadedDictEndPtr) *loadedDictEndPtr = 0; - if (dictMatchStatePtr) *dictMatchStatePtr = NULL; - } -} - -/* Similar to ZSTD_window_enforceMaxDist(), - * but only invalidates dictionary - * when input progresses beyond window size. - * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) - * loadedDictEnd uses same referential as window->base - * maxDist is the window size */ -MEM_STATIC void -ZSTD_checkDictValidity(const ZSTD_window_t* window, - const void* blockEnd, - U32 maxDist, - U32* loadedDictEndPtr, - const ZSTD_matchState_t** dictMatchStatePtr) -{ - assert(loadedDictEndPtr != NULL); - assert(dictMatchStatePtr != NULL); - { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); - U32 const loadedDictEnd = *loadedDictEndPtr; - DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", - (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); - assert(blockEndIdx >= loadedDictEnd); - - if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { - /* On reaching window size, dictionaries are invalidated. - * For simplification, if window size is reached anywhere within next block, - * the dictionary is invalidated for the full block. - * - * We also have to invalidate the dictionary if ZSTD_window_update() has detected - * non-contiguous segments, which means that loadedDictEnd != window->dictLimit. - * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use - * dictMatchState, so setting it to NULL is not a problem. 
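ZSTD_window_enforceMaxDist() reduces to a lower-bound bump on lowLimit (and on dictLimit once the extDict has fully expired); a toy trace with made-up numbers and no dictionary:

#include <stdio.h>

/* Traces the lowLimit/dictLimit updates of ZSTD_window_enforceMaxDist()
 * on illustrative numbers (no dictionary: loadedDictEnd == 0). */
int main(void)
{
    unsigned lowLimit = 1000, dictLimit = 1500;
    unsigned const maxDist = 1u << 17;          /* 128 KB window */
    unsigned const blockEndIdx = 200000;        /* index of end of next block */
    unsigned const loadedDictEnd = 0;

    if (blockEndIdx > maxDist + loadedDictEnd) {
        unsigned const newLowLimit = blockEndIdx - maxDist;   /* 200000 - 131072 = 68928 */
        if (lowLimit < newLowLimit) lowLimit = newLowLimit;
        if (dictLimit < lowLimit) dictLimit = lowLimit;       /* extDict fully expired */
    }
    printf("lowLimit=%u dictLimit=%u\n", lowLimit, dictLimit); /* 68928 68928 */
    return 0;
}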
- */ - DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); - *loadedDictEndPtr = 0; - *dictMatchStatePtr = NULL; - } else { - if (*loadedDictEndPtr != 0) { - DEBUGLOG(6, "dictionary considered valid for current block"); - } } } -} - -MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { - ZSTD_memset(window, 0, sizeof(*window)); - window->base = (BYTE const*)" "; - window->dictBase = (BYTE const*)" "; - ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */ - window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */ - window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */ - window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */ - window->nbOverflowCorrections = 0; -} - -/** - * ZSTD_window_update(): - * Updates the window by appending [src, src + srcSize) to the window. - * If it is not contiguous, the current prefix becomes the extDict, and we - * forget about the extDict. Handles overlap of the prefix and extDict. - * Returns non-zero if the segment is contiguous. - */ -MEM_STATIC -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_window_update(ZSTD_window_t* window, - const void* src, size_t srcSize, - int forceNonContiguous) -{ - BYTE const* const ip = (BYTE const*)src; - U32 contiguous = 1; - DEBUGLOG(5, "ZSTD_window_update"); - if (srcSize == 0) - return contiguous; - assert(window->base != NULL); - assert(window->dictBase != NULL); - /* Check if blocks follow each other */ - if (src != window->nextSrc || forceNonContiguous) { - /* not contiguous */ - size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); - DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit); - window->lowLimit = window->dictLimit; - assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */ - window->dictLimit = (U32)distanceFromBase; - window->dictBase = window->base; - window->base = ip - distanceFromBase; - /* ms->nextToUpdate = window->dictLimit; */ - if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ - contiguous = 0; - } - window->nextSrc = ip + srcSize; - /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ - if ( (ip+srcSize > window->dictBase + window->lowLimit) - & (ip < window->dictBase + window->dictLimit)) { - size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase); - U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; - assert(highInputIdx < UINT_MAX); - window->lowLimit = lowLimitMax; - DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); - } - return contiguous; -} - -/** - * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. - */ -MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) -{ - U32 const maxDistance = 1U << windowLog; - U32 const lowestValid = ms->window.lowLimit; - U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - U32 const isDictionary = (ms->loadedDictEnd != 0); - /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary - * is within the window. 
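The non-contiguous branch of ZSTD_window_update() is the subtle part: the old prefix becomes the extDict and base is rebased so indexes stay continuous. A minimal mimic with a hypothetical MiniWindow struct (the HASH_READ_SIZE small-extDict adjustment and the input/dictionary overlap handling are deliberately omitted):

#include <stdio.h>
#include <stddef.h>

/* Minimal mimic of the non-contiguous branch of ZSTD_window_update(). */
typedef struct { const unsigned char *base, *dictBase, *nextSrc;
                 unsigned lowLimit, dictLimit; } MiniWindow;

static void miniUpdate(MiniWindow* w, const unsigned char* src, size_t srcSize)
{
    if (src != w->nextSrc) {                       /* not contiguous */
        size_t const distanceFromBase = (size_t)(w->nextSrc - w->base);
        w->lowLimit  = w->dictLimit;               /* old extDict is dropped */
        w->dictLimit = (unsigned)distanceFromBase; /* old prefix becomes extDict */
        w->dictBase  = w->base;
        w->base      = src - distanceFromBase;     /* keep indexes continuous */
    }
    w->nextSrc = src + srcSize;
}

int main(void)
{
    static const unsigned char buf[200];
    MiniWindow w = { buf, buf, buf, 0, 0 };        /* indexes start at 0 here */
    miniUpdate(&w, buf, 100);                      /* contiguous first segment */
    miniUpdate(&w, buf + 130, 50);                 /* 30-byte gap -> non-contiguous */
    printf("dictLimit=%u lowLimit=%u\n", w.dictLimit, w.lowLimit);  /* 100 0 */
    return 0;
}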
We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't - * valid for the entire block. So this check is sufficient to find the lowest valid match index. - */ - U32 const matchLowest = isDictionary ? lowestValid : withinWindow; - return matchLowest; -} - -/** - * Returns the lowest allowed match index in the prefix. - */ -MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) -{ - U32 const maxDistance = 1U << windowLog; - U32 const lowestValid = ms->window.dictLimit; - U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - U32 const isDictionary = (ms->loadedDictEnd != 0); - /* When computing the lowest prefix index we need to take the dictionary into account to handle - * the edge case where the dictionary and the source are contiguous in memory. - */ - U32 const matchLowest = isDictionary ? lowestValid : withinWindow; - return matchLowest; -} - -/* index_safety_check: - * intentional underflow : ensure repIndex isn't overlapping dict + prefix - * @return 1 if values are not overlapping, - * 0 otherwise */ -MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) { - return ((U32)((prefixLowestIndex-1) - repIndex) >= 3); -} - - -/* debug functions */ -#if (DEBUGLEVEL>=2) - -MEM_STATIC double ZSTD_fWeight(U32 rawStat) -{ - U32 const fp_accuracy = 8; - U32 const fp_multiplier = (1 << fp_accuracy); - U32 const newStat = rawStat + 1; - U32 const hb = ZSTD_highbit32(newStat); - U32 const BWeight = hb * fp_multiplier; - U32 const FWeight = (newStat << fp_accuracy) >> hb; - U32 const weight = BWeight + FWeight; - assert(hb + fp_accuracy < 31); - return (double)weight / fp_multiplier; -} - -/* display a table content, - * listing each element, its frequency, and its predicted bit cost */ -MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) -{ - unsigned u, sum; - for (u=0, sum=0; u<=max; u++) sum += table[u]; - DEBUGLOG(2, "total nb elts: %u", sum); - for (u=0; u<=max; u++) { - DEBUGLOG(2, "%2u: %5u (%.2f)", - u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) ); - } -} - -#endif - -/* Short Cache */ - -/* Normally, zstd matchfinders follow this flow: - * 1. Compute hash at ip - * 2. Load index from hashTable[hash] - * 3. Check if *ip == *(base + index) - * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss. - * - * Short cache is an optimization which allows us to avoid step 3 most of the time - * when the data doesn't actually match. With short cache, the flow becomes: - * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip. - * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works. - * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue. - * - * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to - * dictMatchState matchfinders. - */ -#define ZSTD_SHORT_CACHE_TAG_BITS 8 -#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1) - -/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. - * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. 
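The intentional-underflow test in ZSTD_index_overlap_check() deserves a truth table; this sketch enumerates repIndex values around prefixLowestIndex to show that the >= 3 cutoff rejects exactly the matches that would straddle the extDict/prefix boundary:

#include <stdio.h>

/* Same expression as ZSTD_index_overlap_check(): relies on unsigned wraparound. */
static int overlapOk(unsigned prefixLowestIndex, unsigned repIndex)
{
    return ((unsigned)((prefixLowestIndex - 1) - repIndex) >= 3);
}

int main(void)
{
    unsigned const pli = 1000;
    unsigned r;
    for (r = 994; r <= 1002; r++)
        printf("repIndex=%u -> %s\n", r,
               overlapOk(pli, r) ? "usable" : "straddles dict/prefix boundary");
    /* 994..996 : usable (entirely in extDict)
     * 997..999 : rejected (match would cross index 1000)
     * 1000+    : usable (wraparound makes the difference huge; entirely in prefix) */
    return 0;
}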
*/ -MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) { - size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; - U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK); - assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0); - hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag; -} - -/* Helper function for short cache matchfinders. - * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */ -MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { - U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK; - U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK; - return tag1 == tag2; -} - -#if defined (__cplusplus) -} -#endif - -/* =============================================================== - * Shared internal declarations - * These prototypes may be called from sources not in lib/compress - * =============================================================== */ - -/* ZSTD_loadCEntropy() : - * dict : must point at beginning of a valid zstd dictionary. - * return : size of dictionary header (size of magic number + dict ID + entropy tables) - * assumptions : magic number supposed already checked - * and dictSize >= 8 */ -size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, - const void* const dict, size_t dictSize); - -void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); - -/* ============================================================== - * Private declarations - * These prototypes shall only be called from within lib/compress - * ============================================================== */ - -/* ZSTD_getCParamsFromCCtxParams() : - * cParams are built depending on compressionLevel, src size hints, - * LDM and manually set compression parameters. - * Note: srcSizeHint == 0 means 0! - */ -ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); - -/*! ZSTD_initCStream_internal() : - * Private use only. Init streaming operation. - * expects params to be valid. - * must receive dict, or cdict, or none, but not both. - * @return : 0, or an error code */ -size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); - -void ZSTD_resetSeqStore(seqStore_t* ssPtr); - -/*! ZSTD_getCParamsFromCDict() : - * as the name implies */ -ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); - -/* ZSTD_compressBegin_advanced_internal() : - * Private use only. To be called from zstdmt_compress.c. */ -size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, - const void* dict, size_t dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - const ZSTD_CDict* cdict, - const ZSTD_CCtx_params* params, - unsigned long long pledgedSrcSize); - -/* ZSTD_compress_advanced_internal() : - * Private use only. To be called from zstdmt_compress.c. 
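A quick round trip for the short-cache packing above: a 24-bit index and an 8-bit tag share one U32 slot, and a match candidate is only dereferenced when the tags agree. Standalone restatement with plain stdint types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_BITS 8                        /* ZSTD_SHORT_CACHE_TAG_BITS */
#define TAG_MASK ((1u << TAG_BITS) - 1)   /* ZSTD_SHORT_CACHE_TAG_MASK */

int main(void)
{
    uint32_t table[16] = {0};
    /* a salted hash whose low 8 bits are the tag, upper bits the table slot */
    size_t const hashAndTag = ((size_t)5 << TAG_BITS) | 0xAB;
    uint32_t const index = 123456;        /* must fit in 32 - TAG_BITS = 24 bits */

    /* pack, as in ZSTD_writeTaggedIndex() */
    {   size_t const hash = hashAndTag >> TAG_BITS;
        uint32_t const tag = (uint32_t)(hashAndTag & TAG_MASK);
        assert(index >> (32 - TAG_BITS) == 0);
        table[hash] = (index << TAG_BITS) | tag;
    }
    /* probe: compare tags first, dereference the index only on agreement */
    {   uint32_t const packed = table[5];
        assert((packed & TAG_MASK) == 0xAB);          /* tags agree... */
        assert((packed >> TAG_BITS) == 123456);       /* ...so recover the index */
    }
    printf("tagged index round-trip ok\n");
    return 0;
}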
*/ -size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - const ZSTD_CCtx_params* params); - - -/* ZSTD_writeLastEmptyBlock() : - * output an empty Block with end-of-frame mark to complete a frame - * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) - * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize) - */ -size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity); - -/* ZSTD_referenceExternalSequences() : - * Must be called before starting a compression operation. - * seqs must parse a prefix of the source. - * This cannot be used when long range matching is enabled. - * Zstd will use these sequences, and pass the literals to a secondary block - * compressor. - * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory - * access and data corruption. - */ -void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq); - -/** ZSTD_cycleLog() : - * condition for correct operation : hashLog > 1 */ -U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); - -/** ZSTD_CCtx_trace() : - * Trace the end of a compression call. - */ -void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); - -/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of - * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. - * Note that the block delimiter must include the last literals of the block. - */ -size_t -ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, - ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); - -/* Returns the number of bytes to move the current read position back by. - * Only non-zero if we ended up splitting a sequence. - * Otherwise, it may return a ZSTD error if something went wrong. - * - * This function will attempt to scan through blockSize bytes - * represented by the sequences in @inSeqs, - * storing any (partial) sequences. - * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. - */ -size_t -ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); - -/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ -MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) { - return params->extSeqProdFunc != NULL; -} - -/* =============================================================== - * Deprecated definitions that are still used internally to avoid - * deprecation warnings. These functions are exactly equivalent to - * their public variants, but avoid the deprecation warnings. - * =============================================================== */ - -size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); - -size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - - -#endif /* ZSTD_COMPRESS_H */ diff --git a/zstandard_android/src/compress/zstd_compress_literals.c b/zstandard_android/src/compress/zstd_compress_literals.c deleted file mode 100644 index bfd4f11..0000000 --- a/zstandard_android/src/compress/zstd_compress_literals.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. 
- * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - /*-************************************* - * Dependencies - ***************************************/ -#include "zstd_compress_literals.h" - - -/* ************************************************************** -* Debug Traces -****************************************************************/ -#if DEBUGLEVEL >= 2 - -static size_t showHexa(const void* src, size_t srcSize) -{ - const BYTE* const ip = (const BYTE*)src; - size_t u; - for (u=0; u<srcSize; u++) { - RAWLOG(5, " %02X", ip[u]); - } - RAWLOG(5, " \n"); - return srcSize; -} - -#endif - - -/* ************************************************************** -* Literals compression - special cases -****************************************************************/ -size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - BYTE* const ostart = (BYTE*)dst; - U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); - - DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity); - - RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, ""); - - switch(flSize) - { - case 1: /* 2 - 1 - 5 */ - ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); - break; - case 2: /* 2 - 2 - 12 */ - MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); - break; - case 3: /* 2 - 2 - 20 */ - MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); - break; - default: /* not necessary : flSize is {1,2,3} */ - assert(0); - } - - ZSTD_memcpy(ostart + flSize, src, srcSize); - DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); - return srcSize + flSize; -} - -static int allBytesIdentical(const void* src, size_t srcSize) -{ - assert(srcSize >= 1); - assert(src != NULL); - { const BYTE b = ((const BYTE*)src)[0]; - size_t p; - for (p=1; p<srcSize; p++) { - if (((const BYTE*)src)[p] != b) return 0; - } - return 1; - } -} - -/* ZSTD_compressRleLiteralsBlock() : - * Conditions : - * - All bytes in @src are identical - * - dstCapacity >= 4 */ -size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - BYTE* const ostart = (BYTE*)dst; - U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); - - assert(dstCapacity >= 4); (void)dstCapacity; - assert(allBytesIdentical(src, srcSize)); - - switch(flSize) - { - case 1: /* 2 - 1 - 5 */ - ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); - break; - case 2: /* 2 - 2 - 12 */ - MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); - break; - case 3: /* 2 - 2 - 20 */ - MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); - break; - default: /* not necessary : flSize is {1,2,3} */ - assert(0); - } - - ostart[flSize] = *(const BYTE*)src; - DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1); - return flSize+1; -} - -/* ZSTD_minLiteralsToCompress() : - * returns minimal amount of literals - * for literal compression to even be attempted. - * Minimum is made tighter as compression strategy increases. - */ -static size_t -ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) -{ - assert((int)strategy >= 0); - assert((int)strategy <= 9); - /* btultra2 : min 8 bytes; - * then 2x larger for each successive compression strategy - * max threshold 64 bytes */ - { int const shift = MIN(9-(int)strategy, 3); - size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 
6 : (size_t)8 << shift; - DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc); - return mintc; - } -} - -size_t ZSTD_compressLiterals ( - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - void* entropyWorkspace, size_t entropyWorkspaceSize, - const ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, - int disableLiteralCompression, - int suspectUncompressible, - int bmi2) -{ - size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); - BYTE* const ostart = (BYTE*)dst; - U32 singleStream = srcSize < 256; - symbolEncodingType_e hType = set_compressed; - size_t cLitSize; - - DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)", - disableLiteralCompression, (U32)srcSize, dstCapacity); - - DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize)); - - /* Prepare nextEntropy assuming reusing the existing table */ - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - - if (disableLiteralCompression) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - - /* if too small, don't even attempt compression (speed opt) */ - if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - - RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); - { HUF_repeat repeat = prevHuf->repeatMode; - int const flags = 0 - | (bmi2 ? HUF_flags_bmi2 : 0) - | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0) - | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0) - | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0); - - typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int); - huf_compress_f huf_compress; - if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; - huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat; - cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize, - src, srcSize, - HUF_SYMBOLVALUE_MAX, LitHufLog, - entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, - &repeat, flags); - DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize); - if (repeat != HUF_repeat_none) { - /* reused the existing table */ - DEBUGLOG(5, "reusing statistics from previous huffman block"); - hType = set_repeat; - } - } - - { size_t const minGain = ZSTD_minGain(srcSize, strategy); - if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } } - if (cLitSize==1) { - /* A return value of 1 signals that the alphabet consists of a single symbol. - * However, in some rare circumstances, it could be the compressed size (a single byte). - * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`. - * (it's also necessary to not generate statistics). - * Therefore, in such a case, actively check that all bytes are identical. 
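The raw/RLE literals headers built by ZSTD_noCompressLiterals() and ZSTD_compressRleLiteralsBlock() above pack (blockType, sizeFormat, regenerated size) little-endian into 1, 2, or 3 bytes; a round-trip decode makes the three layouts concrete (set_basic == 0 and set_rle == 1, per symbolEncodingType_e):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the 3 raw-literals header layouts from ZSTD_noCompressLiterals():
 *   1 byte : 2 bits type, 1 bit sizeFormat(0), 5 bits size     (size <= 31)
 *   2 bytes: 2 bits type, 2 bits sizeFormat(01), 12 bits size  (size <= 4095)
 *   3 bytes: 2 bits type, 2 bits sizeFormat(11), 20 bits size */
static uint32_t decodeRawLitSize(const uint8_t* h)
{
    uint32_t const sizeFormat = (h[0] >> 2) & 3;
    if ((sizeFormat & 1) == 0) return h[0] >> 3;                         /* 1-byte form */
    if (sizeFormat == 1) return (h[0] | ((uint32_t)h[1] << 8)) >> 4;     /* 2-byte form */
    return (h[0] | ((uint32_t)h[1] << 8) | ((uint32_t)h[2] << 16)) >> 4; /* 3-byte form */
}

int main(void)
{
    uint8_t h[3];
    uint32_t const set_basic = 0;

    h[0] = (uint8_t)(set_basic + (17u << 3));                     /* flSize==1 */
    assert(decodeRawLitSize(h) == 17);

    {   uint32_t const v = set_basic + (1u << 2) + (300u << 4);   /* flSize==2 */
        h[0] = (uint8_t)v; h[1] = (uint8_t)(v >> 8);              /* MEM_writeLE16 */
        assert(decodeRawLitSize(h) == 300);
    }
    {   uint32_t const v = set_basic + (3u << 2) + (70000u << 4); /* flSize==3 */
        h[0] = (uint8_t)v; h[1] = (uint8_t)(v >> 8); h[2] = (uint8_t)(v >> 16);
        assert(decodeRawLitSize(h) == 70000);
    }
    printf("raw literals headers round-trip ok\n");
    return 0;
}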
*/ - if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } } - - if (hType == set_compressed) { - /* using a newly constructed table */ - nextHuf->repeatMode = HUF_repeat_check; - } - - /* Build header */ - switch(lhSize) - { - case 3: /* 2 - 2 - 10 - 10 */ - if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); - { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); - MEM_writeLE24(ostart, lhc); - break; - } - case 4: /* 2 - 2 - 14 - 14 */ - assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); - { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); - MEM_writeLE32(ostart, lhc); - break; - } - case 5: /* 2 - 2 - 18 - 18 */ - assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); - { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (BYTE)(cLitSize >> 10); - break; - } - default: /* not possible : lhSize is {3,4,5} */ - assert(0); - } - DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize)); - return lhSize+cLitSize; -} diff --git a/zstandard_android/src/compress/zstd_compress_sequences.c b/zstandard_android/src/compress/zstd_compress_sequences.c deleted file mode 100644 index 8872d4d..0000000 --- a/zstandard_android/src/compress/zstd_compress_sequences.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - /*-************************************* - * Dependencies - ***************************************/ -#include "zstd_compress_sequences.h" - -/** - * -log2(x / 256) lookup table for x in [0, 256). 
- * If x == 0: Return 0 - * Else: Return floor(-log2(x / 256) * 256) - */ -static unsigned const kInverseProbabilityLog256[256] = { - 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, - 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, - 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, - 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, - 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, - 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, - 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, - 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, - 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, - 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, - 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, - 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, - 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, - 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, - 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, - 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, - 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, - 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, - 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, - 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, - 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, - 5, 4, 2, 1, -}; - -static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { - void const* ptr = ctable; - U16 const* u16ptr = (U16 const*)ptr; - U32 const maxSymbolValue = MEM_read16(u16ptr + 1); - return maxSymbolValue; -} - -/** - * Returns true if we should use ncount=-1 else we should - * use ncount=1 for low probability symbols instead. - */ -static unsigned ZSTD_useLowProbCount(size_t const nbSeq) -{ - /* Heuristic: This should cover most blocks <= 16K and - * start to fade out after 16K to about 32K depending on - * compressibility. - */ - return nbSeq >= 2048; -} - -/** - * Returns the cost in bytes of encoding the normalized count header. - * Returns an error if any of the helper functions return an error. - */ -static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, - size_t const nbSeq, unsigned const FSELog) -{ - BYTE wksp[FSE_NCOUNTBOUND]; - S16 norm[MaxSeq + 1]; - const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), ""); - return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); -} - -/** - * Returns the cost in bits of encoding the distribution described by count - * using the entropy bound. - */ -static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) -{ - unsigned cost = 0; - unsigned s; - - assert(total > 0); - for (s = 0; s <= max; ++s) { - unsigned norm = (unsigned)((256 * count[s]) / total); - if (count[s] != 0 && norm == 0) - norm = 1; - assert(count[s] < total); - cost += count[s] * kInverseProbabilityLog256[norm]; - } - return cost >> 8; -} - -/** - * Returns the cost in bits of encoding the distribution in count using ctable. - * Returns an error if ctable cannot represent all the symbols in count. 
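The entries of kInverseProbabilityLog256 are -log2(x/256) in 8.8 fixed point; a spot check against math.h confirms a few entries, followed by a toy histogram run through the same estimate that ZSTD_entropyCost() computes (table values quoted from above):

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* entry x should be floor(-log2(x/256) * 256) */
    unsigned const expected16  = (unsigned)floor(-log2(16.0  / 256) * 256);  /* 1024 */
    unsigned const expected128 = (unsigned)floor(-log2(128.0 / 256) * 256);  /*  256 */
    printf("x=16 -> %u (table says 1024), x=128 -> %u (table says 256)\n",
           expected16, expected128);

    /* ZSTD_entropyCost-style estimate for a toy histogram {192, 48, 16} */
    {   unsigned const count[3] = {192, 48, 16};
        unsigned const total = 256;
        double bits = 0;
        int s;
        for (s = 0; s < 3; s++) {
            unsigned const norm = (256 * count[s]) / total;   /* normalized to /256 */
            bits += count[s] * (-log2((double)norm / 256));   /* table lookup stand-in */
        }
        printf("estimated cost: %.1f bits (%.1f bytes)\n", bits, bits / 8);
    }
    return 0;
}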
- */ -size_t ZSTD_fseBitCost( - FSE_CTable const* ctable, - unsigned const* count, - unsigned const max) -{ - unsigned const kAccuracyLog = 8; - size_t cost = 0; - unsigned s; - FSE_CState_t cstate; - FSE_initCState(&cstate, ctable); - if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { - DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", - ZSTD_getFSEMaxSymbolValue(ctable), max); - return ERROR(GENERIC); - } - for (s = 0; s <= max; ++s) { - unsigned const tableLog = cstate.stateLog; - unsigned const badCost = (tableLog + 1) << kAccuracyLog; - unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); - if (count[s] == 0) - continue; - if (bitCost >= badCost) { - DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); - return ERROR(GENERIC); - } - cost += (size_t)count[s] * bitCost; - } - return cost >> kAccuracyLog; -} - -/** - * Returns the cost in bits of encoding the distribution in count using the - * table described by norm. The max symbol support by norm is assumed >= max. - * norm must be valid for every symbol with non-zero probability in count. - */ -size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, - unsigned const* count, unsigned const max) -{ - unsigned const shift = 8 - accuracyLog; - size_t cost = 0; - unsigned s; - assert(accuracyLog <= 8); - for (s = 0; s <= max; ++s) { - unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1; - unsigned const norm256 = normAcc << shift; - assert(norm256 > 0); - assert(norm256 < 256); - cost += count[s] * kInverseProbabilityLog256[norm256]; - } - return cost >> 8; -} - -symbolEncodingType_e -ZSTD_selectEncodingType( - FSE_repeat* repeatMode, unsigned const* count, unsigned const max, - size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, - FSE_CTable const* prevCTable, - short const* defaultNorm, U32 defaultNormLog, - ZSTD_defaultPolicy_e const isDefaultAllowed, - ZSTD_strategy const strategy) -{ - ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); - if (mostFrequent == nbSeq) { - *repeatMode = FSE_repeat_none; - if (isDefaultAllowed && nbSeq <= 2) { - /* Prefer set_basic over set_rle when there are 2 or fewer symbols, - * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. - * If basic encoding isn't possible, always choose RLE. - */ - DEBUGLOG(5, "Selected set_basic"); - return set_basic; - } - DEBUGLOG(5, "Selected set_rle"); - return set_rle; - } - if (strategy < ZSTD_lazy) { - if (isDefaultAllowed) { - size_t const staticFse_nbSeq_max = 1000; - size_t const mult = 10 - strategy; - size_t const baseLog = 3; - size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ - assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ - assert(mult <= 9 && mult >= 7); - if ( (*repeatMode == FSE_repeat_valid) - && (nbSeq < staticFse_nbSeq_max) ) { - DEBUGLOG(5, "Selected set_repeat"); - return set_repeat; - } - if ( (nbSeq < dynamicFse_nbSeq_min) - || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { - DEBUGLOG(5, "Selected set_basic"); - /* The format allows default tables to be repeated, but it isn't useful. - * When using simple heuristics to select encoding type, we don't want - * to confuse these tables with dictionaries. When running more careful - * analysis, we don't need to waste time checking both repeating tables - * and default tables. 
- */ - *repeatMode = FSE_repeat_none; - return set_basic; - } - } - } else { - size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); - size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); - size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); - size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); - - if (isDefaultAllowed) { - assert(!ZSTD_isError(basicCost)); - assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); - } - assert(!ZSTD_isError(NCountCost)); - assert(compressedCost < ERROR(maxCode)); - DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", - (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); - if (basicCost <= repeatCost && basicCost <= compressedCost) { - DEBUGLOG(5, "Selected set_basic"); - assert(isDefaultAllowed); - *repeatMode = FSE_repeat_none; - return set_basic; - } - if (repeatCost <= compressedCost) { - DEBUGLOG(5, "Selected set_repeat"); - assert(!ZSTD_isError(repeatCost)); - return set_repeat; - } - assert(compressedCost < basicCost && compressedCost < repeatCost); - } - DEBUGLOG(5, "Selected set_compressed"); - *repeatMode = FSE_repeat_check; - return set_compressed; -} - -typedef struct { - S16 norm[MaxSeq + 1]; - U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)]; -} ZSTD_BuildCTableWksp; - -size_t -ZSTD_buildCTable(void* dst, size_t dstCapacity, - FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, - unsigned* count, U32 max, - const BYTE* codeTable, size_t nbSeq, - const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, - const FSE_CTable* prevCTable, size_t prevCTableSize, - void* entropyWorkspace, size_t entropyWorkspaceSize) -{ - BYTE* op = (BYTE*)dst; - const BYTE* const oend = op + dstCapacity; - DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); - - switch (type) { - case set_rle: - FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), ""); - RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space"); - *op = codeTable[0]; - return 1; - case set_repeat: - ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize); - return 0; - case set_basic: - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */ - return 0; - case set_compressed: { - ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; - size_t nbSeq_1 = nbSeq; - const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - if (count[codeTable[nbSeq-1]] > 1) { - count[codeTable[nbSeq-1]]--; - nbSeq_1--; - } - assert(nbSeq_1 > 1); - assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp)); - (void)entropyWorkspaceSize; - FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed"); - assert(oend >= op); - { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */ - FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed"); - return NCountSize; - } - } - default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach"); - } -} - -FORCE_INLINE_TEMPLATE size_t -ZSTD_encodeSequences_body( - 
void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) -{ - BIT_CStream_t blockStream; - FSE_CState_t stateMatchLength; - FSE_CState_t stateOffsetBits; - FSE_CState_t stateLitLength; - - RETURN_ERROR_IF( - ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)), - dstSize_tooSmall, "not enough space remaining"); - DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)", - (int)(blockStream.endPtr - blockStream.startPtr), - (unsigned)dstCapacity); - - /* first symbols */ - FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); - FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); - FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); - BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); - if (MEM_32bits()) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]); - if (MEM_32bits()) BIT_flushBits(&blockStream); - if (longOffsets) { - U32 const ofBits = ofCodeTable[nbSeq-1]; - unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits); - BIT_flushBits(&blockStream); - } - BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits, - ofBits - extraBits); - } else { - BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]); - } - BIT_flushBits(&blockStream); - - { size_t n; - for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ - BYTE const llCode = llCodeTable[n]; - BYTE const ofCode = ofCodeTable[n]; - BYTE const mlCode = mlCodeTable[n]; - U32 const llBits = LL_bits[llCode]; - U32 const ofBits = ofCode; - U32 const mlBits = ML_bits[mlCode]; - DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u", - (unsigned)sequences[n].litLength, - (unsigned)sequences[n].mlBase + MINMATCH, - (unsigned)sequences[n].offBase); - /* 32b*/ /* 64b*/ - /* (7)*/ /* (7)*/ - FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ - FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ - if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ - FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ - if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog))) - BIT_flushBits(&blockStream); /* (7)*/ - BIT_addBits(&blockStream, sequences[n].litLength, llBits); - if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[n].mlBase, mlBits); - if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); - if (longOffsets) { - unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); - if (extraBits) { - BIT_addBits(&blockStream, sequences[n].offBase, extraBits); - BIT_flushBits(&blockStream); /* (7)*/ - } - BIT_addBits(&blockStream, sequences[n].offBase >> extraBits, - ofBits - extraBits); /* 31 */ - } else { - BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */ - } - BIT_flushBits(&blockStream); /* (7)*/ - DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); - } } - - DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); - FSE_flushCState(&blockStream, &stateMatchLength); - DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); - FSE_flushCState(&blockStream, &stateOffsetBits); - DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); - FSE_flushCState(&blockStream, &stateLitLength); - - { size_t const streamSize = BIT_closeCStream(&blockStream); - RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space"); - return streamSize; - } -} - -static size_t -ZSTD_encodeSequences_default( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int 
longOffsets) -{ - return ZSTD_encodeSequences_body(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} - - -#if DYNAMIC_BMI2 - -static BMI2_TARGET_ATTRIBUTE size_t -ZSTD_encodeSequences_bmi2( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets) -{ - return ZSTD_encodeSequences_body(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} - -#endif - -size_t ZSTD_encodeSequences( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) -{ - DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); -#if DYNAMIC_BMI2 - if (bmi2) { - return ZSTD_encodeSequences_bmi2(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); - } -#endif - (void)bmi2; - return ZSTD_encodeSequences_default(dst, dstCapacity, - CTable_MatchLength, mlCodeTable, - CTable_OffsetBits, ofCodeTable, - CTable_LitLength, llCodeTable, - sequences, nbSeq, longOffsets); -} diff --git a/zstandard_android/src/compress/zstd_compress_sequences.h b/zstandard_android/src/compress/zstd_compress_sequences.h deleted file mode 100644 index 4a3a05d..0000000 --- a/zstandard_android/src/compress/zstd_compress_sequences.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#ifndef ZSTD_COMPRESS_SEQUENCES_H -#define ZSTD_COMPRESS_SEQUENCES_H - -#include "../common/fse.h" /* FSE_repeat, FSE_CTable */ -#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */ - -typedef enum { - ZSTD_defaultDisallowed = 0, - ZSTD_defaultAllowed = 1 -} ZSTD_defaultPolicy_e; - -symbolEncodingType_e -ZSTD_selectEncodingType( - FSE_repeat* repeatMode, unsigned const* count, unsigned const max, - size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, - FSE_CTable const* prevCTable, - short const* defaultNorm, U32 defaultNormLog, - ZSTD_defaultPolicy_e const isDefaultAllowed, - ZSTD_strategy const strategy); - -size_t -ZSTD_buildCTable(void* dst, size_t dstCapacity, - FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, - unsigned* count, U32 max, - const BYTE* codeTable, size_t nbSeq, - const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, - const FSE_CTable* prevCTable, size_t prevCTableSize, - void* entropyWorkspace, size_t entropyWorkspaceSize); - -size_t ZSTD_encodeSequences( - void* dst, size_t dstCapacity, - FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, - FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, - FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, - seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); - -size_t ZSTD_fseBitCost( - FSE_CTable const* ctable, - unsigned const* count, - unsigned const max); - -size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, - unsigned const* count, unsigned const max); -#endif /* ZSTD_COMPRESS_SEQUENCES_H */ diff --git a/zstandard_android/src/compress/zstd_compress_superblock.c b/zstandard_android/src/compress/zstd_compress_superblock.c deleted file mode 100644 index 628a2dc..0000000 --- a/zstandard_android/src/compress/zstd_compress_superblock.c +++ /dev/null @@ -1,688 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - /*-************************************* - * Dependencies - ***************************************/ -#include "zstd_compress_superblock.h" - -#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */ -#include "hist.h" /* HIST_countFast_wksp */ -#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */ -#include "zstd_compress_sequences.h" -#include "zstd_compress_literals.h" - -/** ZSTD_compressSubBlock_literal() : - * Compresses literals section for a sub-block. - * When we have to write the Huffman table we will sometimes choose a header - * size larger than necessary. This is because we have to pick the header size - * before we know the table size + compressed size, so we have a bound on the - * table size. If we guessed incorrectly, we fall back to uncompressed literals. - * - * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded - * in writing the header, otherwise it is set to 0. - * - * hufMetadata->hType has literals block type info. - * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. - * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. 
- * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block - * and the following sub-blocks' literals sections will be Treeless_Literals_Block. - * @return : compressed size of literals section of a sub-block - * Or 0 if unable to compress. - * Or error code */ -static size_t -ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - const BYTE* literals, size_t litSize, - void* dst, size_t dstSize, - const int bmi2, int writeEntropy, int* entropyWritten) -{ - size_t const header = writeEntropy ? 200 : 0; - size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstSize; - BYTE* op = ostart + lhSize; - U32 const singleStream = lhSize == 3; - symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; - size_t cLitSize = 0; - - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); - - *entropyWritten = 0; - if (litSize == 0 || hufMetadata->hType == set_basic) { - DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal"); - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } else if (hufMetadata->hType == set_rle) { - DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal"); - return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize); - } - - assert(litSize > 0); - assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat); - - if (writeEntropy && hufMetadata->hType == set_compressed) { - ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize); - op += hufMetadata->hufDesSize; - cLitSize += hufMetadata->hufDesSize; - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); - } - - { int const flags = bmi2 ? HUF_flags_bmi2 : 0; - const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags) - : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags); - op += cSize; - cLitSize += cSize; - if (cSize == 0 || ERR_isError(cSize)) { - DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize)); - return 0; - } - /* If we expand and we aren't writing a header then emit uncompressed */ - if (!writeEntropy && cLitSize >= litSize) { - DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible"); - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } - /* If we are writing headers then allow expansion that doesn't change our header size. 
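The hType dispatch described in the comment above reduces to a small mapping from (hufMetadata->hType, writeEntropy) to the literals-block kind each sub-block emits; restated as a tiny function (the block-kind enum names follow RFC 8878 and are defined locally for this sketch):

#include <stdio.h>

typedef enum { set_basic = 0, set_rle, set_compressed, set_repeat } SymType;
typedef enum { Raw_Literals_Block, RLE_Literals_Block,
               Compressed_Literals_Block, Treeless_Literals_Block } LitBlockKind;

/* First sub-block (writeEntropy=1) carries the Huffman description;
 * later sub-blocks (writeEntropy=0) reuse it via set_repeat/Treeless. */
static LitBlockKind litBlockKind(SymType hType, int writeEntropy)
{
    if (hType == set_basic) return Raw_Literals_Block;
    if (hType == set_rle)   return RLE_Literals_Block;
    return writeEntropy ? Compressed_Literals_Block : Treeless_Literals_Block;
}

int main(void)
{
    printf("%d %d %d %d\n",
           litBlockKind(set_basic, 1),        /* 0: raw for every sub-block */
           litBlockKind(set_rle, 0),          /* 1: rle for every sub-block */
           litBlockKind(set_compressed, 1),   /* 2: first sub-block, table written */
           litBlockKind(set_compressed, 0));  /* 3: following sub-blocks, treeless */
    return 0;
}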
*/ - if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) { - assert(cLitSize > litSize); - DEBUGLOG(5, "Literals expanded beyond allowed header size"); - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize); - } - - /* Build header */ - switch(lhSize) - { - case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); - MEM_writeLE24(ostart, lhc); - break; - } - case 4: /* 2 - 2 - 14 - 14 */ - { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18); - MEM_writeLE32(ostart, lhc); - break; - } - case 5: /* 2 - 2 - 18 - 18 */ - { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (BYTE)(cLitSize >> 10); - break; - } - default: /* not possible : lhSize is {3,4,5} */ - assert(0); - } - *entropyWritten = 1; - DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); - return (size_t)(op-ostart); -} - -static size_t -ZSTD_seqDecompressedSize(seqStore_t const* seqStore, - const seqDef* sequences, size_t nbSeqs, - size_t litSize, int lastSubBlock) -{ - size_t matchLengthSum = 0; - size_t litLengthSum = 0; - size_t n; - for (n=0; n<nbSeqs; n++) { - const ZSTD_sequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n); - litLengthSum += seqLen.litLength; - matchLengthSum += seqLen.matchLength; - } - if (!lastSubBlock) - assert(litLengthSum == litSize); - else - assert(litLengthSum <= litSize); - (void)litLengthSum; - return matchLengthSum + litSize; -} - -/** ZSTD_compressSubBlock_sequences() : - * Compresses sequences section for a sub-block. - * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have - * symbol compression modes for the super-block. - * The first successfully compressed block will have these in its header. - * We set entropyWritten=1 when we succeed in compressing the sequences. - * The following sub-blocks will always have repeat mode. - * @return : compressed size of sequences section of a sub-block - * Or 0 if it is unable to compress - * Or error code. */ -static size_t -ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - const seqDef* sequences, size_t nbSeq, - const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - const int bmi2, int writeEntropy, int* entropyWritten) -{ - const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstCapacity; - BYTE* op = ostart; - BYTE* seqHead; - - DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets); - - *entropyWritten = 0; - /* Sequences Header */ - RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, - dstSize_tooSmall, ""); - if (nbSeq < 128) - *op++ = (BYTE)nbSeq; - else if (nbSeq < LONGNBSEQ) - op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; - else - op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; - if (nbSeq==0) { - return (size_t)(op - ostart); - } - - /* seqHead : flags for FSE encoding type */ - seqHead = op++; - - DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart)); - - if (writeEntropy) { - const U32 LLtype = fseMetadata->llType; - const U32 Offtype = fseMetadata->ofType; - const U32 MLtype = fseMetadata->mlType; - DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize); - *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); - ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize); - op += fseMetadata->fseTablesSize; - } else { - const U32 repeat = set_repeat; - *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2)); - } - - 
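The sequences-section header written above is worth a round trip: nbSeq occupies 1-3 bytes depending on magnitude, and seqHead packs three 2-bit symbol-compression modes. A standalone encode/decode pair (LONGNBSEQ = 0x7F00, as in zstd_internal.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LONGNBSEQ 0x7F00

/* Encode nbSeq exactly as ZSTD_compressSubBlock_sequences() does; return header size. */
static size_t encodeNbSeq(uint8_t* op, size_t nbSeq)
{
    if (nbSeq < 128) { op[0] = (uint8_t)nbSeq; return 1; }
    if (nbSeq < LONGNBSEQ) { op[0] = (uint8_t)((nbSeq >> 8) + 0x80); op[1] = (uint8_t)nbSeq; return 2; }
    op[0] = 0xFF;                                    /* MEM_writeLE16(op+1, nbSeq - LONGNBSEQ) */
    op[1] = (uint8_t)(nbSeq - LONGNBSEQ); op[2] = (uint8_t)((nbSeq - LONGNBSEQ) >> 8);
    return 3;
}

static size_t decodeNbSeq(const uint8_t* ip, size_t* nbSeq)
{
    if (ip[0] < 128)  { *nbSeq = ip[0]; return 1; }
    if (ip[0] < 0xFF) { *nbSeq = ((size_t)(ip[0] - 0x80) << 8) + ip[1]; return 2; }
    *nbSeq = (size_t)ip[1] + ((size_t)ip[2] << 8) + LONGNBSEQ;
    return 3;
}

int main(void)
{
    size_t const samples[] = {0, 127, 128, 32511, 32512, 65000};
    size_t i;
    for (i = 0; i < sizeof(samples)/sizeof(*samples); i++) {
        uint8_t buf[3]; size_t out;
        size_t const n = encodeNbSeq(buf, samples[i]);
        assert(decodeNbSeq(buf, &out) == n && out == samples[i]);
    }
    /* seqHead: 2-bit modes for LL/Offset/ML, e.g. all set_repeat (=3) */
    {   uint8_t const seqHead = (uint8_t)((3u << 6) + (3u << 4) + (3u << 2));
        assert((seqHead >> 6) == 3 && ((seqHead >> 4) & 3) == 3 && ((seqHead >> 2) & 3) == 3);
    }
    printf("sequences header round-trip ok\n");
    return 0;
}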
{ size_t const bitstreamSize = ZSTD_encodeSequences( - op, (size_t)(oend - op), - fseTables->matchlengthCTable, mlCode, - fseTables->offcodeCTable, ofCode, - fseTables->litlengthCTable, llCode, - sequences, nbSeq, - longOffsets, bmi2); - FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); - op += bitstreamSize; - /* zstd versions <= 1.3.4 mistakenly report corruption when - * FSE_readNCount() receives a buffer < 4 bytes. - * Fixed by https://github.com/facebook/zstd/pull/1146. - * This can happen when the last set_compressed table present is 2 - * bytes and the bitstream is only one byte. - * In this exceedingly rare case, we will simply emit an uncompressed - * block, since it isn't worth optimizing. - */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { - /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ - assert(fseMetadata->lastCountSize + bitstreamSize == 3); - DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " - "emitting an uncompressed block."); - return 0; - } -#endif - DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); - } - - /* zstd versions <= 1.4.0 mistakenly report error when - * sequences section body size is less than 3 bytes. - * Fixed by https://github.com/facebook/zstd/pull/1664. - * This can happen when the previous sequences section block is compressed - * with rle mode and the current block's sequences section is compressed - * with repeat mode where sequences section body size can be 1 byte. - */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (op-seqHead < 4) { - DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " - "an uncompressed block when sequences are < 4 bytes"); - return 0; - } -#endif - - *entropyWritten = 1; - return (size_t)(op - ostart); -} - -/** ZSTD_compressSubBlock() : - * Compresses a single sub-block. - * @return : compressed size of the sub-block - * Or 0 if it failed to compress. 
*/ -static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - const seqDef* sequences, size_t nbSeq, - const BYTE* literals, size_t litSize, - const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - const int bmi2, - int writeLitEntropy, int writeSeqEntropy, - int* litEntropyWritten, int* seqEntropyWritten, - U32 lastBlock) -{ - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstCapacity; - BYTE* op = ostart + ZSTD_blockHeaderSize; - DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", - litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); - { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, - &entropyMetadata->hufMetadata, literals, litSize, - op, (size_t)(oend-op), - bmi2, writeLitEntropy, litEntropyWritten); - FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); - if (cLitSize == 0) return 0; - op += cLitSize; - } - { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, - &entropyMetadata->fseMetadata, - sequences, nbSeq, - llCode, mlCode, ofCode, - cctxParams, - op, (size_t)(oend-op), - bmi2, writeSeqEntropy, seqEntropyWritten); - FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); - if (cSeqSize == 0) return 0; - op += cSeqSize; - } - /* Write block header */ - { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize; - U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); - MEM_writeLE24(ostart, cBlockHeader24); - } - return (size_t)(op-ostart); -} - -static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, - const ZSTD_hufCTables_t* huf, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) -{ - unsigned* const countWksp = (unsigned*)workspace; - unsigned maxSymbolValue = 255; - size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ - - if (hufMetadata->hType == set_basic) return litSize; - else if (hufMetadata->hType == set_rle) return 1; - else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { - size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); - if (ZSTD_isError(largest)) return litSize; - { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); - if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; - return cLitSizeEstimate + literalSectionHeaderSize; - } } - assert(0); /* impossible */ - return 0; -} - -static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, - const BYTE* codeTable, unsigned maxCode, - size_t nbSeq, const FSE_CTable* fseCTable, - const U8* additionalBits, - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) -{ - unsigned* const countWksp = (unsigned*)workspace; - const BYTE* ctp = codeTable; - const BYTE* const ctStart = ctp; - const BYTE* const ctEnd = ctStart + nbSeq; - size_t cSymbolTypeSizeEstimateInBits = 0; - unsigned max = maxCode; - - HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ - if (type == set_basic) { - /* We selected this encoding type, so it must be valid. 
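 * [editor's note: clarification, not in the upstream source. For set_basic,
 * the estimate below charges each symbol its code length under the default
 * normalized distribution, roughly sum_s count[s] * -log2(norm[s] / 2^normLog),
 * which is what ZSTD_crossEntropyCost() computes; the per-code additional
 * offset/length bits are then added in the while loop that follows.]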
*/ - assert(max <= defaultMax); - cSymbolTypeSizeEstimateInBits = max <= defaultMax - ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) - : ERROR(GENERIC); - } else if (type == set_rle) { - cSymbolTypeSizeEstimateInBits = 0; - } else if (type == set_compressed || type == set_repeat) { - cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); - } - if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10; - while (ctp < ctEnd) { - if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; - else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ - ctp++; - } - return cSymbolTypeSizeEstimateInBits / 8; -} - -static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) -{ - size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ - size_t cSeqSizeEstimate = 0; - if (nbSeq == 0) return sequencesSectionHeaderSize; - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff, - nbSeq, fseTables->offcodeCTable, NULL, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL, - nbSeq, fseTables->litlengthCTable, LL_bits, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML, - nbSeq, fseTables->matchlengthCTable, ML_bits, - ML_defaultNorm, ML_defaultNormLog, MaxML, - workspace, wkspSize); - if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; - return cSeqSizeEstimate + sequencesSectionHeaderSize; -} - -typedef struct { - size_t estLitSize; - size_t estBlockSize; -} EstimatedBlockSize; -static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, - const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) -{ - EstimatedBlockSize ebs; - ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); - ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, - nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, - workspace, wkspSize, writeSeqEntropy); - ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize; - return ebs; -} - -static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) -{ - if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle) - return 1; - if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle) - return 1; - if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle) - return 1; - return 0; -} - -static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t seqCount) -{ - size_t n, total = 0; - assert(sp != NULL); - for (n=0; n<seqCount; n++) { - total += ZSTD_getSequenceLength(seqStore, sp+n).litLength; - } - DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total); - return total; -} - -#define BYTESCALE 256 -
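/* [editor's note: illustrative sketch, not part of the upstream zstd source;
 * the helper name is hypothetical.] BYTESCALE turns the byte-size estimates
 * below into 8-bit fixed point: a scaled cost of 64 means 0.25 output byte
 * per input unit. Callers derive avgLitCost = estLitSize * BYTESCALE /
 * nbLiterals and keep all budget comparisons in this scaled domain. */
static size_t exampleScaledLitBudget(size_t estLitSize, size_t nbLiterals,
                                     size_t litLength)
{
    /* average cost per literal in 1/BYTESCALE bytes (defaults to 1.0) */
    size_t const avgLitCost = nbLiterals ? (estLitSize * BYTESCALE) / nbLiterals
                                         : BYTESCALE;
    /* e.g. estLitSize=5000, nbLiterals=20000 -> avgLitCost=64 (0.25 byte);
     * 100 literals then cost 6400 scaled units, i.e. 25 bytes. */
    return (litLength * avgLitCost) / BYTESCALE;
}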
-static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs, - size_t targetBudget, size_t avgLitCost, size_t avgSeqCost, - int firstSubBlock) -{ - size_t n, budget = 0, inSize=0; - /* entropy headers */ - size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */ - assert(firstSubBlock==0 || firstSubBlock==1); - budget += headerSize; - - /* first sequence => at least one sequence*/ - budget += sp[0].litLength * avgLitCost + avgSeqCost; - if (budget > targetBudget) return 1; - inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH); - - /* loop over sequences */ - for (n=1; n<nbSeqs; n++) { - size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost; - budget += currentCost; - inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH); - /* stop when sub-block budget is reached */ - if ( (budget > targetBudget) - /* though continue to expand until the sub-block is deemed compressible */ - && (budget < inSize * BYTESCALE) ) - break; - } - - return n; -} - -/** ZSTD_compressSubBlock_multi() : - * Breaks super-block into multiple sub-blocks and compresses them. - * Entropy will be written into the first block. - * The following blocks use repeat_mode to compress. - * Sub-blocks are all compressed, except the last one when beneficial. - * @return : compressed size of the super block (which features multiple ZSTD blocks) - * or 0 if it failed to compress. */ -static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, - const ZSTD_compressedBlockState_t* prevCBlock, - ZSTD_compressedBlockState_t* nextCBlock, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const int bmi2, U32 lastBlock, - void* workspace, size_t wkspSize) -{ - const seqDef* const sstart = seqStorePtr->sequencesStart; - const seqDef* const send = seqStorePtr->sequences; - const seqDef* sp = sstart; /* tracks progress within seqStorePtr->sequences */ - size_t const nbSeqs = (size_t)(send - sstart); - const BYTE* const lstart = seqStorePtr->litStart; - const BYTE* const lend = seqStorePtr->lit; - const BYTE* lp = lstart; - size_t const nbLiterals = (size_t)(lend - lstart); - BYTE const* ip = (BYTE const*)src; - BYTE const* const iend = ip + srcSize; - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + dstCapacity; - BYTE* op = ostart; - const BYTE* llCodePtr = seqStorePtr->llCode; - const BYTE* mlCodePtr = seqStorePtr->mlCode; - const BYTE* ofCodePtr = seqStorePtr->ofCode; - size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */ - size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize); - int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed); - int writeSeqEntropy = 1; - - DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)", - (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart)); - - /* let's start with a general estimation for the full block */ - if (nbSeqs > 0) { - EstimatedBlockSize const ebs = - ZSTD_estimateSubBlockSize(lp, nbLiterals, - ofCodePtr, llCodePtr, mlCodePtr, nbSeqs, - &nextCBlock->entropy, entropyMetadata, - workspace, wkspSize, - writeLitEntropy, writeSeqEntropy); - /* quick estimation */ - size_t const avgLitCost = nbLiterals ? 
(ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE; - size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs; - const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1); - size_t n, avgBlockBudget, blockBudgetSupp=0; - avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks; - DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes", - (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE, - (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE); - /* simplification: if estimates states that the full superblock doesn't compress, just bail out immediately - * this will result in the production of a single uncompressed block covering @srcSize.*/ - if (ebs.estBlockSize > srcSize) return 0; - - /* compress and write sub-blocks */ - assert(nbSubBlocks>0); - for (n=0; n < nbSubBlocks-1; n++) { - /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ - size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp), - avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0); - /* if reached last sequence : break to last sub-block (simplification) */ - assert(seqCount <= (size_t)(send-sp)); - if (sp + seqCount == send) break; - assert(seqCount > 0); - /* compress sub-block */ - { int litEntropyWritten = 0; - int seqEntropyWritten = 0; - size_t litSize = countLiterals(seqStorePtr, sp, seqCount); - const size_t decompressedSize = - ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0); - size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, - sp, seqCount, - lp, litSize, - llCodePtr, mlCodePtr, ofCodePtr, - cctxParams, - op, (size_t)(oend-op), - bmi2, writeLitEntropy, writeSeqEntropy, - &litEntropyWritten, &seqEntropyWritten, - 0); - FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); - - /* check compressibility, update state components */ - if (cSize > 0 && cSize < decompressedSize) { - DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes", - (unsigned)decompressedSize, (unsigned)cSize); - assert(ip + decompressedSize <= iend); - ip += decompressedSize; - lp += litSize; - op += cSize; - llCodePtr += seqCount; - mlCodePtr += seqCount; - ofCodePtr += seqCount; - /* Entropy only needs to be written once */ - if (litEntropyWritten) { - writeLitEntropy = 0; - } - if (seqEntropyWritten) { - writeSeqEntropy = 0; - } - sp += seqCount; - blockBudgetSupp = 0; - } } - /* otherwise : do not compress yet, coalesce current sub-block with following one */ - } - } /* if (nbSeqs > 0) */ - - /* write last block */ - DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp)); - { int litEntropyWritten = 0; - int seqEntropyWritten = 0; - size_t litSize = (size_t)(lend - lp); - size_t seqCount = (size_t)(send - sp); - const size_t decompressedSize = - ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1); - size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, - sp, seqCount, - lp, litSize, - llCodePtr, mlCodePtr, ofCodePtr, - cctxParams, - op, (size_t)(oend-op), - bmi2, writeLitEntropy, writeSeqEntropy, - &litEntropyWritten, &seqEntropyWritten, - lastBlock); - FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); - - /* update pointers, the nb of literals borrowed from next sequence must be preserved */ - if (cSize > 0 && cSize 
< decompressedSize) { - DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes", - (unsigned)decompressedSize, (unsigned)cSize); - assert(ip + decompressedSize <= iend); - ip += decompressedSize; - lp += litSize; - op += cSize; - llCodePtr += seqCount; - mlCodePtr += seqCount; - ofCodePtr += seqCount; - /* Entropy only needs to be written once */ - if (litEntropyWritten) { - writeLitEntropy = 0; - } - if (seqEntropyWritten) { - writeSeqEntropy = 0; - } - sp += seqCount; - } - } - - - if (writeLitEntropy) { - DEBUGLOG(5, "Literal entropy tables were never written"); - ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); - } - if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { - /* If we haven't written our entropy tables, then we've violated our contract and - * must emit an uncompressed block. - */ - DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block"); - return 0; - } - - if (ip < iend) { - /* some data left : last part of the block sent uncompressed */ - size_t const rSize = (size_t)((iend - ip)); - size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock); - DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize)); - FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); - assert(cSize != 0); - op += cSize; - /* We have to regenerate the repcodes because we've skipped some sequences */ - if (sp < send) { - const seqDef* seq; - repcodes_t rep; - ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); - for (seq = sstart; seq < sp; ++seq) { - ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); - } - ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); - } - } - - DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u", - (unsigned)(op-ostart)); - return (size_t)(op-ostart); -} - -size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned lastBlock) -{ - ZSTD_entropyCTablesMetadata_t entropyMetadata; - - FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore, - &zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - &entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); - - return ZSTD_compressSubBlock_multi(&zc->seqStore, - zc->blockState.prevCBlock, - zc->blockState.nextCBlock, - &entropyMetadata, - &zc->appliedParams, - dst, dstCapacity, - src, srcSize, - zc->bmi2, lastBlock, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */); -} diff --git a/zstandard_android/src/compress/zstd_cwksp.h b/zstandard_android/src/compress/zstd_cwksp.h deleted file mode 100644 index dcd485c..0000000 --- a/zstandard_android/src/compress/zstd_cwksp.h +++ /dev/null @@ -1,749 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#ifndef ZSTD_CWKSP_H -#define ZSTD_CWKSP_H - -/*-************************************* -* Dependencies -***************************************/ -#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ -#include "../common/zstd_internal.h" -#include "../common/portability_macros.h" - -#if defined (__cplusplus) -extern "C" { -#endif - -/*-************************************* -* Constants -***************************************/ - -/* Since the workspace is effectively its own little malloc implementation / - * arena, when we run under ASAN, we should similarly insert redzones between - * each internal element of the workspace, so ASAN will catch overruns that - * reach outside an object but that stay inside the workspace. - * - * This defines the size of that redzone. - */ -#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE -#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 -#endif - - -/* Set our tables and aligneds to align by 64 bytes */ -#define ZSTD_CWKSP_ALIGNMENT_BYTES 64 - -/*-************************************* -* Structures -***************************************/ -typedef enum { - ZSTD_cwksp_alloc_objects, - ZSTD_cwksp_alloc_aligned_init_once, - ZSTD_cwksp_alloc_aligned, - ZSTD_cwksp_alloc_buffers -} ZSTD_cwksp_alloc_phase_e; - -/** - * Used to describe whether the workspace is statically allocated (and will not - * necessarily ever be freed), or if it's dynamically allocated and we can - * expect a well-formed caller to free this. - */ -typedef enum { - ZSTD_cwksp_dynamic_alloc, - ZSTD_cwksp_static_alloc -} ZSTD_cwksp_static_alloc_e; - -/** - * Zstd fits all its internal datastructures into a single contiguous buffer, - * so that it only needs to perform a single OS allocation (or so that a buffer - * can be provided to it and it can perform no allocations at all). This buffer - * is called the workspace. - * - * Several optimizations complicate that process of allocating memory ranges - * from this workspace for each internal datastructure: - * - * - These different internal datastructures have different setup requirements: - * - * - The static objects need to be cleared once and can then be trivially - * reused for each compression. - * - * - Various buffers don't need to be initialized at all--they are always - * written into before they're read. - * - * - The matchstate tables have a unique requirement that they don't need - * their memory to be totally cleared, but they do need the memory to have - * some bound, i.e., a guarantee that all values in the memory they've been - * allocated are less than some maximum value (which is the starting value - * for the indices that they will then use for compression). When this - * guarantee is provided to them, they can use the memory without any setup - * work. When it can't, they have to clear the area. - * - * - These buffers also have different alignment requirements. - * - * - We would like to reuse the objects in the workspace for multiple - * compressions without having to perform any expensive reallocation or - * reinitialization work. - * - * - We would like to be able to efficiently reuse the workspace across - * multiple compressions **even when the compression parameters change** and - * we need to resize some of the objects (where possible). - * - * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp - * abstraction was created. It works as follows: - * - * Workspace Layout: - * - * [ ... workspace ... 
] - [objects][tables ->] free space [<- buffers][<- aligned][<- init once] - * - * The various objects that live in the workspace are divided into the - * following categories, and are allocated separately: - * - * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, - * so that literally everything fits in a single buffer. Note: if present, - * this must be the first object in the workspace, since ZSTD_customFree{CCtx, - * CDict}() rely on a pointer comparison to see whether one or two frees are - * required. - * - * - Fixed size objects: these are fixed-size, fixed-count objects that are - * nonetheless "dynamically" allocated in the workspace so that we can - * control how they're initialized separately from the broader ZSTD_CCtx. - * Examples: - * - Entropy Workspace - * - 2 x ZSTD_compressedBlockState_t - * - CDict dictionary contents - * - * - Tables: these are any of several different datastructures (hash tables, - * chain tables, binary trees) that all respect a common format: they are - * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). - * Their sizes depend on the cparams. These tables are 64-byte aligned. - * - * - Init once: these buffers must be initialized at least once before - * use. They should be used when we want to skip memory initialization - * while not triggering memory checkers (like Valgrind) when reading - * from this memory without writing to it first. - * These buffers should be used carefully as they might contain data - * from previous compressions. - * Buffers are aligned to 64 bytes. - * - * - Aligned: these buffers don't require any initialization before they're - * used. The user of the buffer should make sure they write into a buffer - * location before reading from it. - * Buffers are aligned to 64 bytes. - * - * - Buffers: these buffers are used for various purposes that don't require - * any alignment or initialization before they're used. This means they can - * be moved around at no cost for a new compression. - * - * Allocating Memory: - * - * The various types of objects must be allocated in order, so they can be - * correctly packed into the workspace buffer. That order is: - * - * 1. Objects - * 2. Init once / Tables - * 3. Aligned / Tables - * 4. Buffers / Tables - * - * Attempts to reserve objects of different types out of order will fail. 
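 *
 * [editor's note: illustrative sketch, not part of the upstream source; the
 * size names are hypothetical.] A well-formed reservation sequence therefore
 * looks like:
 *   ZSTD_cwksp_reserve_object(ws, objectBytes);     (1. objects)
 *   ZSTD_cwksp_reserve_table(ws, hashTableBytes);   (2. tables)
 *   ZSTD_cwksp_reserve_aligned(ws, optStateBytes);  (3. aligned)
 *   ZSTD_cwksp_reserve_buffer(ws, blockBufBytes);   (4. buffers)
 * and a later ZSTD_cwksp_reserve_object() call on the same ws fails, because
 * the objects phase can never be re-entered.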
- */ -typedef struct { - void* workspace; - void* workspaceEnd; - - void* objectEnd; - void* tableEnd; - void* tableValidEnd; - void* allocStart; - void* initOnceStart; - - BYTE allocFailed; - int workspaceOversizedDuration; - ZSTD_cwksp_alloc_phase_e phase; - ZSTD_cwksp_static_alloc_e isStatic; -} ZSTD_cwksp; - -/*-************************************* -* Functions -***************************************/ - -MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); -MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws); - -MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { - (void)ws; - assert(ws->workspace <= ws->objectEnd); - assert(ws->objectEnd <= ws->tableEnd); - assert(ws->objectEnd <= ws->tableValidEnd); - assert(ws->tableEnd <= ws->allocStart); - assert(ws->tableValidEnd <= ws->allocStart); - assert(ws->allocStart <= ws->workspaceEnd); - assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); - assert(ws->workspace <= ws->initOnceStart); -#if ZSTD_MEMORY_SANITIZER - { - intptr_t const offset = __msan_test_shadow(ws->initOnceStart, - (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart); - (void)offset; -#if defined(ZSTD_MSAN_PRINT) - if(offset!=-1) { - __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32); - } -#endif - assert(offset==-1); - }; -#endif -} - -/** - * Align must be a power of 2. - */ -MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { - size_t const mask = align - 1; - assert((align & mask) == 0); - return (size + mask) & ~mask; -} - -/** - * Use this to determine how much space in the workspace we will consume to - * allocate this object. (Normally it should be exactly the size of the object, - * but under special conditions, like ASAN, where we pad each object, it might - * be larger.) - * - * Since tables aren't currently redzoned, you don't need to call through this - * to figure out how much space you need for the matchState tables. Everything - * else is though. - * - * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size(). - */ -MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { - if (size == 0) - return 0; -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; -#else - return size; -#endif -} - -/** - * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. - * Used to determine the number of bytes required for a given "aligned". - */ -MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { - return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES)); -} - -/** - * Returns the amount of additional space the cwksp must allocate - * for internal purposes (currently only alignment). - */ -MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { - /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES - * bytes to align the beginning of tables section and end of buffers; - */ - size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2; - return slackSpace; -} - - -/** - * Return the number of additional bytes required to align a pointer to the given number of bytes. - * alignBytes must be a power of two. 
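 *
 * [editor's note: worked example, not in the upstream source.] For
 * alignBytes == 64 and a ptr whose low bits are 19 (0x13), the function below
 * returns (64 - 19) & 63 == 45; for an already-aligned ptr it returns
 * (64 - 0) & 63 == 0, which is why the final mask by alignBytesMask matters.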
- */ -MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) { - size_t const alignBytesMask = alignBytes - 1; - size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; - assert((alignBytes & alignBytesMask) == 0); - assert(bytes < alignBytes); - return bytes; -} - -/** - * Returns the initial value for allocStart which is used to determine the position from - * which we can allocate from the end of the workspace. - */ -MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) { - return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1)); -} - -/** - * Internal function. Do not use directly. - * Reserves the given number of bytes within the aligned/buffer segment of the wksp, - * which counts from the end of the wksp (as opposed to the object/table segment). - * - * Returns a pointer to the beginning of that space. - */ -MEM_STATIC void* -ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) -{ - void* const alloc = (BYTE*)ws->allocStart - bytes; - void* const bottom = ws->tableEnd; - DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining", - alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); - ZSTD_cwksp_assert_internal_consistency(ws); - assert(alloc >= bottom); - if (alloc < bottom) { - DEBUGLOG(4, "cwksp: alloc failed!"); - ws->allocFailed = 1; - return NULL; - } - /* the area is reserved from the end of wksp. - * If it overlaps with tableValidEnd, it voids guarantees on values' range */ - if (alloc < ws->tableValidEnd) { - ws->tableValidEnd = alloc; - } - ws->allocStart = alloc; - return alloc; -} - -/** - * Moves the cwksp to the next phase, and does any necessary allocations. - * cwksp initialization must necessarily go through each phase in order. - * Returns a 0 on success, or zstd error - */ -MEM_STATIC size_t -ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) -{ - assert(phase >= ws->phase); - if (phase > ws->phase) { - /* Going from allocating objects to allocating initOnce / tables */ - if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once && - phase >= ZSTD_cwksp_alloc_aligned_init_once) { - ws->tableValidEnd = ws->objectEnd; - ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); - - { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */ - void *const alloc = ws->objectEnd; - size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); - void *const objectEnd = (BYTE *) alloc + bytesToAlign; - DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); - RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, - "table phase - alignment initial allocation failed!"); - ws->objectEnd = objectEnd; - ws->tableEnd = objectEnd; /* table area starts being empty */ - if (ws->tableValidEnd < ws->tableEnd) { - ws->tableValidEnd = ws->tableEnd; - } - } - } - ws->phase = phase; - ZSTD_cwksp_assert_internal_consistency(ws); - } - return 0; -} - -/** - * Returns whether this object/buffer/etc was allocated in this workspace. - */ -MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) -{ - return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); -} - -/** - * Internal function. Do not use directly. 
- */ -MEM_STATIC void* -ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) -{ - void* alloc; - if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) { - return NULL; - } - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* over-reserve space */ - bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; -#endif - - alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes); - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on - * either side. */ - if (alloc) { - alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; - if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - /* We need to keep the redzone poisoned while unpoisoning the bytes that - * are actually allocated. */ - __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); - } - } -#endif - - return alloc; -} - -/** - * Reserves and returns unaligned memory. - */ -MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) -{ - return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); -} - -/** - * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). - * This memory has been initialized at least once in the past. - * This doesn't mean it has been initialized this time, and it might contain data from previous - * operations. - * The main usage is for algorithms that might need read access into uninitialized memory. - * The algorithm must maintain safety under these conditions and must make sure it doesn't - * leak any of the past data (directly or in side channels). - */ -MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) -{ - size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); - void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); - assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); - if(ptr && ptr < ws->initOnceStart) { - /* We assume the memory following the current allocation is either: - * 1. Not usable as initOnce memory (end of workspace) - * 2. Another initOnce buffer that has been allocated before (and so was previously memset) - * 3. An ASAN redzone, in which case we don't want to write on it - * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart. - * Note that we assume here that MSAN and ASAN cannot run at the same time. */ - ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes)); - ws->initOnceStart = ptr; - } -#if ZSTD_MEMORY_SANITIZER - assert(__msan_test_shadow(ptr, bytes) == -1); -#endif - return ptr; -} - -/** - * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). - */ -MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) -{ - void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), - ZSTD_cwksp_alloc_aligned); - assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); - return ptr; -} - -/** - * Aligned on 64 bytes. These buffers have the special property that - * their values remain constrained, allowing us to reuse them without - * memset()-ing them. 
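 *
 * [editor's note: clarification, not in the upstream source.] "Constrained"
 * means every U32 slot keeps holding an in-range match index, so a reused
 * table only yields stale-but-bounded candidates that the match finders
 * validate anyway; when that bound cannot be guaranteed, the tables are
 * zeroed via ZSTD_cwksp_clean_tables() below instead.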
- */ -MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) -{ - const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once; - void* alloc; - void* end; - void* top; - - /* We can only start allocating tables after we are done reserving space for objects at the - * start of the workspace */ - if(ws->phase < phase) { - if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { - return NULL; - } - } - alloc = ws->tableEnd; - end = (BYTE *)alloc + bytes; - top = ws->allocStart; - - DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", - alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); - assert((bytes & (sizeof(U32)-1)) == 0); - ZSTD_cwksp_assert_internal_consistency(ws); - assert(end <= top); - if (end > top) { - DEBUGLOG(4, "cwksp: table alloc failed!"); - ws->allocFailed = 1; - return NULL; - } - ws->tableEnd = end; - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - __asan_unpoison_memory_region(alloc, bytes); - } -#endif - - assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); - assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); - return alloc; -} - -/** - * Aligned on sizeof(void*). - * Note : should happen only once, at workspace first initialization - */ -MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) -{ - size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); - void* alloc = ws->objectEnd; - void* end = (BYTE*)alloc + roundedBytes; - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* over-reserve space */ - end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; -#endif - - DEBUGLOG(4, - "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", - alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); - assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0); - assert(bytes % ZSTD_ALIGNOF(void*) == 0); - ZSTD_cwksp_assert_internal_consistency(ws); - /* we must be in the first phase, no advance is possible */ - if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { - DEBUGLOG(3, "cwksp: object alloc failed!"); - ws->allocFailed = 1; - return NULL; - } - ws->objectEnd = end; - ws->tableEnd = end; - ws->tableValidEnd = end; - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on - * either side. */ - alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; - if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - __asan_unpoison_memory_region(alloc, bytes); - } -#endif - - return alloc; -} - -MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) -{ - DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); - -#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the table reuse logic is sound, and that we don't - * access table space that we haven't cleaned, we re-"poison" the table - * space every time we mark it dirty. - * Since tableValidEnd space and initOnce space may overlap we don't poison - * the initOnce portion as it breaks its promise. This means that this poisoning - * check isn't always applied fully. 
*/ - { - size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; - assert(__msan_test_shadow(ws->objectEnd, size) == -1); - if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { - __msan_poison(ws->objectEnd, size); - } else { - assert(ws->initOnceStart >= ws->objectEnd); - __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd); - } - } -#endif - - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - ws->tableValidEnd = ws->objectEnd; - ZSTD_cwksp_assert_internal_consistency(ws); -} - -MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) { - DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean"); - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - if (ws->tableValidEnd < ws->tableEnd) { - ws->tableValidEnd = ws->tableEnd; - } - ZSTD_cwksp_assert_internal_consistency(ws); -} - -/** - * Zero the part of the allocated tables not already marked clean. - */ -MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { - DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables"); - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - if (ws->tableValidEnd < ws->tableEnd) { - ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd)); - } - ZSTD_cwksp_mark_tables_clean(ws); -} - -/** - * Invalidates table allocations. - * All other allocations remain valid. - */ -MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) -{ - DEBUGLOG(4, "cwksp: clearing tables!"); - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* We don't do this when the workspace is statically allocated, because - * when that is the case, we have no capability to hook into the end of the - * workspace's lifecycle to unpoison the memory. - */ - if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; - __asan_poison_memory_region(ws->objectEnd, size); - } -#endif - - ws->tableEnd = ws->objectEnd; - ZSTD_cwksp_assert_internal_consistency(ws); -} - -/** - * Invalidates all buffer, aligned, and table allocations. - * Object allocations remain valid. - */ -MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { - DEBUGLOG(4, "cwksp: clearing!"); - -#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) - /* To validate that the context reuse logic is sound, and that we don't - * access stuff that this compression hasn't initialized, we re-"poison" - * the workspace except for the areas in which we expect memory reuse - * without initialization (objects, valid tables area and init once - * memory). */ - { - if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { - size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd; - __msan_poison(ws->tableValidEnd, size); - } - } -#endif - -#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) - /* We don't do this when the workspace is statically allocated, because - * when that is the case, we have no capability to hook into the end of the - * workspace's lifecycle to unpoison the memory. 
- */ - if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; - __asan_poison_memory_region(ws->objectEnd, size); - } -#endif - - ws->tableEnd = ws->objectEnd; - ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); - ws->allocFailed = 0; - if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) { - ws->phase = ZSTD_cwksp_alloc_aligned_init_once; - } - ZSTD_cwksp_assert_internal_consistency(ws); -} - -MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { - return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); -} - -MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { - return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) - + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); -} - -/** - * The provided workspace takes ownership of the buffer [start, start+size). - * Any existing values in the workspace are ignored (the previously managed - * buffer, if present, must be separately freed). - */ -MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) { - DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size); - assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ - ws->workspace = start; - ws->workspaceEnd = (BYTE*)start + size; - ws->objectEnd = ws->workspace; - ws->tableValidEnd = ws->objectEnd; - ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); - ws->phase = ZSTD_cwksp_alloc_objects; - ws->isStatic = isStatic; - ZSTD_cwksp_clear(ws); - ws->workspaceOversizedDuration = 0; - ZSTD_cwksp_assert_internal_consistency(ws); -} - -MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { - void* workspace = ZSTD_customMalloc(size, customMem); - DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size); - RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); - ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc); - return 0; -} - -MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { - void *ptr = ws->workspace; - DEBUGLOG(4, "cwksp: freeing workspace"); -#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) - if (ptr != NULL && customMem.customFree != NULL) { - __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws)); - } -#endif - ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp)); - ZSTD_customFree(ptr, customMem); -} - -/** - * Moves the management of a workspace from one cwksp to another. The src cwksp - * is left in an invalid state (src must be re-init()'ed before it's used again). - */ -MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { - *dst = *src; - ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); -} - -MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { - return ws->allocFailed; -} - -/*-************************************* -* Functions Checking Free Space -***************************************/ - -/* ZSTD_alignmentSpaceWithinBounds() : - * Returns if the estimated space needed for a wksp is within an acceptable limit of the - * actual amount of space used. 
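 *
 * [editor's note: clarification, not in the upstream source.] Concretely, the
 * check below accepts any actual usage in the closed interval
 * [estimatedSpace - 2*ZSTD_CWKSP_ALIGNMENT_BYTES, estimatedSpace]: the
 * estimate may overshoot real usage by at most 128 bytes of alignment slack.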
- */ -MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) { - /* We have an alignment space between objects and tables, and between tables and buffers, so we can have up to twice - * the alignment bytes difference between estimation and actual usage */ - return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) && - ZSTD_cwksp_used(ws) <= estimatedSpace; -} - - -MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) { - return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); -} - -MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) { - return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace; -} - -MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) { - return ZSTD_cwksp_check_available( - ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR); -} - -MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) { - return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) - && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; -} - -MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( - ZSTD_cwksp* ws, size_t additionalNeededSpace) { - if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) { - ws->workspaceOversizedDuration++; - } else { - ws->workspaceOversizedDuration = 0; - } -} - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_CWKSP_H */ diff --git a/zstandard_android/src/compress/zstd_double_fast.c b/zstandard_android/src/compress/zstd_double_fast.c deleted file mode 100644 index 50d698b..0000000 --- a/zstandard_android/src/compress/zstd_double_fast.c +++ /dev/null @@ -1,778 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#include "zstd_compress_internal.h" -#include "zstd_double_fast.h" - -#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashLarge = ms->hashTable; - U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; - U32 const mls = cParams->minMatch; - U32* const hashSmall = ms->chainTable; - U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; - const BYTE* const base = ms->window.base; - const BYTE* ip = base + ms->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const U32 fastHashFillStep = 3; - - /* Always insert every fastHashFillStep position into the hash tables. - * Insert the other positions into the large hash table if their entry - * is empty. 
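 *
 * [editor's note: clarification, not in the upstream source.] With
 * fastHashFillStep == 3, position p of each triple (p, p+1, p+2) is written
 * into both tables; p+1 and p+2 go only into the large table, and only when
 * their slot is still empty. In ZSTD_dtlm_fast mode the inner loop breaks
 * after i == 0, so only every third position is indexed at all.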
- */ - for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { - U32 const curr = (U32)(ip - base); - U32 i; - for (i = 0; i < fastHashFillStep; ++i) { - size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); - size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); - if (i == 0) { - ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); - } - if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { - ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); - } - /* Only load extra positions for ZSTD_dtlm_full */ - if (dtlm == ZSTD_dtlm_fast) - break; - } } -} - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashLarge = ms->hashTable; - U32 const hBitsL = cParams->hashLog; - U32 const mls = cParams->minMatch; - U32* const hashSmall = ms->chainTable; - U32 const hBitsS = cParams->chainLog; - const BYTE* const base = ms->window.base; - const BYTE* ip = base + ms->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const U32 fastHashFillStep = 3; - - /* Always insert every fastHashFillStep position into the hash tables. - * Insert the other positions into the large hash table if their entry - * is empty. - */ - for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { - U32 const curr = (U32)(ip - base); - U32 i; - for (i = 0; i < fastHashFillStep; ++i) { - size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); - size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); - if (i == 0) - hashSmall[smHash] = curr + i; - if (i == 0 || hashLarge[lgHash] == 0) - hashLarge[lgHash] = curr + i; - /* Only load extra positions for ZSTD_dtlm_full */ - if (dtlm == ZSTD_dtlm_fast) - break; - } } -} - -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, - const void* const end, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp) -{ - if (tfp == ZSTD_tfp_forCDict) { - ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); - } else { - ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); - } -} - - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_doubleFast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls /* template */) -{ - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32* const hashLong = ms->hashTable; - const U32 hBitsL = cParams->hashLog; - U32* const hashSmall = ms->chainTable; - const U32 hBitsS = cParams->chainLog; - const BYTE* const base = ms->window.base; - const BYTE* const istart = (const BYTE*)src; - const BYTE* anchor = istart; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - /* presumes that, if there is a dictionary, it must be using Attach mode */ - const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - const BYTE* const prefixLowest = base + prefixLowestIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved1 = 0, offsetSaved2 = 0; - - size_t mLength; - U32 offset; - U32 curr; - - /* how many positions to search before increasing step size */ - const size_t kStepIncr = 1 << kSearchStrength; - /* the position at which to increment the step size if no match is found */ - const BYTE* nextStep; - size_t step; /* the current step 
size */ - - size_t hl0; /* the long hash at ip */ - size_t hl1; /* the long hash at ip1 */ - - U32 idxl0; /* the long match index for ip */ - U32 idxl1; /* the long match index for ip1 */ - - const BYTE* matchl0; /* the long match for ip */ - const BYTE* matchs0; /* the short match for ip */ - const BYTE* matchl1; /* the long match for ip1 */ - const BYTE* matchs0_safe; /* matchs0 or safe address */ - - const BYTE* ip = istart; /* the current position */ - const BYTE* ip1; /* the next position */ - /* Array of ~random data, should have low probability of matching data; - * we load from here instead of from tables, if matchl0/matchl1 are - * invalid indices. Used to avoid unpredictable branches. */ - const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4}; - - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); - - /* init */ - ip += ((ip - prefixLowest) == 0); - { - U32 const current = (U32)(ip - base); - U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); - U32 const maxRep = current - windowLow; - if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; - } - - /* Outer Loop: one iteration per match found and stored */ - while (1) { - step = 1; - nextStep = ip + kStepIncr; - ip1 = ip + step; - - if (ip1 > ilimit) { - goto _cleanup; - } - - hl0 = ZSTD_hashPtr(ip, hBitsL, 8); - idxl0 = hashLong[hl0]; - matchl0 = base + idxl0; - - /* Inner Loop: one iteration per search / position */ - do { - const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls); - const U32 idxs0 = hashSmall[hs0]; - curr = (U32)(ip-base); - matchs0 = base + idxs0; - - hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */ - - /* check noDict repcode */ - if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); - goto _match_stored; - } - - hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); - - /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch. - * However the expression below compiles into a conditional move. Since - * match is unlikely and we only *branch* on idxl0 > prefixLowestIndex - * if there is a match, all branches become predictable. */ - { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); - - /* check prefix long match */ - if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { - mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; - offset = (U32)(ip-matchl0); - while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ - goto _match_found; - } } - - idxl1 = hashLong[hl1]; - matchl1 = base + idxl1; - - /* Same optimization as matchl0 above */ - matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); - - /* check prefix short match */ - if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) { - goto _search_next_long; - } - - if (ip1 >= nextStep) { - PREFETCH_L1(ip1 + 64); - PREFETCH_L1(ip1 + 128); - step++; - nextStep += kStepIncr; - } - ip = ip1; - ip1 += step; - - hl0 = hl1; - idxl0 = idxl1; - matchl0 = matchl1; - #if defined(__aarch64__) - PREFETCH_L1(ip+256); - #endif - } while (ip1 <= ilimit); - -_cleanup: - /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), - * rotate saved offsets. 
See comment in ZSTD_compressBlock_fast_noDict for more context. */ - offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; - - /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved1; - rep[1] = offset_2 ? offset_2 : offsetSaved2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); - -_search_next_long: - - /* short match found: let's check for a longer one */ - mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; - offset = (U32)(ip - matchs0); - - /* check long match at +1 position */ - if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) { - size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8; - if (l1len > mLength) { - /* use the long match instead */ - ip = ip1; - mLength = l1len; - offset = (U32)(ip-matchl1); - matchs0 = matchl1; - } - } - - while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */ - - /* fall-through */ - -_match_found: /* requires ip, offset, mLength */ - offset_2 = offset_1; - offset_1 = offset; - - if (step < 4) { - /* It is unsafe to write this value back to the hashtable when ip1 is - * greater than or equal to the new ip we will have after we're done - * processing this match. Rather than perform that test directly - * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler - * more predictable test. The minmatch even if we take a short match is - * 4 bytes, so as long as step, the distance between ip and ip1 - * (initially) is less than 4, we know ip1 < new ip. */ - hashLong[hl1] = (U32)(ip1 - base); - } - - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - -_match_stored: - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Complementary insertion */ - /* done after iLimit test, as candidates could be > iend-8 */ - { U32 const indexToInsert = curr+2; - hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); - hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); - } - - /* check immediate repcode */ - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength); - ip += rLength; - anchor = ip; - continue; /* faster when present ... (?) 
*/ - } - } - } -} - - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, - U32 const mls /* template */) -{ - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32* const hashLong = ms->hashTable; - const U32 hBitsL = cParams->hashLog; - U32* const hashSmall = ms->chainTable; - const U32 hBitsS = cParams->chainLog; - const BYTE* const base = ms->window.base; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - /* presumes that, if there is a dictionary, it must be using Attach mode */ - const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - const BYTE* const prefixLowest = base + prefixLowestIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; - - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dictCParams = &dms->cParams; - const U32* const dictHashLong = dms->hashTable; - const U32* const dictHashSmall = dms->chainTable; - const U32 dictStartIndex = dms->window.dictLimit; - const BYTE* const dictBase = dms->window.base; - const BYTE* const dictStart = dictBase + dictStartIndex; - const BYTE* const dictEnd = dms->window.nextSrc; - const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); - const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; - const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; - const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); - - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); - - /* if a dictionary is attached, it must be within window range */ - assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); - - if (ms->prefetchCDictTables) { - size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); - size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32); - PREFETCH_AREA(dictHashLong, hashTableBytes); - PREFETCH_AREA(dictHashSmall, chainTableBytes); - } - - /* init */ - ip += (dictAndPrefixLength == 0); - - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. 
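The dictHBitsL / dictHBitsS values above reserve ZSTD_SHORT_CACHE_TAG_BITS extra hash bits for a "short cache tag". A sketch of that packing, with hypothetical names (the real helpers are ZSTD_writeTaggedIndex and ZSTD_comparePackedTags) and assuming 8 tag bits: each dictionary table slot stores the position index shifted up, with a small slice of the hash packed into the low bits, so most false candidates are rejected by the tag compare alone, without touching dictionary content and risking a cache miss.

#include <stddef.h>
#include <stdint.h>

#define TAG_BITS 8                         /* like ZSTD_SHORT_CACHE_TAG_BITS */
#define TAG_MASK ((1u << TAG_BITS) - 1)

/* hashAndTag is computed with (hashLog + TAG_BITS) hash bits: the high bits
 * pick the table slot, the low TAG_BITS become the stored tag.
 * Assumes idx < (1u << 24) so the shift loses nothing. */
static void write_tagged_index(uint32_t* table, size_t hashAndTag, uint32_t idx)
{
    table[hashAndTag >> TAG_BITS] =
        (idx << TAG_BITS) | (uint32_t)(hashAndTag & TAG_MASK);
}

static int tags_match(uint32_t packedEntry, size_t hashAndTag)
{
    return (packedEntry & TAG_MASK) == (uint32_t)(hashAndTag & TAG_MASK);
}

static uint32_t packed_index(uint32_t packedEntry)  /* meaningful after a tag hit */
{
    return packedEntry >> TAG_BITS;
}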
*/ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ - size_t mLength; - U32 offset; - size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); - size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); - size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); - U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS]; - U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS]; - int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); - int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); - U32 const curr = (U32)(ip-base); - U32 const matchIndexL = hashLong[h2]; - U32 matchIndexS = hashSmall[h]; - const BYTE* matchLong = base + matchIndexL; - const BYTE* match = base + matchIndexS; - const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (repIndex < prefixLowestIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ - - /* check repcode */ - if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); - goto _match_stored; - } - - if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { - /* check prefix long match */ - mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; - offset = (U32)(ip-matchLong); - while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - goto _match_found; - } else if (dictTagsMatchL) { - /* check dictMatchState long match */ - U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS; - const BYTE* dictMatchL = dictBase + dictMatchIndexL; - assert(dictMatchL < dictEnd); - - if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { - mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8; - offset = (U32)(curr - dictMatchIndexL - dictIndexDelta); - while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */ - goto _match_found; - } } - - if (matchIndexS > prefixLowestIndex) { - /* short match candidate */ - if (MEM_read32(match) == MEM_read32(ip)) { - goto _search_next_long; - } - } else if (dictTagsMatchS) { - /* check dictMatchState short match */ - U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS; - match = dictBase + dictMatchIndexS; - matchIndexS = dictMatchIndexS + dictIndexDelta; - - if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) { - goto _search_next_long; - } } - - ip += ((ip-anchor) >> kSearchStrength) + 1; -#if defined(__aarch64__) - PREFETCH_L1(ip+256); -#endif - continue; - -_search_next_long: - { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8); - U32 const matchIndexL3 = hashLong[hl3]; - U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> 
ZSTD_SHORT_CACHE_TAG_BITS]; - int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); - const BYTE* matchL3 = base + matchIndexL3; - hashLong[hl3] = curr + 1; - - /* check prefix long +1 match */ - if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) { - mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; - ip++; - offset = (U32)(ip-matchL3); - while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ - goto _match_found; - } else if (dictTagsMatchL3) { - /* check dict long +1 match */ - U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS; - const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; - assert(dictMatchL3 < dictEnd); - if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { - mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8; - ip++; - offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta); - while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */ - goto _match_found; - } } } - - /* if no long +1 match, explore the short match we found */ - if (matchIndexS < prefixLowestIndex) { - mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; - offset = (U32)(curr - matchIndexS); - while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - } else { - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - offset = (U32)(ip - match); - while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - } - -_match_found: - offset_2 = offset_1; - offset_1 = offset; - - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - -_match_stored: - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Complementary insertion */ - /* done after iLimit test, as candidates could be > iend-8 */ - { U32 const indexToInsert = curr+2; - hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); - hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); - } - - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? - dictBase + repIndex2 - dictIndexDelta : - base + repIndex2; - if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2)) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } - } - } /* while (ip < ilimit) */ - - /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} - -#define ZSTD_GEN_DFAST_FN(dictMode, mls) \ - static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ - void const* src, size_t srcSize) \ - { \ - return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ - } - -ZSTD_GEN_DFAST_FN(noDict, 4) -ZSTD_GEN_DFAST_FN(noDict, 5) -ZSTD_GEN_DFAST_FN(noDict, 6) -ZSTD_GEN_DFAST_FN(noDict, 7) - -ZSTD_GEN_DFAST_FN(dictMatchState, 4) -ZSTD_GEN_DFAST_FN(dictMatchState, 5) -ZSTD_GEN_DFAST_FN(dictMatchState, 6) -ZSTD_GEN_DFAST_FN(dictMatchState, 7) - - -size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - const U32 mls = ms->cParams.minMatch; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); - } -} - - -size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - const U32 mls = ms->cParams.minMatch; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); - } -} - - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_doubleFast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, - U32 const mls /* template */) -{ - ZSTD_compressionParameters const* cParams = &ms->cParams; - U32* const hashLong = ms->hashTable; - U32 const hBitsL = cParams->hashLog; - U32* const hashSmall = ms->chainTable; - U32 const hBitsS = cParams->chainLog; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ms->window.base; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); - const U32 dictStartIndex = lowLimit; - const U32 dictLimit = ms->window.dictLimit; - const U32 prefixStartIndex = (dictLimit > lowLimit) ? 
dictLimit : lowLimit; - const BYTE* const prefixStart = base + prefixStartIndex; - const BYTE* const dictBase = ms->window.dictBase; - const BYTE* const dictStart = dictBase + dictStartIndex; - const BYTE* const dictEnd = dictBase + prefixStartIndex; - U32 offset_1=rep[0], offset_2=rep[1]; - - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize); - - /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ - if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); - const U32 matchIndex = hashSmall[hSmall]; - const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - - const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); - const U32 matchLongIndex = hashLong[hLong]; - const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base; - const BYTE* matchLong = matchLongBase + matchLongIndex; - - const U32 curr = (U32)(ip-base); - const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ - const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - size_t mLength; - hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ - - if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) - & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); - } else { - if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { - const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; - const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; - U32 offset; - mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8; - offset = curr - matchLongIndex; - while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - - } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { - size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - U32 const matchIndex3 = hashLong[h3]; - const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base; - const BYTE* match3 = match3Base + matchIndex3; - U32 offset; - hashLong[h3] = curr + 1; - if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { - const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; - const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart; - mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8; - ip++; - offset = curr+1 - matchIndex3; - while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ - } else { - const BYTE* const matchEnd = matchIndex < prefixStartIndex ? 
dictEnd : iend; - const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; - mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; - offset = curr - matchIndex; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - } - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - - } else { - ip += ((ip-anchor) >> kSearchStrength) + 1; - continue; - } } - - /* move to next sequence start */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Complementary insertion */ - /* done after iLimit test, as candidates could be > iend-8 */ - { U32 const indexToInsert = curr+2; - hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); - hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); - } - - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) - & (offset_2 <= current2 - dictStartIndex)) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } - - /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} - -ZSTD_GEN_DFAST_FN(extDict, 4) -ZSTD_GEN_DFAST_FN(extDict, 5) -ZSTD_GEN_DFAST_FN(extDict, 6) -ZSTD_GEN_DFAST_FN(extDict, 7) - -size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - U32 const mls = ms->cParams.minMatch; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); - } -} - -#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ diff --git a/zstandard_android/src/compress/zstd_double_fast.h b/zstandard_android/src/compress/zstd_double_fast.h deleted file mode 100644 index ce6ed8c..0000000 --- a/zstandard_android/src/compress/zstd_double_fast.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). 
- * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_DOUBLE_FAST_H -#define ZSTD_DOUBLE_FAST_H - -#if defined (__cplusplus) -extern "C" { -#endif - -#include "../common/mem.h" /* U32 */ -#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ - -#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR - -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp); - -size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict -#else -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL -#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/zstandard_android/src/compress/zstd_fast.c b/zstandard_android/src/compress/zstd_fast.c deleted file mode 100644 index 5373e36..0000000 --- a/zstandard_android/src/compress/zstd_fast.c +++ /dev/null @@ -1,985 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ -#include "zstd_fast.h" - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, - const void* const end, - ZSTD_dictTableLoadMethod_e dtlm) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; - U32 const mls = cParams->minMatch; - const BYTE* const base = ms->window.base; - const BYTE* ip = base + ms->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const U32 fastHashFillStep = 3; - - /* Currently, we always use ZSTD_dtlm_full for filling CDict tables. - * Feel free to remove this assert if there's a good reason! */ - assert(dtlm == ZSTD_dtlm_full); - - /* Always insert every fastHashFillStep position into the hash table. - * Insert the other positions if their hash entry is empty. 
- */ - for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { - U32 const curr = (U32)(ip - base); - { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls); - ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); } - - if (dtlm == ZSTD_dtlm_fast) continue; - /* Only load extra positions for ZSTD_dtlm_full */ - { U32 p; - for (p = 1; p < fastHashFillStep; ++p) { - size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); - if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ - ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); - } } } } -} - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, - const void* const end, - ZSTD_dictTableLoadMethod_e dtlm) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hBits = cParams->hashLog; - U32 const mls = cParams->minMatch; - const BYTE* const base = ms->window.base; - const BYTE* ip = base + ms->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const U32 fastHashFillStep = 3; - - /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables. - * Feel free to remove this assert if there's a good reason! */ - assert(dtlm == ZSTD_dtlm_fast); - - /* Always insert every fastHashFillStep position into the hash table. - * Insert the other positions if their hash entry is empty. - */ - for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { - U32 const curr = (U32)(ip - base); - size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); - hashTable[hash0] = curr; - if (dtlm == ZSTD_dtlm_fast) continue; - /* Only load extra positions for ZSTD_dtlm_full */ - { U32 p; - for (p = 1; p < fastHashFillStep; ++p) { - size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); - if (hashTable[hash] == 0) { /* not yet filled */ - hashTable[hash] = curr + p; - } } } } -} - -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, - const void* const end, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp) -{ - if (tfp == ZSTD_tfp_forCDict) { - ZSTD_fillHashTableForCDict(ms, end, dtlm); - } else { - ZSTD_fillHashTableForCCtx(ms, end, dtlm); - } -} - - -typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit); - -static int -ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) -{ - /* Array of ~random data, should have low probability of matching data. - * Load from here if the index is invalid. - * Used to avoid unpredictable branches. */ - static const BYTE dummy[] = {0x12,0x34,0x56,0x78}; - - /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. - * However expression below compiles into conditional move. - */ - const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); - /* Note: this used to be written as : return test1 && test2; - * Unfortunately, once inlined, these tests become branches, - * in which case it becomes critical that they are executed in the right order (test1 then test2). - * So we have to write these tests in a specific manner to ensure their ordering. 
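A freestanding sketch of the ordering trick just described, with hypothetical names. Written as `return test1 && test2;`, an inlining compiler may turn the short-circuit back into branches and evaluate the unpredictable index test first; an empty asm statement acts as an optimization barrier that keeps the cheap, predictable byte compare strictly ahead of it:

#include <stdint.h>
#include <string.h>

static int match4_found(const uint8_t* ip, const uint8_t* safeAddr,
                        uint32_t matchIdx, uint32_t lowLimit)
{
    uint32_t cur, mval;
    memcpy(&cur, ip, sizeof cur);
    memcpy(&mval, safeAddr, sizeof mval);
    if (cur != mval) return 0;       /* test 1: almost always taken, predictable */
#if defined(__GNUC__)
    __asm__("");                     /* barrier: don't fuse with test 2 */
#endif
    return matchIdx >= lowLimit;     /* test 2: only evaluated on a byte hit */
}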
- */ - if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0; - /* force ordering of these tests, which matters once the function is inlined, as they become branches */ -#if defined(__GNUC__) - __asm__(""); -#endif - return matchIdx >= idxLowLimit; -} - -static int -ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) -{ - /* using a branch instead of a cmov, - * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, - * aka almost all candidates are within range */ - U32 mval; - if (matchIdx >= idxLowLimit) { - mval = MEM_read32(matchAddress); - } else { - mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */ - } - - return (MEM_read32(currentPtr) == mval); -} - - -/** - * If you squint hard enough (and ignore repcodes), the search operation at any - * given position is broken into 4 stages: - * - * 1. Hash (map position to hash value via input read) - * 2. Lookup (map hash val to index via hashtable read) - * 3. Load (map index to value at that position via input read) - * 4. Compare - * - * Each of these steps involves a memory read at an address which is computed - * from the previous step. This means these steps must be sequenced and their - * latencies are cumulative. - * - * Rather than do 1->2->3->4 sequentially for a single position before moving - * onto the next, this implementation interleaves these operations across the - * next few positions: - * - * R = Repcode Read & Compare - * H = Hash - * T = Table Lookup - * M = Match Read & Compare - * - * Pos | Time --> - * ----+------------------- - * N | ... M - * N+1 | ... TM - * N+2 | R H T M - * N+3 | H TM - * N+4 | R H T M - * N+5 | H ... - * N+6 | R ... - * - * This is very much analogous to the pipelining of execution in a CPU. And just - * like a CPU, we have to dump the pipeline when we find a match (i.e., take a - * branch). - * - * When this happens, we throw away our current state, and do the following prep - * to re-enter the loop: - * - * Pos | Time --> - * ----+------------------- - * N | H T - * N+1 | H - * - * This is also the work we do at the beginning to enter the loop initially. - */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_fast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, - U32 const mls, int useCmov) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hlog = cParams->hashLog; - size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */ - const BYTE* const base = ms->window.base; - const BYTE* const istart = (const BYTE*)src; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - const BYTE* const prefixStart = base + prefixStartIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - - const BYTE* anchor = istart; - const BYTE* ip0 = istart; - const BYTE* ip1; - const BYTE* ip2; - const BYTE* ip3; - U32 current0; - - U32 rep_offset1 = rep[0]; - U32 rep_offset2 = rep[1]; - U32 offsetSaved1 = 0, offsetSaved2 = 0; - - size_t hash0; /* hash for ip0 */ - size_t hash1; /* hash for ip1 */ - U32 matchIdx; /* match idx for ip0 */ - - U32 offcode; - const BYTE* match0; - size_t mLength; - - /* ip0 and ip1 are always adjacent. 
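The four stages of the pipelined search described above, reduced to a self-contained skeleton (hypothetical helpers; the real loop interleaves more positions and adds repcode checks, index-validity tests, and step acceleration, all omitted here for brevity). The essential move is issuing the table lookup for position N+1 and the hash for the following position before the match compare for position N resolves, so the dependent loads of consecutive positions overlap:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HLOG 15
static uint32_t table[1u << HLOG];

static size_t hash4(const uint8_t* p)               /* stage H */
{
    uint32_t v;
    memcpy(&v, p, sizeof v);
    return (size_t)((v * 2654435761u) >> (32 - HLOG));
}

/* Caller guarantees a few readable bytes past ilimit, as zstd does. */
static const uint8_t* find_match(const uint8_t* base, const uint8_t* ip,
                                 const uint8_t* ilimit)
{
    size_t h0 = hash4(ip);
    size_t h1 = hash4(ip + 1);
    uint32_t idx = table[h0];                       /* stage T for ip */
    while (ip < ilimit) {
        uint32_t a, b;
        uint32_t const idxNext = table[h1];         /* stage T for ip+1, issued early */
        table[h0] = (uint32_t)(ip - base);          /* record current position */
        memcpy(&a, ip, sizeof a);
        memcpy(&b, base + idx, sizeof b);           /* stage M for ip */
        if (a == b) return base + idx;              /* hit: "dump the pipeline" */
        ip += 1;
        idx = idxNext;
        h0 = h1;
        h1 = hash4(ip + 1);                         /* stage H for the new ip+1 */
    }
    return NULL;                                    /* no match found */
}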
The targetLength skipping and - * uncompressibility acceleration is applied to every other position, - * matching the behavior of #1562. step therefore represents the gap - * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ - size_t step; - const BYTE* nextStep; - const size_t kStepIncr = (1 << (kSearchStrength - 1)); - const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch; - - DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); - ip0 += (ip0 == prefixStart); - { U32 const curr = (U32)(ip0 - base); - U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); - U32 const maxRep = curr - windowLow; - if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0; - if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0; - } - - /* start each op */ -_start: /* Requires: ip0 */ - - step = stepSize; - nextStep = ip0 + kStepIncr; - - /* calculate positions, ip0 - anchor == 0, so we skip step calc */ - ip1 = ip0 + 1; - ip2 = ip0 + step; - ip3 = ip2 + 1; - - if (ip3 >= ilimit) { - goto _cleanup; - } - - hash0 = ZSTD_hashPtr(ip0, hlog, mls); - hash1 = ZSTD_hashPtr(ip1, hlog, mls); - - matchIdx = hashTable[hash0]; - - do { - /* load repcode match for ip[2]*/ - const U32 rval = MEM_read32(ip2 - rep_offset1); - - /* write back hash table entry */ - current0 = (U32)(ip0 - base); - hashTable[hash0] = current0; - - /* check repcode at ip[2] */ - if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) { - ip0 = ip2; - match0 = ip0 - rep_offset1; - mLength = ip0[-1] == match0[-1]; - ip0 -= mLength; - match0 -= mLength; - offcode = REPCODE1_TO_OFFBASE; - mLength += 4; - - /* Write next hash table entry: it's already calculated. - * This write is known to be safe because ip1 is before the - * repcode (ip2). */ - hashTable[hash1] = (U32)(ip1 - base); - - goto _match; - } - - if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { - /* Write next hash table entry (it's already calculated). - * This write is known to be safe because the ip1 == ip0 + 1, - * so searching will resume after ip1 */ - hashTable[hash1] = (U32)(ip1 - base); - - goto _offset; - } - - /* lookup ip[1] */ - matchIdx = hashTable[hash1]; - - /* hash ip[2] */ - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - - /* advance to next positions */ - ip0 = ip1; - ip1 = ip2; - ip2 = ip3; - - /* write back hash table entry */ - current0 = (U32)(ip0 - base); - hashTable[hash0] = current0; - - if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { - /* Write next hash table entry, since it's already calculated */ - if (step <= 4) { - /* Avoid writing an index if it's >= position where search will resume. - * The minimum possible match has length 4, so search can resume at ip0 + 4. - */ - hashTable[hash1] = (U32)(ip1 - base); - } - goto _offset; - } - - /* lookup ip[1] */ - matchIdx = hashTable[hash1]; - - /* hash ip[2] */ - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - - /* advance to next positions */ - ip0 = ip1; - ip1 = ip2; - ip2 = ip0 + step; - ip3 = ip1 + step; - - /* calculate step */ - if (ip2 >= nextStep) { - step++; - PREFETCH_L1(ip1 + 64); - PREFETCH_L1(ip1 + 128); - nextStep += kStepIncr; - } - } while (ip3 < ilimit); - -_cleanup: - /* Note that there are probably still a couple positions one could search. - * However, it seems to be a meaningful performance hit to try to search - * them. So let's not. */ - - /* When the repcodes are outside of the prefix, we set them to zero before the loop. 
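One subtle line in the repcode path above deserves unpacking: `mLength = ip0[-1] == match0[-1];` followed by `ip0 -= mLength; match0 -= mLength;` extends the match backward by at most one byte without any branch, since the comparison result is simply 0 or 1. A sketch of the same idea with a hypothetical name (one byte of headroom before ip is available because the repcode is tested at a position strictly past the anchor):

#include <stddef.h>
#include <stdint.h>

/* Extend a verified 4-byte repcode match backward by 0 or 1 bytes,
 * branchlessly, and report the resulting length. */
static size_t rep_match_start(const uint8_t** ipPtr, const uint8_t** matchPtr)
{
    const uint8_t* ip = *ipPtr;
    const uint8_t* m  = *matchPtr;
    size_t const back = (size_t)(ip[-1] == m[-1]);   /* 0 or 1, no branch */
    *ipPtr    = ip - back;
    *matchPtr = m - back;
    return 4 + back;   /* 4 bytes already verified, plus the extension */
}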
- * When the offsets are still zero, we need to restore them after the block to have a correct - * repcode history. If only one offset was invalid, it is easy. The tricky case is when both - * offsets were invalid. We need to figure out which offset to refill with. - * - If both offsets are zero they are in the same order. - * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`. - * - If only one is zero, we need to decide which offset to restore. - * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1. - * - It is impossible for rep_offset2 to be non-zero. - * - * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then - * set rep[0] = rep_offset1 and rep[1] = offsetSaved1. - */ - offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2; - - /* save reps for next block */ - rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1; - rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); - -_offset: /* Requires: ip0, idx */ - - /* Compute the offset code. */ - match0 = base + matchIdx; - rep_offset2 = rep_offset1; - rep_offset1 = (U32)(ip0-match0); - offcode = OFFSET_TO_OFFBASE(rep_offset1); - mLength = 4; - - /* Count the backwards match length. */ - while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { - ip0--; - match0--; - mLength++; - } - -_match: /* Requires: ip0, match0, offcode */ - - /* Count the forward length. */ - mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); - - ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); - - ip0 += mLength; - anchor = ip0; - - /* Fill table and check for immediate repcode. */ - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+current0+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); - - if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */ - while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4; - { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); - ip0 += rLength; - ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength); - anchor = ip0; - continue; /* faster when present (confirmed on gcc-8) ... (?) 
*/ - } } } - - goto _start; -} - -#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \ - static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \ - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ - void const* src, size_t srcSize) \ - { \ - return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \ - } - -ZSTD_GEN_FAST_FN(noDict, 4, 1) -ZSTD_GEN_FAST_FN(noDict, 5, 1) -ZSTD_GEN_FAST_FN(noDict, 6, 1) -ZSTD_GEN_FAST_FN(noDict, 7, 1) - -ZSTD_GEN_FAST_FN(noDict, 4, 0) -ZSTD_GEN_FAST_FN(noDict, 5, 0) -ZSTD_GEN_FAST_FN(noDict, 6, 0) -ZSTD_GEN_FAST_FN(noDict, 7, 0) - -size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - U32 const mml = ms->cParams.minMatch; - /* use cmov when "candidate in range" branch is likely unpredictable */ - int const useCmov = ms->cParams.windowLog < 19; - assert(ms->dictMatchState == NULL); - if (useCmov) { - switch(mml) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); - } - } else { - /* use a branch instead */ - switch(mml) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); - } - } -} - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_fast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls, U32 const hasStep) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hlog = cParams->hashLog; - /* support stepSize of 0 */ - U32 const stepSize = cParams->targetLength + !(cParams->targetLength); - const BYTE* const base = ms->window.base; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip0 = istart; - const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */ - const BYTE* anchor = istart; - const U32 prefixStartIndex = ms->window.dictLimit; - const BYTE* const prefixStart = base + prefixStartIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; - - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; - const U32* const dictHashTable = dms->hashTable; - const U32 dictStartIndex = dms->window.dictLimit; - const BYTE* const dictBase = dms->window.base; - const BYTE* const dictStart = dictBase + dictStartIndex; - const BYTE* const dictEnd = dms->window.nextSrc; - const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); - const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart); - const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; - - /* if a dictionary is still attached, it necessarily means that - * it is 
within window size. So we just check it. */ - const U32 maxDistance = 1U << cParams->windowLog; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - assert(endIndex - prefixStartIndex <= maxDistance); - (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ - - (void)hasStep; /* not currently specialized on whether it's accelerated */ - - /* ensure there will be no underflow - * when translating a dict index into a local index */ - assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); - - if (ms->prefetchCDictTables) { - size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); - PREFETCH_AREA(dictHashTable, hashTableBytes); - } - - /* init */ - DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); - ip0 += (dictAndPrefixLength == 0); - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. */ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - - /* Outer search loop */ - assert(stepSize >= 1); - while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */ - size_t mLength; - size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls); - - size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); - U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS]; - int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); - - U32 matchIndex = hashTable[hash0]; - U32 curr = (U32)(ip0 - base); - size_t step = stepSize; - const size_t kStepIncr = 1 << kSearchStrength; - const BYTE* nextStep = ip0 + kStepIncr; - - /* Inner search loop */ - while (1) { - const BYTE* match = base + matchIndex; - const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (repIndex < prefixStartIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls); - size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); - hashTable[hash0] = curr; /* update hash table */ - - if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; - mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; - ip0++; - ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); - break; - } - - if (dictTagsMatch) { - /* Found a possible dict match */ - const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; - const BYTE* dictMatch = dictBase + dictMatchIndex; - if (dictMatchIndex > dictStartIndex && - MEM_read32(dictMatch) == MEM_read32(ip0)) { - /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */ - if (matchIndex <= prefixStartIndex) { - U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta); - mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; - while (((ip0 > anchor) & (dictMatch > dictStart)) - && (ip0[-1] == dictMatch[-1])) { - ip0--; - dictMatch--; - mLength++; - } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - break; - } - } - } - - if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) { - /* found a regular match of size >= 4 */ - U32 const offset = (U32) (ip0 - match); - mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; - while (((ip0 > anchor) & (match > prefixStart)) - && (ip0[-1] == match[-1])) { - ip0--; - match--; - mLength++; - } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); - break; - } - - /* Prepare for next iteration */ - dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS]; - dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); - matchIndex = hashTable[hash1]; - - if (ip1 >= nextStep) { - step++; - nextStep += kStepIncr; - } - ip0 = ip1; - ip1 = ip1 + step; - if (ip1 > ilimit) goto _cleanup; - - curr = (U32)(ip0 - base); - hash0 = hash1; - } /* end inner search loop */ - - /* match found */ - assert(mLength); - ip0 += mLength; - anchor = ip0; - - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+curr+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); - - /* check immediate repcode */ - while (ip0 <= ilimit) { - U32 const current2 = (U32)(ip0-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? - dictBase - dictIndexDelta + repIndex2 : - base + repIndex2; - if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) - && (MEM_read32(repMatch2) == MEM_read32(ip0))) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; - ip0 += repLength2; - anchor = ip0; - continue; - } - break; - } - } - - /* Prepare for next iteration */ - assert(ip0 == anchor); - ip1 = ip0 + stepSize; - } - -_cleanup: - /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} - - -ZSTD_GEN_FAST_FN(dictMatchState, 4, 0) -ZSTD_GEN_FAST_FN(dictMatchState, 5, 0) -ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) -ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) - -size_t ZSTD_compressBlock_fast_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - U32 const mls = ms->cParams.minMatch; - assert(ms->dictMatchState != NULL); - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); - } -} - - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_fast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls, U32 const hasStep) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hlog = cParams->hashLog; - /* support stepSize of 0 */ - size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; - const BYTE* const base = ms->window.base; - const BYTE* const dictBase = ms->window.dictBase; - const BYTE* const istart = (const BYTE*)src; - const BYTE* anchor = istart; - const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); - const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); - const U32 dictStartIndex = lowLimit; - const BYTE* const dictStart = dictBase + dictStartIndex; - const U32 dictLimit = ms->window.dictLimit; - const U32 prefixStartIndex = dictLimit < lowLimit ? 
lowLimit : dictLimit; - const BYTE* const prefixStart = base + prefixStartIndex; - const BYTE* const dictEnd = dictBase + prefixStartIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved1 = 0, offsetSaved2 = 0; - - const BYTE* ip0 = istart; - const BYTE* ip1; - const BYTE* ip2; - const BYTE* ip3; - U32 current0; - - - size_t hash0; /* hash for ip0 */ - size_t hash1; /* hash for ip1 */ - U32 idx; /* match idx for ip0 */ - const BYTE* idxBase; /* base pointer for idx */ - - U32 offcode; - const BYTE* match0; - size_t mLength; - const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */ - - size_t step; - const BYTE* nextStep; - const size_t kStepIncr = (1 << (kSearchStrength - 1)); - - (void)hasStep; /* not currently specialized on whether it's accelerated */ - - DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); - - /* switch to "regular" variant if extDict is invalidated due to maxDistance */ - if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); - - { U32 const curr = (U32)(ip0 - base); - U32 const maxRep = curr - dictStartIndex; - if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0; - if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0; - } - - /* start each op */ -_start: /* Requires: ip0 */ - - step = stepSize; - nextStep = ip0 + kStepIncr; - - /* calculate positions, ip0 - anchor == 0, so we skip step calc */ - ip1 = ip0 + 1; - ip2 = ip0 + step; - ip3 = ip2 + 1; - - if (ip3 >= ilimit) { - goto _cleanup; - } - - hash0 = ZSTD_hashPtr(ip0, hlog, mls); - hash1 = ZSTD_hashPtr(ip1, hlog, mls); - - idx = hashTable[hash0]; - idxBase = idx < prefixStartIndex ? dictBase : base; - - do { - { /* load repcode match for ip[2] */ - U32 const current2 = (U32)(ip2 - base); - U32 const repIndex = current2 - offset_1; - const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; - U32 rval; - if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */ - & (offset_1 > 0) ) { - rval = MEM_read32(repBase + repIndex); - } else { - rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */ - } - - /* write back hash table entry */ - current0 = (U32)(ip0 - base); - hashTable[hash0] = current0; - - /* check repcode at ip[2] */ - if (MEM_read32(ip2) == rval) { - ip0 = ip2; - match0 = repBase + repIndex; - matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - assert((match0 != prefixStart) & (match0 != dictStart)); - mLength = ip0[-1] == match0[-1]; - ip0 -= mLength; - match0 -= mLength; - offcode = REPCODE1_TO_OFFBASE; - mLength += 4; - goto _match; - } } - - { /* load match for ip[0] */ - U32 const mval = idx >= dictStartIndex ? - MEM_read32(idxBase + idx) : - MEM_read32(ip0) ^ 1; /* guaranteed not to match */ - - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! */ - goto _offset; - } } - - /* lookup ip[1] */ - idx = hashTable[hash1]; - idxBase = idx < prefixStartIndex ? dictBase : base; - - /* hash ip[2] */ - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - - /* advance to next positions */ - ip0 = ip1; - ip1 = ip2; - ip2 = ip3; - - /* write back hash table entry */ - current0 = (U32)(ip0 - base); - hashTable[hash0] = current0; - - { /* load match for ip[0] */ - U32 const mval = idx >= dictStartIndex ? 
- MEM_read32(idxBase + idx) : - MEM_read32(ip0) ^ 1; /* guaranteed not to match */ - - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! */ - goto _offset; - } } - - /* lookup ip[1] */ - idx = hashTable[hash1]; - idxBase = idx < prefixStartIndex ? dictBase : base; - - /* hash ip[2] */ - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - - /* advance to next positions */ - ip0 = ip1; - ip1 = ip2; - ip2 = ip0 + step; - ip3 = ip1 + step; - - /* calculate step */ - if (ip2 >= nextStep) { - step++; - PREFETCH_L1(ip1 + 64); - PREFETCH_L1(ip1 + 128); - nextStep += kStepIncr; - } - } while (ip3 < ilimit); - -_cleanup: - /* Note that there are probably still a couple positions we could search. - * However, it seems to be a meaningful performance hit to try to search - * them. So let's not. */ - - /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), - * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ - offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; - - /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved1; - rep[1] = offset_2 ? offset_2 : offsetSaved2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); - -_offset: /* Requires: ip0, idx, idxBase */ - - /* Compute the offset code. */ - { U32 const offset = current0 - idx; - const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; - matchEnd = idx < prefixStartIndex ? dictEnd : iend; - match0 = idxBase + idx; - offset_2 = offset_1; - offset_1 = offset; - offcode = OFFSET_TO_OFFBASE(offset); - mLength = 4; - - /* Count the backwards match length. */ - while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) { - ip0--; - match0--; - mLength++; - } } - -_match: /* Requires: ip0, match0, offcode, matchEnd */ - - /* Count the forward length. */ - assert(matchEnd != 0); - mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); - - ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); - - ip0 += mLength; - anchor = ip0; - - /* write next hash table entry */ - if (ip1 < ip0) { - hashTable[hash1] = (U32)(ip1 - base); - } - - /* Fill table and check for immediate repcode. */ - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+current0+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); - - while (ip0 <= ilimit) { - U32 const repIndex2 = (U32)(ip0-base) - offset_2; - const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0)) - && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); - ip0 += repLength2; - anchor = ip0; - continue; - } - break; - } } - - goto _start; -} - -ZSTD_GEN_FAST_FN(extDict, 4, 0) -ZSTD_GEN_FAST_FN(extDict, 5, 0) -ZSTD_GEN_FAST_FN(extDict, 6, 0) -ZSTD_GEN_FAST_FN(extDict, 7, 0) - -size_t ZSTD_compressBlock_fast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - U32 const mls = ms->cParams.minMatch; - assert(ms->dictMatchState == NULL); - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); - case 5 : - return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); - case 6 : - return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); - case 7 : - return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); - } -} diff --git a/zstandard_android/src/compress/zstd_lazy.c b/zstandard_android/src/compress/zstd_lazy.c deleted file mode 100644 index cfbadf4..0000000 --- a/zstandard_android/src/compress/zstd_lazy.c +++ /dev/null @@ -1,2199 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
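The extDict paths above lean on ZSTD_count_2segments to measure matches that begin in the old (dictionary) segment and logically continue in the current one. A self-contained sketch of that helper's contract, with hypothetical names and byte-at-a-time compares for clarity (the real code compares word-at-a-time): count within the dictionary segment first, and only if the match runs all the way to dictEnd, resume comparing against prefixStart, which holds the bytes that logically follow:

#include <stddef.h>
#include <stdint.h>

static size_t count_fwd(const uint8_t* a, const uint8_t* b, const uint8_t* aEnd)
{
    size_t n = 0;
    while (a + n < aEnd && a[n] == b[n]) n++;
    return n;
}

/* Count the match length when `match` may end at mEnd (the dict end) and
 * logically continue at prefixStart in the current segment. */
static size_t count_2segments(const uint8_t* ip, const uint8_t* match,
                              const uint8_t* iEnd, const uint8_t* mEnd,
                              const uint8_t* prefixStart)
{
    const uint8_t* const vEnd =
        (ip + (mEnd - match) < iEnd) ? ip + (mEnd - match) : iEnd;
    size_t const len = count_fwd(ip, match, vEnd);
    if (match + len != mEnd) return len;           /* mismatch inside the dict */
    return len + count_fwd(ip + len, prefixStart, iEnd);
}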
- */ - -#include "zstd_compress_internal.h" -#include "zstd_lazy.h" -#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */ - -#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) - -#define kLazySkippingStep 8 - - -/*-************************************* -* Binary Tree search -***************************************/ - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_updateDUBT(ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iend, - U32 mls) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hashLog = cParams->hashLog; - - U32* const bt = ms->chainTable; - U32 const btLog = cParams->chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - - const BYTE* const base = ms->window.base; - U32 const target = (U32)(ip - base); - U32 idx = ms->nextToUpdate; - - if (idx != target) - DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)", - idx, target, ms->window.dictLimit); - assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */ - (void)iend; - - assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */ - for ( ; idx < target ; idx++) { - size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */ - U32 const matchIndex = hashTable[h]; - - U32* const nextCandidatePtr = bt + 2*(idx&btMask); - U32* const sortMarkPtr = nextCandidatePtr + 1; - - DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx); - hashTable[h] = idx; /* Update Hash Table */ - *nextCandidatePtr = matchIndex; /* update BT like a chain */ - *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK; - } - ms->nextToUpdate = target; -} - - -/** ZSTD_insertDUBT1() : - * sort one already inserted but unsorted position - * assumption : curr >= btlow == (curr - btmask) - * doesn't fail */ -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, - U32 curr, const BYTE* inputEnd, - U32 nbCompares, U32 btLow, - const ZSTD_dictMode_e dictMode) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const bt = ms->chainTable; - U32 const btLog = cParams->chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const base = ms->window.base; - const BYTE* const dictBase = ms->window.dictBase; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr; - const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* match; - U32* smallerPtr = bt + 2*(curr&btMask); - U32* largerPtr = smallerPtr + 1; - U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ - U32 dummy32; /* to be nullified at the end */ - U32 const windowValid = ms->window.lowLimit; - U32 const maxDistance = 1U << cParams->windowLog; - U32 const windowLow = (curr - windowValid > maxDistance) ? 
curr - maxDistance : windowValid; - - - DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", - curr, dictLimit, windowLow); - assert(curr >= btLow); - assert(ip < iend); /* condition for ZSTD_count */ - - for (; nbCompares && (matchIndex > windowLow); --nbCompares) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - assert(matchIndex < curr); - /* note : all candidates are now supposed sorted, - * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK - * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */ - - if ( (dictMode != ZSTD_extDict) - || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ - || (curr < dictLimit) /* both in extDict */) { - const BYTE* const mBase = ( (dictMode != ZSTD_extDict) - || (matchIndex+matchLength >= dictLimit)) ? - base : dictBase; - assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ - || (curr < dictLimit) ); - match = mBase + matchIndex; - matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* preparation for next read of match[matchLength] */ - } - - DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", - curr, matchIndex, (U32)matchLength); - - if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ - } - - if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ - DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u", - matchIndex, btLow, nextPtr[1]); - smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ - matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ - DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u", - matchIndex, btLow, nextPtr[0]); - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; -} - - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_DUBT_findBetterDictMatch ( - const ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, - size_t bestLength, - U32 nbCompares, - U32 const mls, - const ZSTD_dictMode_e dictMode) -{ - const ZSTD_matchState_t * const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; - const U32 * const dictHashTable = dms->hashTable; - U32 const hashLog = dmsCParams->hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32 dictMatchIndex = dictHashTable[h]; - - const BYTE* const base = ms->window.base; - const BYTE* const prefixStart = base + 
ms->window.dictLimit; - U32 const curr = (U32)(ip-base); - const BYTE* const dictBase = dms->window.base; - const BYTE* const dictEnd = dms->window.nextSrc; - U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base); - U32 const dictLowLimit = dms->window.lowLimit; - U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit; - - U32* const dictBt = dms->chainTable; - U32 const btLog = dmsCParams->chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask; - - size_t commonLengthSmaller=0, commonLengthLarger=0; - - (void)dictMode; - assert(dictMode == ZSTD_dictMatchState); - - for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) { - U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE* match = dictBase + dictMatchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (dictMatchIndex+matchLength >= dictHighLimit) - match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */ - - if (matchLength > bestLength) { - U32 matchIndex = dictMatchIndex + dictIndexDelta; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { - DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", - curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex); - bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); - } - if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - } - - if (match[matchLength] < ip[matchLength]) { - if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ - commonLengthLarger = matchLength; - dictMatchIndex = nextPtr[0]; - } - } - - if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex; - DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - curr, (U32)bestLength, (U32)*offsetPtr, mIndex); - } - return bestLength; - -} - - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iend, - size_t* offBasePtr, - U32 const mls, - const ZSTD_dictMode_e dictMode) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hashLog = cParams->hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32 matchIndex = hashTable[h]; - - const BYTE* const base = ms->window.base; - U32 const curr = (U32)(ip-base); - U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); - - U32* const bt = ms->chainTable; - U32 const btLog 
= cParams->chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 const btLow = (btMask >= curr) ? 0 : curr - btMask; - U32 const unsortLimit = MAX(btLow, windowLow); - - U32* nextCandidate = bt + 2*(matchIndex&btMask); - U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1; - U32 nbCompares = 1U << cParams->searchLog; - U32 nbCandidates = nbCompares; - U32 previousCandidate = 0; - - DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr); - assert(ip <= iend-8); /* required for h calculation */ - assert(dictMode != ZSTD_dedicatedDictSearch); - - /* reach end of unsorted candidates list */ - while ( (matchIndex > unsortLimit) - && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK) - && (nbCandidates > 1) ) { - DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted", - matchIndex); - *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */ - previousCandidate = matchIndex; - matchIndex = *nextCandidate; - nextCandidate = bt + 2*(matchIndex&btMask); - unsortedMark = bt + 2*(matchIndex&btMask) + 1; - nbCandidates --; - } - - /* nullify last candidate if it's still unsorted - * simplification, detrimental to compression ratio, beneficial for speed */ - if ( (matchIndex > unsortLimit) - && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) { - DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u", - matchIndex); - *nextCandidate = *unsortedMark = 0; - } - - /* batch sort stacked candidates */ - matchIndex = previousCandidate; - while (matchIndex) { /* will end on matchIndex == 0 */ - U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1; - U32 const nextCandidateIdx = *nextCandidateIdxPtr; - ZSTD_insertDUBT1(ms, matchIndex, iend, - nbCandidates, unsortLimit, dictMode); - matchIndex = nextCandidateIdx; - nbCandidates++; - } - - /* find longest match */ - { size_t commonLengthSmaller = 0, commonLengthLarger = 0; - const BYTE* const dictBase = ms->window.dictBase; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - U32* smallerPtr = bt + 2*(curr&btMask); - U32* largerPtr = bt + 2*(curr&btMask) + 1; - U32 matchEndIdx = curr + 8 + 1; - U32 dummy32; /* to be nullified at the end */ - size_t bestLength = 0; - - matchIndex = hashTable[h]; - hashTable[h] = curr; /* Update Hash Table */ - - for (; nbCompares && (matchIndex > windowLow); --nbCompares) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE* match; - - if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) { - match = base + matchIndex; - matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) ) - bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex); - if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ - if (dictMode == ZSTD_dictMatchState) { - 
nbCompares = 0; /* in addition to avoiding checking any - * further in this loop, make sure we - * skip checking in the dictionary. */ - } - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - } - - if (match[matchLength] < ip[matchLength]) { - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; - - assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ - if (dictMode == ZSTD_dictMatchState && nbCompares) { - bestLength = ZSTD_DUBT_findBetterDictMatch( - ms, ip, iend, - offBasePtr, bestLength, nbCompares, - mls, dictMode); - } - - assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ - ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ - if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex; - DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - curr, (U32)bestLength, (U32)*offBasePtr, mIndex); - } - return bestLength; - } -} - - -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iLimit, - size_t* offBasePtr, - const U32 mls /* template */, - const ZSTD_dictMode_e dictMode) -{ - DEBUGLOG(7, "ZSTD_BtFindBestMatch"); - if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateDUBT(ms, ip, iLimit, mls); - return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); -} - -/*********************************** -* Dedicated dict search -***********************************/ - -void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip) -{ - const BYTE* const base = ms->window.base; - U32 const target = (U32)(ip - base); - U32* const hashTable = ms->hashTable; - U32* const chainTable = ms->chainTable; - U32 const chainSize = 1 << ms->cParams.chainLog; - U32 idx = ms->nextToUpdate; - U32 const minChain = chainSize < target - idx ? target - chainSize : idx; - U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG; - U32 const cacheSize = bucketSize - 1; - U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize; - U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts; - - /* We know the hashtable is oversized by a factor of `bucketSize`. - * We are going to temporarily pretend `bucketSize == 1`, keeping only a - * single entry. We will use the rest of the space to construct a temporary - * chaintable. 
- */ - U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; - U32* const tmpHashTable = hashTable; - U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog); - U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog; - U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx; - U32 hashIdx; - - assert(ms->cParams.chainLog <= 24); - assert(ms->cParams.hashLog > ms->cParams.chainLog); - assert(idx != 0); - assert(tmpMinChain <= minChain); - - /* fill conventional hash table and conventional chain table */ - for ( ; idx < target; idx++) { - U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch); - if (idx >= tmpMinChain) { - tmpChainTable[idx - tmpMinChain] = hashTable[h]; - } - tmpHashTable[h] = idx; - } - - /* sort chains into ddss chain table */ - { - U32 chainPos = 0; - for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) { - U32 count; - U32 countBeyondMinChain = 0; - U32 i = tmpHashTable[hashIdx]; - for (count = 0; i >= tmpMinChain && count < cacheSize; count++) { - /* skip through the chain to the first position that won't be - * in the hash cache bucket */ - if (i < minChain) { - countBeyondMinChain++; - } - i = tmpChainTable[i - tmpMinChain]; - } - if (count == cacheSize) { - for (count = 0; count < chainLimit;) { - if (i < minChain) { - if (!i || ++countBeyondMinChain > cacheSize) { - /* only allow pulling `cacheSize` number of entries - * into the cache or chainTable beyond `minChain`, - * to replace the entries pulled out of the - * chainTable into the cache. This lets us reach - * back further without increasing the total number - * of entries in the chainTable, guaranteeing the - * DDSS chain table will fit into the space - * allocated for the regular one. */ - break; - } - } - chainTable[chainPos++] = i; - count++; - if (i < tmpMinChain) { - break; - } - i = tmpChainTable[i - tmpMinChain]; - } - } else { - count = 0; - } - if (count) { - tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count; - } else { - tmpHashTable[hashIdx] = 0; - } - } - assert(chainPos <= chainSize); /* I believe this is guaranteed... */ - } - - /* move chain pointers into the last entry of each hash bucket */ - for (hashIdx = (1 << hashLog); hashIdx; ) { - U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG; - U32 const chainPackedPointer = tmpHashTable[hashIdx]; - U32 i; - for (i = 0; i < cacheSize; i++) { - hashTable[bucketIdx + i] = 0; - } - hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; - } - - /* fill the buckets of the hash table */ - for (idx = ms->nextToUpdate; idx < target; idx++) { - U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch) - << ZSTD_LAZY_DDSS_BUCKET_LOG; - U32 i; - /* Shift hash cache down 1. */ - for (i = cacheSize - 1; i; i--) - hashTable[h + i] = hashTable[h + i - 1]; - hashTable[h] = idx; - } - - ms->nextToUpdate = target; -} - -/* Returns the longest match length found in the dedicated dict search structure. - * If none are longer than the argument ml, then ml will be returned. 
- */ -FORCE_INLINE_TEMPLATE -size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts, - const ZSTD_matchState_t* const dms, - const BYTE* const ip, const BYTE* const iLimit, - const BYTE* const prefixStart, const U32 curr, - const U32 dictLimit, const size_t ddsIdx) { - const U32 ddsLowestIndex = dms->window.dictLimit; - const BYTE* const ddsBase = dms->window.base; - const BYTE* const ddsEnd = dms->window.nextSrc; - const U32 ddsSize = (U32)(ddsEnd - ddsBase); - const U32 ddsIndexDelta = dictLimit - ddsSize; - const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG); - const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1; - U32 ddsAttempt; - U32 matchIndex; - - for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) { - PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); - } - - { - U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; - U32 const chainIndex = chainPackedPointer >> 8; - - PREFETCH_L1(&dms->chainTable[chainIndex]); - } - - for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) { - size_t currentMl=0; - const BYTE* match; - matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; - match = ddsBase + matchIndex; - - if (!matchIndex) { - return ml; - } - - /* guaranteed by table construction */ - (void)ddsLowestIndex; - assert(matchIndex >= ddsLowestIndex); - assert(match+4 <= ddsEnd); - if (MEM_read32(match) == MEM_read32(ip)) { - /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; - } - - /* save best solution */ - if (currentMl > ml) { - ml = currentMl; - *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); - if (ip+currentMl == iLimit) { - /* best possible, avoids read overflow on next attempt */ - return ml; - } - } - } - - { - U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; - U32 chainIndex = chainPackedPointer >> 8; - U32 const chainLength = chainPackedPointer & 0xFF; - U32 const chainAttempts = nbAttempts - ddsAttempt; - U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts; - U32 chainAttempt; - - for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) { - PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]); - } - - for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) { - size_t currentMl=0; - const BYTE* match; - matchIndex = dms->chainTable[chainIndex]; - match = ddsBase + matchIndex; - - /* guaranteed by table construction */ - assert(matchIndex >= ddsLowestIndex); - assert(match+4 <= ddsEnd); - if (MEM_read32(match) == MEM_read32(ip)) { - /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; - } - - /* save best solution */ - if (currentMl > ml) { - ml = currentMl; - *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); - if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ - } - } - } - return ml; -} - - -/* ********************************* -* Hash Chain -***********************************/ -#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)] - -/* Update chains up to ip (excluded) - Assumption : always within prefix (i.e. 
not within extDict) */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_insertAndFindFirstIndex_internal( - ZSTD_matchState_t* ms, - const ZSTD_compressionParameters* const cParams, - const BYTE* ip, U32 const mls, U32 const lazySkipping) -{ - U32* const hashTable = ms->hashTable; - const U32 hashLog = cParams->hashLog; - U32* const chainTable = ms->chainTable; - const U32 chainMask = (1 << cParams->chainLog) - 1; - const BYTE* const base = ms->window.base; - const U32 target = (U32)(ip - base); - U32 idx = ms->nextToUpdate; - - while(idx < target) { /* catch up */ - size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); - NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; - hashTable[h] = idx; - idx++; - /* Stop inserting every position when in the lazy skipping mode. */ - if (lazySkipping) - break; - } - - ms->nextToUpdate = target; - return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; -} - -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { - const ZSTD_compressionParameters* const cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0); -} - -/* inlining is important to hardwire a hot branch (template emulation) */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_HcFindBestMatch( - ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 mls, const ZSTD_dictMode_e dictMode) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const chainTable = ms->chainTable; - const U32 chainSize = (1 << cParams->chainLog); - const U32 chainMask = chainSize-1; - const BYTE* const base = ms->window.base; - const BYTE* const dictBase = ms->window.dictBase; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const U32 curr = (U32)(ip-base); - const U32 maxDistance = 1U << cParams->windowLog; - const U32 lowestValid = ms->window.lowLimit; - const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - const U32 isDictionary = (ms->loadedDictEnd != 0); - const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; - const U32 minChain = curr > chainSize ? curr - chainSize : 0; - U32 nbAttempts = 1U << cParams->searchLog; - size_t ml=4-1; - - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch - ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0; - const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch - ? 
ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0; - - U32 matchIndex; - - if (dictMode == ZSTD_dedicatedDictSearch) { - const U32* entry = &dms->hashTable[ddsIdx]; - PREFETCH_L1(entry); - } - - /* HC4 match finder */ - matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping); - - for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) { - size_t currentMl=0; - if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { - const BYTE* const match = base + matchIndex; - assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ - if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ - currentMl = ZSTD_count(ip, match, iLimit); - } else { - const BYTE* const match = dictBase + matchIndex; - assert(match+4 <= dictEnd); - if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; - } - - /* save best solution */ - if (currentMl > ml) { - ml = currentMl; - *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); - if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ - } - - if (matchIndex <= minChain) break; - matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); - } - - assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ - if (dictMode == ZSTD_dedicatedDictSearch) { - ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, - ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); - } else if (dictMode == ZSTD_dictMatchState) { - const U32* const dmsChainTable = dms->chainTable; - const U32 dmsChainSize = (1 << dms->cParams.chainLog); - const U32 dmsChainMask = dmsChainSize - 1; - const U32 dmsLowestIndex = dms->window.dictLimit; - const BYTE* const dmsBase = dms->window.base; - const BYTE* const dmsEnd = dms->window.nextSrc; - const U32 dmsSize = (U32)(dmsEnd - dmsBase); - const U32 dmsIndexDelta = dictLimit - dmsSize; - const U32 dmsMinChain = dmsSize > dmsChainSize ? 
dmsSize - dmsChainSize : 0; - - matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; - - for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { - size_t currentMl=0; - const BYTE* const match = dmsBase + matchIndex; - assert(match+4 <= dmsEnd); - if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; - - /* save best solution */ - if (currentMl > ml) { - ml = currentMl; - assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); - if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ - } - - if (matchIndex <= dmsMinChain) break; - - matchIndex = dmsChainTable[matchIndex & dmsChainMask]; - } - } - - return ml; -} - -/* ********************************* -* (SIMD) Row-based matchfinder -***********************************/ -/* Constants for row-based hash */ -#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) -#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ - -#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1) - -typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */ - -/* ZSTD_VecMask_next(): - * Starting from the LSB, returns the idx of the next non-zero bit. - * Basically counting the nb of trailing zeroes. - */ -MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) { - return ZSTD_countTrailingZeros64(val); -} - -/* ZSTD_row_nextIndex(): - * Returns the next index to insert at within a tagTable row, and updates the "head" - * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) - */ -FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) { - U32 next = (*tagRow-1) & rowMask; - next += (next == 0) ? rowMask : 0; /* skip first position */ - *tagRow = (BYTE)next; - return next; -} - -/* ZSTD_isAligned(): - * Checks that a pointer is aligned to "align" bytes which must be a power of 2. - */ -MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) { - assert((align & (align - 1)) == 0); - return (((size_t)ptr) & (align - 1)) == 0; -} - -/* ZSTD_row_prefetch(): - * Performs prefetching for the hashTable and tagTable at a given row. - */ -FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) { - PREFETCH_L1(hashTable + relRow); - if (rowLog >= 5) { - PREFETCH_L1(hashTable + relRow + 16); - /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */ - } - PREFETCH_L1(tagTable + relRow); - if (rowLog == 6) { - PREFETCH_L1(tagTable + relRow + 32); - } - assert(rowLog == 4 || rowLog == 5 || rowLog == 6); - assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */ - assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */ -} - -/* ZSTD_row_fillHashCache(): - * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, - * but not beyond iLimit. 
- */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, - U32 const rowLog, U32 const mls, - U32 idx, const BYTE* const iLimit) -{ - U32 const* const hashTable = ms->hashTable; - BYTE const* const tagTable = ms->tagTable; - U32 const hashLog = ms->rowHashLog; - U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1); - U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch); - - for (; idx < lim; ++idx) { - U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); - U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; - ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); - ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash; - } - - DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1], - ms->hashCache[2], ms->hashCache[3], ms->hashCache[4], - ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]); -} - -/* ZSTD_row_nextCachedHash(): - * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at - * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. - */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, - BYTE const* tagTable, BYTE const* base, - U32 idx, U32 const hashLog, - U32 const rowLog, U32 const mls, - U64 const hashSalt) -{ - U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); - U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; - ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); - { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK]; - cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash; - return hash; - } -} - -/* ZSTD_row_update_internalImpl(): - * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. - */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, - U32 updateStartIdx, U32 const updateEndIdx, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) -{ - U32* const hashTable = ms->hashTable; - BYTE* const tagTable = ms->tagTable; - U32 const hashLog = ms->rowHashLog; - const BYTE* const base = ms->window.base; - - DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); - for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { - U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) - : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); - U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; - U32* const row = hashTable + relRow; - BYTE* tagRow = tagTable + relRow; - U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - - assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt)); - tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK; - row[pos] = updateStartIdx; - } -} - -/* ZSTD_row_update_internal(): - * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. - * Skips sections of long matches as is necessary. 
- */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) -{ - U32 idx = ms->nextToUpdate; - const BYTE* const base = ms->window.base; - const U32 target = (U32)(ip - base); - const U32 kSkipThreshold = 384; - const U32 kMaxMatchStartPositionsToUpdate = 96; - const U32 kMaxMatchEndPositionsToUpdate = 32; - - if (useCache) { - /* Only skip positions when using hash cache, i.e. - * if we are loading a dict, don't skip anything. - * If we decide to skip, then we only update a set number - * of positions at the beginning and end of the match. - */ - if (UNLIKELY(target - idx > kSkipThreshold)) { - U32 const bound = idx + kMaxMatchStartPositionsToUpdate; - ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); - idx = target - kMaxMatchEndPositionsToUpdate; - ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1); - } - } - assert(target >= idx); - ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); - ms->nextToUpdate = target; -} - -/* ZSTD_row_update(): - * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary - * processing. - */ -void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { - const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - const U32 rowMask = (1u << rowLog) - 1; - const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); - - DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog); - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */); -} - -/* Returns the mask width of bits group of which will be set to 1. Given not all - * architectures have easy movemask instruction, this helps to iterate over - * groups of bits easier and faster. 
- */ -FORCE_INLINE_TEMPLATE U32 -ZSTD_row_matchMaskGroupWidth(const U32 rowEntries) -{ - assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); - assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); - (void)rowEntries; -#if defined(ZSTD_ARCH_ARM_NEON) - /* NEON path only works for little endian */ - if (!MEM_isLittleEndian()) { - return 1; - } - if (rowEntries == 16) { - return 4; - } - if (rowEntries == 32) { - return 2; - } - if (rowEntries == 64) { - return 1; - } -#endif - return 1; -} - -#if defined(ZSTD_ARCH_X86_SSE2) -FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head) -{ - const __m128i comparisonMask = _mm_set1_epi8((char)tag); - int matches[4] = {0}; - int i; - assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4); - for (i=0; i<nbChunks; i++) { - const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i)); - const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask); - matches[i] = _mm_movemask_epi8(equalMask); - } - if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head); - if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head); - assert(nbChunks == 4); - return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head); -} -#endif - -#if defined(ZSTD_ARCH_ARM_NEON) -FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped) -{ - assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); - if (rowEntries == 16) { - /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. - * After that groups of 4 bits represent the equalMask. We lower - * all bits except the highest in these groups by doing AND with - * 0x88 = 0b10001000. - */ - const uint8x16_t chunk = vld1q_u8(src); - const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); - const uint8x8_t res = vshrn_n_u16(equalMask, 4); - const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0); - return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull; - } else if (rowEntries == 32) { - /* Same idea as with rowEntries == 16 but doing AND with - * 0x55 = 0b01010101. - */ - const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src); - const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); - const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); - const uint8x16_t dup = vdupq_n_u8(tag); - const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6); - const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6); - const uint8x8_t res = vsli_n_u8(t0, t1, 4); - const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ; - return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull; - } else { /* rowEntries == 64 */ - const uint8x16x4_t chunk = vld4q_u8(src); - const uint8x16_t dup = vdupq_n_u8(tag); - const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); - const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); - const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); - const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); - - const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); - const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); - const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); - const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); - const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); - const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); - return ZSTD_rotateRight_U64(matches, headGrouped); - } -} -#endif - -/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by - * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" - * matches the hash at the nth position in a row of the tagTable. - * Each row is a circular buffer beginning at the value of "headGrouped". So we - * must rotate the "matches" bitfield to match up with the actual layout of the - * entries within the hashTable */ -FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) -{ - const BYTE* const src = tagRow; - assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); - assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); - assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8); - -#if defined(ZSTD_ARCH_X86_SSE2) - - return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped); - -#else /* SW or NEON implementation */ - -#if defined(ZSTD_ARCH_ARM_NEON) - /* This NEON path only works for little endian - otherwise use SWAR below */ - if (MEM_isLittleEndian()) { - return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped); - } -#endif - { const size_t chunkSize = sizeof(size_t); - const size_t shiftAmount = ((chunkSize * 8) - chunkSize); - const size_t xFF = ~((size_t)0); - const size_t x01 = xFF / 0xFF; - const size_t x80 = x01 << 7; - const size_t splatChar = tag * x01; - ZSTD_VecMask matches = 0; - int i = rowEntries - chunkSize; - assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8)); - if (MEM_isLittleEndian()) { /* runtime check so have two loops */ - const size_t extractMagic = (xFF / 0x7F) >> chunkSize; - do { - size_t chunk = MEM_readST(&src[i]); - chunk ^= splatChar; - chunk = (((chunk | x80) - x01) | chunk) & x80; - matches <<= chunkSize; - matches |= (chunk * extractMagic) >> shiftAmount; - i -= chunkSize; - } while (i >= 0); - } else { /* big endian: reverse bits during extraction */ - const size_t msb = xFF ^ (xFF >> 1); - const size_t extractMagic = (msb / 0x1FF) | msb; - do { - size_t chunk = MEM_readST(&src[i]); - chunk ^= splatChar; - chunk = (((chunk | x80) - x01) | chunk) & x80; - matches <<= chunkSize; - matches |= ((chunk >> 7) * extractMagic) >> shiftAmount; - i -= chunkSize; - } while (i >= 0); - } - matches = ~matches; - if (rowEntries == 16) { - return ZSTD_rotateRight_U16((U16)matches, headGrouped); - } else if (rowEntries == 32) { - return ZSTD_rotateRight_U32((U32)matches, headGrouped); - } else { - return ZSTD_rotateRight_U64((U64)matches, headGrouped); - } - } -#endif -} - -/* The high-level approach of the SIMD row based match finder is as follows: - * - Figure out where to insert the new entry: - * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. - * - The hash is salted by a value that changes on every context reset, so when the same table is used - * we will avoid collisions that would otherwise slow us down by introducing phantom matches. - * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines - * which row to insert into. - * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can - * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes - * per row). - * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and - * generate a bitfield that we can cycle through to check the collisions in the hash table. - * - Pick the longest match. - * - Insert the tag into the equivalent row and position in the tagTable.
- */ -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_RowFindBestMatch( - ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 mls, const ZSTD_dictMode_e dictMode, - const U32 rowLog) -{ - U32* const hashTable = ms->hashTable; - BYTE* const tagTable = ms->tagTable; - U32* const hashCache = ms->hashCache; - const U32 hashLog = ms->rowHashLog; - const ZSTD_compressionParameters* const cParams = &ms->cParams; - const BYTE* const base = ms->window.base; - const BYTE* const dictBase = ms->window.dictBase; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const U32 curr = (U32)(ip-base); - const U32 maxDistance = 1U << cParams->windowLog; - const U32 lowestValid = ms->window.lowLimit; - const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - const U32 isDictionary = (ms->loadedDictEnd != 0); - const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; - const U32 rowEntries = (1U << rowLog); - const U32 rowMask = rowEntries - 1; - const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ - const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); - const U64 hashSalt = ms->hashSalt; - U32 nbAttempts = 1U << cappedSearchLog; - size_t ml=4-1; - U32 hash; - - /* DMS/DDS variables that may be referenced laster */ - const ZSTD_matchState_t* const dms = ms->dictMatchState; - - /* Initialize the following variables to satisfy static analyzer */ - size_t ddsIdx = 0; - U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ - U32 dmsTag = 0; - U32* dmsRow = NULL; - BYTE* dmsTagRow = NULL; - - if (dictMode == ZSTD_dedicatedDictSearch) { - const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; - { /* Prefetch DDS hashtable entry */ - ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG; - PREFETCH_L1(&dms->hashTable[ddsIdx]); - } - ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0; - } - - if (dictMode == ZSTD_dictMatchState) { - /* Prefetch DMS rows */ - U32* const dmsHashTable = dms->hashTable; - BYTE* const dmsTagTable = dms->tagTable; - U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls); - U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; - dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK; - dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow); - dmsRow = dmsHashTable + dmsRelRow; - ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog); - } - - /* Update the hashTable and tagTable up to (but not including) ip */ - if (!ms->lazySkipping) { - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */); - hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt); - } else { - /* Stop inserting every position when in the lazy skipping mode. - * The hash cache is also not kept up to date in this mode. 
- */ - hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); - ms->nextToUpdate = curr; - } - ms->hashSaltEntropy += hash; /* collect salt entropy */ - - { /* Get the hash for ip, compute the appropriate row */ - U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; - U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK; - U32* const row = hashTable + relRow; - BYTE* tagRow = (BYTE*)(tagTable + relRow); - U32 const headGrouped = (*tagRow & rowMask) * groupWidth; - U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; - size_t numMatches = 0; - size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries); - - /* Cycle through the matches and prefetch */ - for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { - U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; - U32 const matchIndex = row[matchPos]; - if(matchPos == 0) continue; - assert(numMatches < rowEntries); - if (matchIndex < lowLimit) - break; - if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { - PREFETCH_L1(base + matchIndex); - } else { - PREFETCH_L1(dictBase + matchIndex); - } - matchBuffer[numMatches++] = matchIndex; - --nbAttempts; - } - - /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop - in ZSTD_row_update_internal() at the next search. */ - { - U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - tagRow[pos] = (BYTE)tag; - row[pos] = ms->nextToUpdate++; - } - - /* Return the longest match */ - for (; currMatch < numMatches; ++currMatch) { - U32 const matchIndex = matchBuffer[currMatch]; - size_t currentMl=0; - assert(matchIndex < curr); - assert(matchIndex >= lowLimit); - - if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { - const BYTE* const match = base + matchIndex; - assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ - if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ - currentMl = ZSTD_count(ip, match, iLimit); - } else { - const BYTE* const match = dictBase + matchIndex; - assert(match+4 <= dictEnd); - if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; - } - - /* Save best solution */ - if (currentMl > ml) { - ml = currentMl; - *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); - if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ - } - } - } - - assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
*/ - if (dictMode == ZSTD_dedicatedDictSearch) { - ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, - ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); - } else if (dictMode == ZSTD_dictMatchState) { - /* TODO: Measure and potentially add prefetching to DMS */ - const U32 dmsLowestIndex = dms->window.dictLimit; - const BYTE* const dmsBase = dms->window.base; - const BYTE* const dmsEnd = dms->window.nextSrc; - const U32 dmsSize = (U32)(dmsEnd - dmsBase); - const U32 dmsIndexDelta = dictLimit - dmsSize; - - { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth; - U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; - size_t numMatches = 0; - size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries); - - for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { - U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; - U32 const matchIndex = dmsRow[matchPos]; - if(matchPos == 0) continue; - if (matchIndex < dmsLowestIndex) - break; - PREFETCH_L1(dmsBase + matchIndex); - matchBuffer[numMatches++] = matchIndex; - --nbAttempts; - } - - /* Return the longest match */ - for (; currMatch < numMatches; ++currMatch) { - U32 const matchIndex = matchBuffer[currMatch]; - size_t currentMl=0; - assert(matchIndex >= dmsLowestIndex); - assert(matchIndex < curr); - - { const BYTE* const match = dmsBase + matchIndex; - assert(match+4 <= dmsEnd); - if (MEM_read32(match) == MEM_read32(ip)) - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; - } - - if (currentMl > ml) { - ml = currentMl; - assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); - if (ip+currentMl == iLimit) break; - } - } - } - } - return ml; -} - - -/** - * Generate search functions templated on (dictMode, mls, rowLog). - * These functions are outlined for code size & compilation time. - * ZSTD_searchMax() dispatches to the correct implementation function. - * - * TODO: The start of the search function involves loading and calculating a - * bunch of constants from the ZSTD_matchState_t. These computations could be - * done in an initialization function, and saved somewhere in the match state. - * Then we could pass a pointer to the saved state instead of the match state, - * and avoid duplicate computations. - * - * TODO: Move the match re-winding into searchMax. This improves compression - * ratio, and unlocks further simplifications with the next TODO. - * - * TODO: Try moving the repcode search into searchMax. After the re-winding - * and repcode search are in searchMax, there is no more logic in the match - * finder loop that requires knowledge about the dictMode. So we should be - * able to avoid force inlining it, and we can join the extDict loop with - * the single segment loop. It should go in searchMax instead of its own - * function to avoid having multiple virtual function calls per search. 
- */ - -#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls -#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls -#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog - -#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE - -#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ - ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offBasePtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \ - } \ - -#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ - ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offsetPtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ - } \ - -#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ - ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offsetPtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ - } \ - -#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ - X(dictMode, mls, 4) \ - X(dictMode, mls, 5) \ - X(dictMode, mls, 6) - -#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \ - ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \ - ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \ - ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6) - -#define ZSTD_FOR_EACH_MLS(X, dictMode) \ - X(dictMode, 4) \ - X(dictMode, 5) \ - X(dictMode, 6) - -#define ZSTD_FOR_EACH_DICT_MODE(X, ...) 
\ - X(__VA_ARGS__, noDict) \ - X(__VA_ARGS__, extDict) \ - X(__VA_ARGS__, dictMatchState) \ - X(__VA_ARGS__, dedicatedDictSearch) - -/* Generate row search fns for each combination of (dictMode, mls, rowLog) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN) -/* Generate binary Tree search fns for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN) -/* Generate hash chain search fns for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN) - -typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; - -#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \ - case mls: \ - return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); -#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \ - case mls: \ - return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); -#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \ - case rowLog: \ - return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr); - -#define ZSTD_SWITCH_MLS(X, dictMode) \ - switch (mls) { \ - ZSTD_FOR_EACH_MLS(X, dictMode) \ - } - -#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \ - case mls: \ - switch (rowLog) { \ - ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \ - } \ - ZSTD_UNREACHABLE; \ - break; - -#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \ - switch (searchMethod) { \ - case search_hashChain: \ - ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \ - break; \ - case search_binaryTree: \ - ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \ - break; \ - case search_rowHash: \ - ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \ - break; \ - } \ - ZSTD_UNREACHABLE; - -/** - * Searches for the longest match at @p ip. - * Dispatches to the correct implementation function based on the - * (searchMethod, dictMode, mls, rowLog). We use switch statements - * here instead of using an indirect function call through a function - * pointer because after Spectre and Meltdown mitigations, indirect - * function calls can be very costly, especially in the kernel. - * - * NOTE: dictMode and searchMethod should be templated, so those switch - * statements should be optimized out. Only the mls & rowLog switches - * should be left. - * - * @param ms The match state. - * @param ip The position to search at. - * @param iend The end of the input data. - * @param[out] offsetPtr Stores the match offset into this pointer. - * @param mls The minimum search length, in the range [4, 6]. - * @param rowLog The row log (if applicable), in the range [4, 6]. - * @param searchMethod The search method to use (templated). - * @param dictMode The dictMode (templated). - * - * @returns The length of the longest match found, or < mls if no match is found. - * If a match is found its offset is stored in @p offsetPtr. 
- */ -FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( - ZSTD_matchState_t* ms, - const BYTE* ip, - const BYTE* iend, - size_t* offsetPtr, - U32 const mls, - U32 const rowLog, - searchMethod_e const searchMethod, - ZSTD_dictMode_e const dictMode) -{ - if (dictMode == ZSTD_noDict) { - ZSTD_SWITCH_SEARCH_METHOD(noDict) - } else if (dictMode == ZSTD_extDict) { - ZSTD_SWITCH_SEARCH_METHOD(extDict) - } else if (dictMode == ZSTD_dictMatchState) { - ZSTD_SWITCH_SEARCH_METHOD(dictMatchState) - } else if (dictMode == ZSTD_dedicatedDictSearch) { - ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch) - } - ZSTD_UNREACHABLE; - return 0; -} - -/* ******************************* -* Common parser - lazy strategy -*********************************/ - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_lazy_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize, - const searchMethod_e searchMethod, const U32 depth, - ZSTD_dictMode_e const dictMode) -{ - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; - const BYTE* const base = ms->window.base; - const U32 prefixLowestIndex = ms->window.dictLimit; - const BYTE* const prefixLowest = base + prefixLowestIndex; - const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); - const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - - U32 offset_1 = rep[0], offset_2 = rep[1]; - U32 offsetSaved1 = 0, offsetSaved2 = 0; - - const int isDMS = dictMode == ZSTD_dictMatchState; - const int isDDS = dictMode == ZSTD_dedicatedDictSearch; - const int isDxS = isDMS || isDDS; - const ZSTD_matchState_t* const dms = ms->dictMatchState; - const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0; - const BYTE* const dictBase = isDxS ? dms->window.base : NULL; - const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL; - const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL; - const U32 dictIndexDelta = isDxS ? - prefixLowestIndex - (U32)(dictEnd - dictBase) : - 0; - const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); - - DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod); - ip += (dictAndPrefixLength == 0); - if (dictMode == ZSTD_noDict) { - U32 const curr = (U32)(ip - base); - U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); - U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; - } - if (isDxS) { - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. */ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - } - - /* Reset the lazy skipping state */ - ms->lazySkipping = 0; - - if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); - } - - /* Match Loop */ -#if defined(__GNUC__) && defined(__x86_64__) - /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the - * code alignment is perturbed. To fix the instability align the loop on 32-bytes. 
- */ - __asm__(".p2align 5"); -#endif - while (ip < ilimit) { - size_t matchLength=0; - size_t offBase = REPCODE1_TO_OFFBASE; - const BYTE* start=ip+1; - DEBUGLOG(7, "search baseline (depth 0)"); - - /* check repCode */ - if (isDxS) { - const U32 repIndex = (U32)(ip - base) + 1 - offset_1; - const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch) - && repIndex < prefixLowestIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; - if (depth==0) goto _storeSequence; - } - } - if ( dictMode == ZSTD_noDict - && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { - matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - if (depth==0) goto _storeSequence; - } - - /* first search (depth 0) */ - { size_t offbaseFound = 999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); - if (ml2 > matchLength) - matchLength = ml2, start = ip, offBase = offbaseFound; - } - - if (matchLength < 4) { - size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */; - ip += step; - /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. - * In this mode we stop inserting every position into our tables, and only insert - * positions that we search, which is one in step positions. - * The exact cutoff is flexible, I've just chosen a number that is reasonably high, - * so we minimize the compression ratio loss in "normal" scenarios. This mode gets - * triggered once we've gone 2KB without finding any matches. - */ - ms->lazySkipping = step > kLazySkippingStep; - continue; - } - - /* let's try to find a better solution */ - if (depth>=1) - while (ip<ilimit) { - DEBUGLOG(7, "search depth 1"); - ip ++; - if ( (dictMode == ZSTD_noDict) - && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { - size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; - int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); - if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; - } - if (isDxS) { - const U32 repIndex = (U32)(ip - base) - offset_1; - const BYTE* repMatch = repIndex < prefixLowestIndex ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip)) ) { - const BYTE* repMatchEnd = repIndex < prefixLowestIndex ?
dictEnd : iend; - size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; - int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); - if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; - } - } - { size_t ofbCandidate=999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offBase = ofbCandidate, start = ip; - continue; /* search a better one */ - } } - - /* let's find an even better one */ - if ((depth==2) && (ip<ilimit)) { - DEBUGLOG(7, "search depth 2"); - ip ++; - if ( (dictMode == ZSTD_noDict) - && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { - size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; - int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); - if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; - } - if (isDxS) { - const U32 repIndex = (U32)(ip - base) - offset_1; - const BYTE* repMatch = repIndex < prefixLowestIndex ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip)) ) { - const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; - int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); - if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; - } - } - { size_t ofbCandidate=999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offBase = ofbCandidate, start = ip; - continue; - } } } - break; /* nothing found : store previous solution */ - } - - /* NOTE: - * Pay attention that `start[-value]` can lead to strange undefined behavior - * notably if `value` is unsigned, resulting in a large positive `-value`. - */ - /* catch up */ - if (OFFBASE_IS_OFFSET(offBase)) { - if (dictMode == ZSTD_noDict) { - while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest)) - && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */ - { start--; matchLength++; } - } - if (isDxS) { - U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); - const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; - const BYTE* const mStart = (matchIndex < prefixLowestIndex) ?
dictLowest : prefixLowest; - while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - } - offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); - } - /* store sequence */ -_storeSequence: - { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); - anchor = ip = start + matchLength; - } - if (ms->lazySkipping) { - /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ - if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); - } - ms->lazySkipping = 0; - } - - /* check immediate repcode */ - if (isDxS) { - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex = current2 - offset_2; - const BYTE* repMatch = repIndex < prefixLowestIndex ? - dictBase - dictIndexDelta + repIndex : - base + repIndex; - if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; - matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; - offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); - ip += matchLength; - anchor = ip; - continue; - } - break; - } - } - - if (dictMode == ZSTD_noDict) { - while ( ((ip <= ilimit) & (offset_2>0)) - && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { - /* store sequence */ - matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); - ip += matchLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } } } - - /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), - * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ - offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; - - /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved1; - rep[1] = offset_2 ? 
offset_2 : offsetSaved2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} -#endif /* build exclusions */ - - -#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); -} - -size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch); -} -#endif - -#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch); -} - -size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch); -} -#endif - 
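For intuition about the gain1/gain2 arbitration inside ZSTD_compressBlock_lazy_generic() above: each candidate is scored at roughly 4 points per matched byte minus the log2 cost of its offset, and the match already in hand gets a small handicap (+4 at depth 1, +7 at depth 2). A minimal standalone sketch of that comparison; highbit32() is a portable stand-in for ZSTD_highbit32(), and the numbers are invented:

#include <stdio.h>

/* portable stand-in for ZSTD_highbit32(): index of the highest set bit */
static unsigned highbit32(unsigned v)
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

int main(void)
{
    /* incumbent: 6 bytes at offBase 2051; challenger found one position
     * later: 7 bytes at offBase 70000 (depth-1 scoring, handicap +4) */
    unsigned const matchLength = 6, offBase = 2051;
    unsigned const ml2 = 7, ofbCandidate = 70000;
    int const gain2 = (int)(ml2 * 4 - highbit32(ofbCandidate));
    int const gain1 = (int)(matchLength * 4 - highbit32(offBase) + 4);
    printf("gain2=%d gain1=%d -> %s\n", gain2, gain1,
           (gain2 > gain1) ? "take the new match" : "keep the previous one");
    return 0;
}

Here gain2 = 28 - 16 = 12 while gain1 = 24 - 11 + 4 = 17: one extra matched byte does not pay for the much larger offset, so the previous match is kept.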
-#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); -} - -size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); -} -#endif - -#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_compressBlock_lazy_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize, - const searchMethod_e searchMethod, const U32 depth) -{ - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = searchMethod == search_rowHash ? 
iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; - const BYTE* const base = ms->window.base; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictBase = ms->window.dictBase; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const dictStart = dictBase + ms->window.lowLimit; - const U32 windowLog = ms->cParams.windowLog; - const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); - const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - - U32 offset_1 = rep[0], offset_2 = rep[1]; - - DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); - - /* Reset the lazy skipping state */ - ms->lazySkipping = 0; - - /* init */ - ip += (ip == prefixStart); - if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); - } - - /* Match Loop */ -#if defined(__GNUC__) && defined(__x86_64__) - /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the - * code alignment is perturbed. To fix the instability, align the loop on 32 bytes. - */ - __asm__(".p2align 5"); -#endif - while (ip < ilimit) { - size_t matchLength=0; - size_t offBase = REPCODE1_TO_OFFBASE; - const BYTE* start=ip+1; - U32 curr = (U32)(ip-base); - - /* check repCode */ - { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog); - const U32 repIndex = (U32)(curr+1 - offset_1); - const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) - & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ - if (MEM_read32(ip+1) == MEM_read32(repMatch)) { - /* repcode detected we should take it */ - const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; - matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; - if (depth==0) goto _storeSequence; - } } - - /* first search (depth 0) */ - { size_t ofbCandidate = 999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); - if (ml2 > matchLength) - matchLength = ml2, start = ip, offBase = ofbCandidate; - } - - if (matchLength < 4) { - size_t const step = ((size_t)(ip-anchor) >> kSearchStrength); - ip += step + 1; /* jump faster over incompressible sections */ - /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. - * In this mode we stop inserting every position into our tables, and only insert - * positions that we search, which is one in step positions. - * The exact cutoff is flexible, I've just chosen a number that is reasonably high, - * so we minimize the compression ratio loss in "normal" scenarios. This mode gets - * triggered once we've gone 2KB without finding any matches. - */ - ms->lazySkipping = step > kLazySkippingStep; - continue; - }
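The `offset_1 <= curr - windowLow` form used in this function's repcode checks is the unsigned-safe spelling of `curr > repIndex >= windowLow`; it holds as long as curr >= windowLow and offset_1 >= 1. A small standalone sketch (hypothetical index values, not part of the diff) checking the equivalence:

#include <stdio.h>
#include <stddef.h>

typedef unsigned U32;

int main(void)
{
    U32 const windowLow = 1000;   /* hypothetical lowest valid index */
    U32 const curr      = 5000;   /* hypothetical current position index */
    U32 const offsets[] = { 1, 3999, 4000, 4001 };
    size_t i;
    for (i = 0; i < sizeof(offsets)/sizeof(offsets[0]); i++) {
        U32 const offset_1 = offsets[i];
        U32 const repIndex = curr - offset_1;
        /* fast form: no underflow, since curr >= windowLow */
        int const fast = (offset_1 <= curr - windowLow);
        /* the reference form it is documented to be equivalent to */
        int const ref  = (curr > repIndex) && (repIndex >= windowLow);
        printf("offset_1=%4u repIndex=%4u fast=%d ref=%d\n",
               offset_1, repIndex, fast, ref);
    }
    return 0;
}

The boundary case offset_1 = 4000 gives repIndex == windowLow and both forms accept it; 4001 reaches below the window and both reject it.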
- - /* let's try to find a better solution */ - if (depth>=1) - while (ip<ilimit) { - ip ++; - curr++; - /* check repCode */ - { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); - const U32 repIndex = (U32)(curr - offset_1); - const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) - & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ - if (MEM_read32(ip) == MEM_read32(repMatch)) { - /* repcode detected */ - const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; - size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); - if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; - } } - - /* search match, depth 1 */ - { size_t ofbCandidate = 999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offBase = ofbCandidate, start = ip; - continue; /* search a better one */ - } } - - /* let's find an even better one */ - if ((depth==2) && (ip<ilimit)) { - ip ++; - curr++; - /* check repCode */ - { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); - const U32 repIndex = (U32)(curr - offset_1); - const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) - & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ - if (MEM_read32(ip) == MEM_read32(repMatch)) { - /* repcode detected */ - const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; - size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - int const gain2 = (int)(repLength * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); - if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; - } } - - /* search match, depth 2 */ - { size_t ofbCandidate = 999999999; - size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offBase = ofbCandidate, start = ip; - continue; - } } } - break; /* nothing found : store previous solution */ - } - - /* catch up */ - if (OFFBASE_IS_OFFSET(offBase)) { - U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); - const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; - const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; - while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); - } - - /* store sequence */ -_storeSequence: - { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); - anchor = ip = start + matchLength; - } - if (ms->lazySkipping) { - /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ - if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); - } - ms->lazySkipping = 0; - } - - /* check immediate repcode */ - while (ip <= ilimit) { - const U32 repCurrent = (U32)(ip-base); - const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); - const U32 repIndex = repCurrent - offset_2; - const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) - & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ - if (MEM_read32(ip) == MEM_read32(repMatch)) { - /* repcode detected we should take it */ - const BYTE* const repEnd = repIndex < dictLimit ?
dictEnd : iend; - matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); - ip += matchLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } - break; - } } - - /* Save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} -#endif /* build exclusions */ - -#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); -} - -size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0); -} -#endif - -#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) - -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); -} - -size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) - -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1); -} -#endif - -#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) - -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); -} - -size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize) - -{ - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); -} -#endif diff --git a/zstandard_android/src/compress/zstd_lazy.h b/zstandard_android/src/compress/zstd_lazy.h deleted file mode 100644 index 3635813..0000000 --- a/zstandard_android/src/compress/zstd_lazy.h +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_LAZY_H -#define ZSTD_LAZY_H - -#if defined (__cplusplus) -extern "C" { -#endif - -#include "zstd_compress_internal.h" - -/** - * Dedicated Dictionary Search Structure bucket log. 
In the - * ZSTD_dedicatedDictSearch mode, the hashTable has - * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just - * one. - */ -#define ZSTD_LAZY_DDSS_BUCKET_LOG 2 - -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ - -#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) -U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); -void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); - -void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip); - -void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ -#endif - -#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy -#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row -#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState -#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row -#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch -#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row -#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict -#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row -#else -#define ZSTD_COMPRESSBLOCK_GREEDY NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL -#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL -#endif - -#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* 
ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy -#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row -#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState -#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row -#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch -#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row -#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict -#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row -#else -#define ZSTD_COMPRESSBLOCK_LAZY NULL -#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL -#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL -#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL -#endif - -#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2 -#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row -#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE 
ZSTD_compressBlock_lazy2_dictMatchState -#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row -#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch -#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row -#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict -#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row -#else -#define ZSTD_COMPRESSBLOCK_LAZY2 NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL -#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL -#endif - -#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2 -#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState -#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict -#else -#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL -#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL -#endif - - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_LAZY_H */ diff --git a/zstandard_android/src/compress/zstd_ldm.c b/zstandard_android/src/compress/zstd_ldm.c deleted file mode 100644 index 17c069f..0000000 --- a/zstandard_android/src/compress/zstd_ldm.c +++ /dev/null @@ -1,730 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#include "zstd_ldm.h" - -#include "../common/debug.h" -#include "../common/xxhash.h" -#include "zstd_fast.h" /* ZSTD_fillHashTable() */ -#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ -#include "zstd_ldm_geartab.h" - -#define LDM_BUCKET_SIZE_LOG 3 -#define LDM_MIN_MATCH_LENGTH 64 -#define LDM_HASH_RLOG 7 - -typedef struct { - U64 rolling; - U64 stopMask; -} ldmRollingHashState_t; - -/** ZSTD_ldm_gear_init(): - * - * Initializes the rolling hash state such that it will honor the - * settings in params. */ -static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params) -{ - unsigned maxBitsInMask = MIN(params->minMatchLength, 64); - unsigned hashRateLog = params->hashRateLog; - - state->rolling = ~(U32)0; - - /* The choice of the splitting criterion is subject to two conditions: - * 1. it has to trigger on average every 2^(hashRateLog) bytes; - * 2. ideally, it has to depend on a window of minMatchLength bytes. 
- * - * In the gear hash algorithm, bit n depends on the last n bytes; - * so in order to obtain a good quality splitting criterion it is - * preferable to use bits with high weight. - * - * To match condition 1 we use a mask with hashRateLog bits set - * and, because of the previous remark, we make sure these bits - * have the highest possible weight while still respecting - * condition 2. - */ - if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) { - state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog); - } else { - /* In this degenerate case we simply honor the hash rate. */ - state->stopMask = ((U64)1 << hashRateLog) - 1; - } -} - -/** ZSTD_ldm_gear_reset() - * Feeds [data, data + minMatchLength) into the hash without registering any - * splits. This effectively resets the hash state. This is used when skipping - * over data, either at the beginning of a block, or skipping sections. - */ -static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state, - BYTE const* data, size_t minMatchLength) -{ - U64 hash = state->rolling; - size_t n = 0; - -#define GEAR_ITER_ONCE() do { \ - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \ - n += 1; \ - } while (0) - while (n + 3 < minMatchLength) { - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - } - while (n < minMatchLength) { - GEAR_ITER_ONCE(); - } -#undef GEAR_ITER_ONCE -} - -/** ZSTD_ldm_gear_feed(): - * - * Registers in the splits array all the split points found in the first - * size bytes following the data pointer. This function terminates when - * either all the data has been processed or LDM_BATCH_SIZE splits are - * present in the splits array. - * - * Precondition: The splits array must not be full. - * Returns: The number of bytes processed. */ -static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, - BYTE const* data, size_t size, - size_t* splits, unsigned* numSplits) -{ - size_t n; - U64 hash, mask; - - hash = state->rolling; - mask = state->stopMask; - n = 0; - -#define GEAR_ITER_ONCE() do { \ - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \ - n += 1; \ - if (UNLIKELY((hash & mask) == 0)) { \ - splits[*numSplits] = n; \ - *numSplits += 1; \ - if (*numSplits == LDM_BATCH_SIZE) \ - goto done; \ - } \ - } while (0) - - while (n + 3 < size) { - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - GEAR_ITER_ONCE(); - } - while (n < size) { - GEAR_ITER_ONCE(); - } - -#undef GEAR_ITER_ONCE - -done: - state->rolling = hash; - return n; -} - -void ZSTD_ldm_adjustParameters(ldmParams_t* params, - ZSTD_compressionParameters const* cParams) -{ - params->windowLog = cParams->windowLog; - ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); - DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); - if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; - if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; - if (params->hashLog == 0) { - params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); - assert(params->hashLog <= ZSTD_HASHLOG_MAX); - } - if (params->hashRateLog == 0) { - params->hashRateLog = params->windowLog < params->hashLog - ? 
0 - : params->windowLog - params->hashLog; - } - params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); -} - -size_t ZSTD_ldm_getTableSize(ldmParams_t params) -{ - size_t const ldmHSize = ((size_t)1) << params.hashLog; - size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); - size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); - size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) - + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); - return params.enableLdm == ZSTD_ps_enable ? totalSize : 0; -} - -size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) -{ - return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0; -} - -/** ZSTD_ldm_getBucket() : - * Returns a pointer to the start of the bucket associated with hash. */ -static ldmEntry_t* ZSTD_ldm_getBucket( - ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) -{ - return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); -} - -/** ZSTD_ldm_insertEntry() : - * Insert the entry with corresponding hash into the hash table */ -static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, - size_t const hash, const ldmEntry_t entry, - ldmParams_t const ldmParams) -{ - BYTE* const pOffset = ldmState->bucketOffsets + hash; - unsigned const offset = *pOffset; - - *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry; - *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1)); - -} - -/** ZSTD_ldm_countBackwardsMatch() : - * Returns the number of bytes that match backwards before pIn and pMatch. - * - * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ -static size_t ZSTD_ldm_countBackwardsMatch( - const BYTE* pIn, const BYTE* pAnchor, - const BYTE* pMatch, const BYTE* pMatchBase) -{ - size_t matchLength = 0; - while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) { - pIn--; - pMatch--; - matchLength++; - } - return matchLength; -} - -/** ZSTD_ldm_countBackwardsMatch_2segments() : - * Returns the number of bytes that match backwards from pMatch, - * even with the backwards match spanning 2 different segments. - * - * On reaching `pMatchBase`, start counting from mEnd */ -static size_t ZSTD_ldm_countBackwardsMatch_2segments( - const BYTE* pIn, const BYTE* pAnchor, - const BYTE* pMatch, const BYTE* pMatchBase, - const BYTE* pExtDictStart, const BYTE* pExtDictEnd) -{ - size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); - if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) { - /* If backwards match is entirely in the extDict or prefix, immediately return */ - return matchLength; - } - DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength); - matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart); - DEBUGLOG(7, "final backwards match length = %zu", matchLength); - return matchLength; -} - -/** ZSTD_ldm_fillFastTables() : - * - * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. - * This is similar to ZSTD_loadDictionaryContent. - * - * The tables for the other strategies are filled within their - * block compressors. 
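To make the splitting criterion of ZSTD_ldm_gear_init() above concrete, here is a standalone sketch (not part of the diff) that builds the stop mask for assumed parameters minMatchLength=64 (so maxBitsInMask=64) and hashRateLog=7, values the LDM defaults in this file suggest:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned const maxBitsInMask = 64; /* MIN(minMatchLength, 64) with minMatchLength=64 */
    unsigned const hashRateLog   = 7;  /* assumed default hash rate */
    uint64_t stopMask;
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        /* hashRateLog bits set, shifted into the highest-weight positions */
        stopMask = (((uint64_t)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        stopMask = ((uint64_t)1 << hashRateLog) - 1;
    }
    /* with 7 bits set, (hash & stopMask) == 0 for a random hash with
     * probability 2^-7, i.e. one split every 128 bytes on average */
    printf("stopMask = 0x%016llx\n", (unsigned long long)stopMask);
    return 0;
}

Placing the mask bits at the top of the 64-bit word is what ties the split decision to (up to) the last 64 input bytes, since bit n of the gear hash depends on the last n bytes.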
*/ -static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, - void const* end) -{ - const BYTE* const iend = (const BYTE*)end; - - switch(ms->cParams.strategy) - { - case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); - break; - - case ZSTD_dfast: -#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR - ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); -#else - assert(0); /* shouldn't be called: cparams should've been adjusted. */ -#endif - break; - - case ZSTD_greedy: - case ZSTD_lazy: - case ZSTD_lazy2: - case ZSTD_btlazy2: - case ZSTD_btopt: - case ZSTD_btultra: - case ZSTD_btultra2: - break; - default: - assert(0); /* not possible : not a valid strategy id */ - } - - return 0; -} - -void ZSTD_ldm_fillHashTable( - ldmState_t* ldmState, const BYTE* ip, - const BYTE* iend, ldmParams_t const* params) -{ - U32 const minMatchLength = params->minMatchLength; - U32 const hBits = params->hashLog - params->bucketSizeLog; - BYTE const* const base = ldmState->window.base; - BYTE const* const istart = ip; - ldmRollingHashState_t hashState; - size_t* const splits = ldmState->splitIndices; - unsigned numSplits; - - DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); - - ZSTD_ldm_gear_init(&hashState, params); - while (ip < iend) { - size_t hashed; - unsigned n; - - numSplits = 0; - hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits); - - for (n = 0; n < numSplits; n++) { - if (ip + splits[n] >= istart + minMatchLength) { - BYTE const* const split = ip + splits[n] - minMatchLength; - U64 const xxhash = XXH64(split, minMatchLength, 0); - U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); - ldmEntry_t entry; - - entry.offset = (U32)(split - base); - entry.checksum = (U32)(xxhash >> 32); - ZSTD_ldm_insertEntry(ldmState, hash, entry, *params); - } - } - - ip += hashed; - } -} - - -/** ZSTD_ldm_limitTableUpdate() : - * - * Sets cctx->nextToUpdate to a position corresponding closer to anchor - * if it is far way - * (after a long match, only update tables a limited amount). */ -static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) -{ - U32 const curr = (U32)(anchor - ms->window.base); - if (curr > ms->nextToUpdate + 1024) { - ms->nextToUpdate = - curr - MIN(512, curr - ms->nextToUpdate - 1024); - } -} - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_ldm_generateSequences_internal( - ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, - ldmParams_t const* params, void const* src, size_t srcSize) -{ - /* LDM parameters */ - int const extDict = ZSTD_window_hasExtDict(ldmState->window); - U32 const minMatchLength = params->minMatchLength; - U32 const entsPerBucket = 1U << params->bucketSizeLog; - U32 const hBits = params->hashLog - params->bucketSizeLog; - /* Prefix and extDict parameters */ - U32 const dictLimit = ldmState->window.dictLimit; - U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; - BYTE const* const base = ldmState->window.base; - BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; - BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; - BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL; - BYTE const* const lowPrefixPtr = base + dictLimit; - /* Input bounds */ - BYTE const* const istart = (BYTE const*)src; - BYTE const* const iend = istart + srcSize; - BYTE const* const ilimit = iend - HASH_READ_SIZE; - /* Input positions */ - BYTE const* anchor = istart; - BYTE const* ip = istart; - /* Rolling hash state */ - ldmRollingHashState_t hashState; - /* Arrays for staged-processing */ - size_t* const splits = ldmState->splitIndices; - ldmMatchCandidate_t* const candidates = ldmState->matchCandidates; - unsigned numSplits; - - if (srcSize < minMatchLength) - return iend - anchor; - - /* Initialize the rolling hash state with the first minMatchLength bytes */ - ZSTD_ldm_gear_init(&hashState, params); - ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength); - ip += minMatchLength; - - while (ip < ilimit) { - size_t hashed; - unsigned n; - - numSplits = 0; - hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip, - splits, &numSplits); - - for (n = 0; n < numSplits; n++) { - BYTE const* const split = ip + splits[n] - minMatchLength; - U64 const xxhash = XXH64(split, minMatchLength, 0); - U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); - - candidates[n].split = split; - candidates[n].hash = hash; - candidates[n].checksum = (U32)(xxhash >> 32); - candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params); - PREFETCH_L1(candidates[n].bucket); - } - - for (n = 0; n < numSplits; n++) { - size_t forwardMatchLength = 0, backwardMatchLength = 0, - bestMatchLength = 0, mLength; - U32 offset; - BYTE const* const split = candidates[n].split; - U32 const checksum = candidates[n].checksum; - U32 const hash = candidates[n].hash; - ldmEntry_t* const bucket = candidates[n].bucket; - ldmEntry_t const* cur; - ldmEntry_t const* bestEntry = NULL; - ldmEntry_t newEntry; - - newEntry.offset = (U32)(split - base); - newEntry.checksum = checksum; - - /* If a split point would generate a sequence overlapping with - * the previous one, we merely register it in the hash table and - * move on */ - if (split < anchor) { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); - continue; - } - - for (cur = bucket; cur < bucket + entsPerBucket; cur++) { - size_t curForwardMatchLength, curBackwardMatchLength, - curTotalMatchLength; - if (cur->checksum != checksum || cur->offset <= lowestIndex) { - continue; - } - if (extDict) { - BYTE const* const curMatchBase = - cur->offset < dictLimit ? dictBase : base; - BYTE const* const pMatch = curMatchBase + cur->offset; - BYTE const* const matchEnd = - cur->offset < dictLimit ? dictEnd : iend; - BYTE const* const lowMatchPtr = - cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; - curForwardMatchLength = - ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); - if (curForwardMatchLength < minMatchLength) { - continue; - } - curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( - split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd); - } else { /* !extDict */ - BYTE const* const pMatch = base + cur->offset; - curForwardMatchLength = ZSTD_count(split, pMatch, iend); - if (curForwardMatchLength < minMatchLength) { - continue; - } - curBackwardMatchLength = - ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr); - } - curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; - - if (curTotalMatchLength > bestMatchLength) { - bestMatchLength = curTotalMatchLength; - forwardMatchLength = curForwardMatchLength; - backwardMatchLength = curBackwardMatchLength; - bestEntry = cur; - } - } - - /* No match found -- insert an entry into the hash table - * and process the next candidate match */ - if (bestEntry == NULL) { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); - continue; - } - - /* Match found */ - offset = (U32)(split - base) - bestEntry->offset; - mLength = forwardMatchLength + backwardMatchLength; - { - rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; - - /* Out of sequence storage */ - if (rawSeqStore->size == rawSeqStore->capacity) - return ERROR(dstSize_tooSmall); - seq->litLength = (U32)(split - backwardMatchLength - anchor); - seq->matchLength = (U32)mLength; - seq->offset = offset; - rawSeqStore->size++; - } - - /* Insert the current entry into the hash table --- it must be - * done after the previous block to avoid clobbering bestEntry */ - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); - - anchor = split + forwardMatchLength; - - /* If we find a match that ends after the data that we've hashed - * then we have a repeating, overlapping, pattern. E.g. all zeros. - * If one repetition of the pattern matches our `stopMask` then all - * repetitions will. We don't need to insert them all into our table, - * only the first one. So skip over overlapping matches. - * This is a major speed boost (20x) for compressing a single byte - * repeated, when that byte ends up in the table. - */ - if (anchor > ip + hashed) { - ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); - /* Continue the outer loop at anchor (ip + hashed == anchor). */ - ip = anchor - hashed; - break; - } - } - - ip += hashed; - } - - return iend - anchor; -} - -/*! ZSTD_ldm_reduceTable() : - * reduce table indexes by `reducerValue` */ -static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, - U32 const reducerValue) -{ - U32 u; - for (u = 0; u < size; u++) { - if (table[u].offset < reducerValue) table[u].offset = 0; - else table[u].offset -= reducerValue; - } -} - -size_t ZSTD_ldm_generateSequences( - ldmState_t* ldmState, rawSeqStore_t* sequences, - ldmParams_t const* params, void const* src, size_t srcSize) -{ - U32 const maxDist = 1U << params->windowLog; - BYTE const* const istart = (BYTE const*)src; - BYTE const* const iend = istart + srcSize; - size_t const kMaxChunkSize = 1 << 20; - size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); - size_t chunk; - size_t leftoverSize = 0; - - assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); - /* Check that ZSTD_window_update() has been called for this chunk prior - * to passing it to this function.
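The nbChunks expression in ZSTD_ldm_generateSequences() above is a plain ceiling division. A trivial standalone check (kMaxChunkSize = 1 MiB as in the function; not part of the diff):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t const kMaxChunkSize = (size_t)1 << 20;
    size_t const sizes[] = { 0, 1, (size_t)1 << 20, ((size_t)1 << 20) + 1, (size_t)5 << 20 };
    size_t i;
    for (i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
        size_t const srcSize = sizes[i];
        /* same formula as above */
        size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
        printf("srcSize=%8zu -> nbChunks=%zu\n", srcSize, nbChunks);
    }
    return 0;
}

Note that srcSize = 0 yields nbChunks = 0, so the chunk loop simply never runs on empty input.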
- */ - assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); - /* The input could be very large (in zstdmt), so it must be broken up into - * chunks to enforce the maximum distance and handle overflow correction. - */ - assert(sequences->pos <= sequences->size); - assert(sequences->size <= sequences->capacity); - for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { - BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; - size_t const remaining = (size_t)(iend - chunkStart); - BYTE const *const chunkEnd = - (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize; - size_t const chunkSize = chunkEnd - chunkStart; - size_t newLeftoverSize; - size_t const prevSize = sequences->size; - - assert(chunkStart < iend); - /* 1. Perform overflow correction if necessary. */ - if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) { - U32 const ldmHSize = 1U << params->hashLog; - U32 const correction = ZSTD_window_correctOverflow( - &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); - ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); - /* invalidate dictionaries on overflow correction */ - ldmState->loadedDictEnd = 0; - } - /* 2. We enforce the maximum offset allowed. - * - * kMaxChunkSize should be small enough that we don't lose too much of - * the window through early invalidation. - * TODO: * Test the chunk size. - * * Try invalidation after the sequence generation and test the - * offset against maxDist directly. - * - * NOTE: Because of dictionaries + sequence splitting we MUST make sure - * that any offset used is valid at the END of the sequence, since it may - * be split into two sequences. This condition holds when using - * ZSTD_window_enforceMaxDist(), but if we move to checking offsets - * against maxDist directly, we'll have to carefully handle that case. - */ - ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); - /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ - newLeftoverSize = ZSTD_ldm_generateSequences_internal( - ldmState, sequences, params, chunkStart, chunkSize); - if (ZSTD_isError(newLeftoverSize)) - return newLeftoverSize; - /* 4. We add the leftover literals from previous iterations to the first - * newly generated sequence, or add the `newLeftoverSize` if none are - * generated. 
- */ - /* Prepend the leftover literals from the last call */ - if (prevSize < sequences->size) { - sequences->seq[prevSize].litLength += (U32)leftoverSize; - leftoverSize = newLeftoverSize; - } else { - assert(newLeftoverSize == chunkSize); - leftoverSize += chunkSize; - } - } - return 0; -} - -void -ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) -{ - while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { - rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; - if (srcSize <= seq->litLength) { - /* Skip past srcSize literals */ - seq->litLength -= (U32)srcSize; - return; - } - srcSize -= seq->litLength; - seq->litLength = 0; - if (srcSize < seq->matchLength) { - /* Skip past the first srcSize of the match */ - seq->matchLength -= (U32)srcSize; - if (seq->matchLength < minMatch) { - /* The match is too short, omit it */ - if (rawSeqStore->pos + 1 < rawSeqStore->size) { - seq[1].litLength += seq[0].matchLength; - } - rawSeqStore->pos++; - } - return; - } - srcSize -= seq->matchLength; - seq->matchLength = 0; - rawSeqStore->pos++; - } -} - -/** - * If the sequence length is longer than remaining then the sequence is split - * between this block and the next. - * - * Returns the current sequence to handle, or if the rest of the block should - * be literals, it returns a sequence with offset == 0. - */ -static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, - U32 const remaining, U32 const minMatch) -{ - rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; - assert(sequence.offset > 0); - /* Likely: No partial sequence */ - if (remaining >= sequence.litLength + sequence.matchLength) { - rawSeqStore->pos++; - return sequence; - } - /* Cut the sequence short (offset == 0 ==> rest is literals). */ - if (remaining <= sequence.litLength) { - sequence.offset = 0; - } else if (remaining < sequence.litLength + sequence.matchLength) { - sequence.matchLength = remaining - sequence.litLength; - if (sequence.matchLength < minMatch) { - sequence.offset = 0; - } - } - /* Skip past `remaining` bytes for the future sequences. 
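The splitting rule of maybeSplitSequence() above reduces to three cases: the whole sequence fits in the block, only a (long enough) prefix of the match fits, or only literals fit. A standalone sketch with hypothetical numbers, minMatch assumed to be 4; offset == 0 is the "emit the rest as literals" signal, as in the original:

#include <stdio.h>
#include <stddef.h>

typedef unsigned U32;
typedef struct { U32 litLength; U32 matchLength; U32 offset; } rawSeq;

/* returns the part of seq that fits in `remaining` bytes */
static rawSeq splitForBlock(rawSeq seq, U32 remaining, U32 minMatch)
{
    if (remaining >= seq.litLength + seq.matchLength)
        return seq;                                  /* fits entirely */
    if (remaining <= seq.litLength) {
        seq.offset = 0;                              /* only literals fit */
    } else {
        seq.matchLength = remaining - seq.litLength; /* truncate the match */
        if (seq.matchLength < minMatch)
            seq.offset = 0;                          /* too short to keep */
    }
    return seq;
}

int main(void)
{
    rawSeq const s = { 10, 100, 777 };   /* hypothetical stored sequence */
    U32 const cuts[] = { 200, 50, 12, 8 };
    size_t i;
    for (i = 0; i < sizeof(cuts)/sizeof(cuts[0]); i++) {
        rawSeq const r = splitForBlock(s, cuts[i], 4);
        printf("remaining=%3u -> lit=%3u match=%3u off=%3u\n",
               cuts[i], r.litLength, r.matchLength, r.offset);
    }
    return 0;
}

remaining=50 keeps a 40-byte match; remaining=12 would leave a 2-byte match, below minMatch, so the tail is demoted to literals instead.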
*/ - ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); - return sequence; -} - -void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { - U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); - while (currPos && rawSeqStore->pos < rawSeqStore->size) { - rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; - if (currPos >= currSeq.litLength + currSeq.matchLength) { - currPos -= currSeq.litLength + currSeq.matchLength; - rawSeqStore->pos++; - } else { - rawSeqStore->posInSequence = currPos; - break; - } - } - if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { - rawSeqStore->posInSequence = 0; - } -} - -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_paramSwitch_e useRowMatchFinder, - void const* src, size_t srcSize) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - unsigned const minMatch = cParams->minMatch; - ZSTD_blockCompressor const blockCompressor = - ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); - /* Input bounds */ - BYTE const* const istart = (BYTE const*)src; - BYTE const* const iend = istart + srcSize; - /* Input positions */ - BYTE const* ip = istart; - - DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); - /* If using opt parser, use LDMs only as candidates rather than always accepting them */ - if (cParams->strategy >= ZSTD_btopt) { - size_t lastLLSize; - ms->ldmSeqStore = rawSeqStore; - lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); - ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); - return lastLLSize; - } - - assert(rawSeqStore->pos <= rawSeqStore->size); - assert(rawSeqStore->size <= rawSeqStore->capacity); - /* Loop through each sequence and apply the block compressor to the literals */ - while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { - /* maybeSplitSequence updates rawSeqStore->pos */ - rawSeq const sequence = maybeSplitSequence(rawSeqStore, - (U32)(iend - ip), minMatch); - /* End signal */ - if (sequence.offset == 0) - break; - - assert(ip + sequence.litLength + sequence.matchLength <= iend); - - /* Fill tables for block compressor */ - ZSTD_ldm_limitTableUpdate(ms, ip); - ZSTD_ldm_fillFastTables(ms, ip); - /* Run the block compressor */ - DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); - { - int i; - size_t const newLitLength = - blockCompressor(ms, seqStore, rep, ip, sequence.litLength); - ip += sequence.litLength; - /* Update the repcodes */ - for (i = ZSTD_REP_NUM - 1; i > 0; i--) - rep[i] = rep[i-1]; - rep[0] = sequence.offset; - /* Store the sequence */ - ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, - OFFSET_TO_OFFBASE(sequence.offset), - sequence.matchLength); - ip += sequence.matchLength; - } - } - /* Fill the tables for the block compressor */ - ZSTD_ldm_limitTableUpdate(ms, ip); - ZSTD_ldm_fillFastTables(ms, ip); - /* Compress the last literals */ - return blockCompressor(ms, seqStore, rep, ip, iend - ip); -} diff --git a/zstandard_android/src/compress/zstd_ldm.h b/zstandard_android/src/compress/zstd_ldm.h deleted file mode 100644 index f147021..0000000 --- a/zstandard_android/src/compress/zstd_ldm.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. 
- * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_LDM_H -#define ZSTD_LDM_H - -#if defined (__cplusplus) -extern "C" { -#endif - -#include "zstd_compress_internal.h" /* ldmParams_t, U32 */ -#include "../zstd.h" /* ZSTD_CCtx, size_t */ - -/*-************************************* -* Long distance matching -***************************************/ - -#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT - -void ZSTD_ldm_fillHashTable( - ldmState_t* state, const BYTE* ip, - const BYTE* iend, ldmParams_t const* params); - -/** - * ZSTD_ldm_generateSequences(): - * - * Generates the sequences using the long distance match finder. - * Generates long range matching sequences in `sequences`, which parse a prefix - * of the source. `sequences` must be large enough to store every sequence, - * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. - * @returns 0 or an error code. - * - * NOTE: The user must have called ZSTD_window_update() for all of the input - * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. - * NOTE: This function returns an error if it runs out of space to store - * sequences. - */ -size_t ZSTD_ldm_generateSequences( - ldmState_t* ldms, rawSeqStore_t* sequences, - ldmParams_t const* params, void const* src, size_t srcSize); - -/** - * ZSTD_ldm_blockCompress(): - * - * Compresses a block using the predefined sequences, along with a secondary - * block compressor. The literals section of every sequence is passed to the - * secondary block compressor, and those sequences are interspersed with the - * predefined sequences. Returns the length of the last literals. - * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. - * `rawSeqStore.seq` may also be updated to split the last sequence between two - * blocks. - * @return The length of the last literals. - * - * NOTE: The source must be at most the maximum block size, but the predefined - * sequences can be any size, and may be longer than the block. In the case that - * they are longer than the block, the last sequences may need to be split into - * two. We handle that case correctly, and update `rawSeqStore` appropriately. - * NOTE: This function does not return any errors. - */ -size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_paramSwitch_e useRowMatchFinder, - void const* src, size_t srcSize); - -/** - * ZSTD_ldm_skipSequences(): - * - * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. - * Avoids emitting matches less than `minMatch` bytes. - * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). - */ -void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, - U32 const minMatch); - -/* ZSTD_ldm_skipRawSeqStoreBytes(): - * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'. - * Not to be used in conjunction with ZSTD_ldm_skipSequences(). - * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). - */ -void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes); - -/** ZSTD_ldm_getTableSize() : - * Estimate the space needed for long distance matching tables or 0 if LDM is - * disabled.
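For a sense of scale: a sketch of the arithmetic behind ZSTD_ldm_getTableSize() (defined in zstd_ldm.c above), with assumed values hashLog=20 and bucketSizeLog=3, an assumed 8-byte ldmEntry_t (one U32 offset plus one U32 checksum), and workspace alignment overhead ignored:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t const hashLog = 20, bucketSizeLog = 3;   /* illustrative only */
    size_t const entrySize = 8;                     /* assumed sizeof(ldmEntry_t) */
    size_t const hashTableBytes  = ((size_t)1 << hashLog) * entrySize;
    size_t const bucketOffsBytes = (size_t)1 << (hashLog - bucketSizeLog);
    printf("hash table %zu bytes + bucket offsets %zu bytes ~= %zu KiB\n",
           hashTableBytes, bucketOffsBytes,
           (hashTableBytes + bucketOffsBytes) >> 10);
    return 0;
}

With these numbers the LDM state costs about 8 MiB of hash entries plus 128 KiB of one-byte bucket cursors, in line with the hashLog default of windowLog - LDM_HASH_RLOG seen earlier.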
- */ -size_t ZSTD_ldm_getTableSize(ldmParams_t params); - -/** ZSTD_ldm_getSeqSpace() : - * Return an upper bound on the number of sequences that can be produced by - * the long distance matcher, or 0 if LDM is disabled. - */ -size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); - -/** ZSTD_ldm_adjustParameters() : - * If the params->hashRateLog is not set, set it to its default value based on - * windowLog and params->hashLog. - * - * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to - * params->hashLog if it is not). - * - * Ensures that the minMatchLength >= targetLength during optimal parsing. - */ -void ZSTD_ldm_adjustParameters(ldmParams_t* params, - ZSTD_compressionParameters const* cParams); - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_FAST_H */ diff --git a/zstandard_android/src/compress/zstd_opt.c b/zstandard_android/src/compress/zstd_opt.c deleted file mode 100644 index 8a4345b..0000000 --- a/zstandard_android/src/compress/zstd_opt.c +++ /dev/null @@ -1,1576 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#include "zstd_compress_internal.h" -#include "hist.h" -#include "zstd_opt.h" - -#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) - -#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ -#define ZSTD_MAX_PRICE (1<<30) - -#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ - - -/*-************************************* -* Price functions for optimal parser -***************************************/ - -#if 0 /* approximation at bit level (for tests) */ -# define BITCOST_ACCURACY 0 -# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat)) -#elif 0 /* fractional bit accuracy (for tests) */ -# define BITCOST_ACCURACY 8 -# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat)) -#else /* opt==approx, ultra==accurate */ -# define BITCOST_ACCURACY 8 -# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((opt) ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) -#endif - -/* ZSTD_bitWeight() : - * provide estimated "cost" of a stat in full bits only */ -MEM_STATIC U32 ZSTD_bitWeight(U32 stat) -{ - return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); -} - -/* ZSTD_fracWeight() : - * provide fractional-bit "cost" of a stat, - * using linear interpolation approximation */ -MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) -{ - U32 const stat = rawStat + 1; - U32 const hb = ZSTD_highbit32(stat); - U32 const BWeight = hb * BITCOST_MULTIPLIER; - /* Fweight was meant for "Fractional weight" - * but it's effectively a value between 1 and 2 - * using fixed point arithmetic */ - U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; - U32 const weight = BWeight + FWeight; - assert(hb + BITCOST_ACCURACY < 31); - return weight; -} - -#if (DEBUGLEVEL>=2) -/* debugging function, - * @return price in bytes as fractional value - * for debug messages only */ -MEM_STATIC double ZSTD_fCost(int price) -{ - return (double)price / (BITCOST_MULTIPLIER*8); -} -#endif - -static int ZSTD_compressedLiterals(optState_t const* const optPtr) -{ - return optPtr->literalCompressionMode != ZSTD_ps_disable; -} - -static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) -{ - if (ZSTD_compressedLiterals(optPtr)) - optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel); - optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel); - optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel); - optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel); -} - - -static U32 sum_u32(const unsigned table[], size_t nbElts) -{ - size_t n; - U32 total = 0; - for (n=0; n<nbElts; n++) { - total += table[n]; - } - return total; -} - -typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e; - -static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1) -{ - U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); - assert(shift < 30); - for (s=0; s<lastEltIndex+1; s++) { - unsigned const base = base1 ? 1 : (table[s]>0); - unsigned const newStat = base + (table[s] >> shift); - sum += newStat; - table[s] = newStat; - } - return sum; -} - -/* ZSTD_scaleStats() : - * reduce all elt frequencies in table if sum too large - * return the resulting sum of elements */ -static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) -{ - U32 const prevsum = sum_u32(table, lastEltIndex+1); - U32 const factor = prevsum >> logTarget; - DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); - assert(logTarget < 30); - if (factor <= 1) return prevsum; - return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed); -} - -/* ZSTD_rescaleFreqs() : - * if first block (detected by optPtr->litLengthSum == 0) : init statistics - * take hints from dictionary if there is one - * and init from zero if there is none, - * using src for literals stats, and baseline stats for sequence symbols - * otherwise downscale existing stats, to be used as seed for next block.
- */ -static void -ZSTD_rescaleFreqs(optState_t* const optPtr, - const BYTE* const src, size_t const srcSize, - int const optLevel) -{ - int const compressedLiterals = ZSTD_compressedLiterals(optPtr); - DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); - optPtr->priceType = zop_dynamic; - - if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */ - - /* heuristic: use pre-defined stats for too small inputs */ - if (srcSize <= ZSTD_PREDEF_THRESHOLD) { - DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD); - optPtr->priceType = zop_predef; - } - - assert(optPtr->symbolCosts != NULL); - if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { - - /* huffman stats covering the full value set : table presumed generated by dictionary */ - optPtr->priceType = zop_dynamic; - - if (compressedLiterals) { - /* generate literals statistics from huffman table */ - unsigned lit; - assert(optPtr->litFreq != NULL); - optPtr->litSum = 0; - for (lit=0; lit<=MaxLit; lit++) { - U32 const scaleLog = 11; /* scale to 2K */ - U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit); - assert(bitCost <= scaleLog); - optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; - optPtr->litSum += optPtr->litFreq[lit]; - } } - - { unsigned ll; - FSE_CState_t llstate; - FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); - optPtr->litLengthSum = 0; - for (ll=0; ll<=MaxLL; ll++) { - U32 const scaleLog = 10; /* scale to 1K */ - U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); - assert(bitCost < scaleLog); - optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; - optPtr->litLengthSum += optPtr->litLengthFreq[ll]; - } } - - { unsigned ml; - FSE_CState_t mlstate; - FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); - optPtr->matchLengthSum = 0; - for (ml=0; ml<=MaxML; ml++) { - U32 const scaleLog = 10; - U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); - assert(bitCost < scaleLog); - optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; - optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; - } } - - { unsigned of; - FSE_CState_t ofstate; - FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); - optPtr->offCodeSum = 0; - for (of=0; of<=MaxOff; of++) { - U32 const scaleLog = 10; - U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); - assert(bitCost < scaleLog); - optPtr->offCodeFreq[of] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; - optPtr->offCodeSum += optPtr->offCodeFreq[of]; - } } - - } else { /* first block, no dictionary */ - - assert(optPtr->litFreq != NULL); - if (compressedLiterals) { - /* base initial cost of literals on direct frequency within src */ - unsigned lit = MaxLit; - HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible); - } - - { unsigned const baseLLfreqs[MaxLL+1] = { - 4, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1 - }; - ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); - optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1); - } - - { unsigned ml; - for (ml=0; ml<=MaxML; ml++) - optPtr->matchLengthFreq[ml] = 1; - } - optPtr->matchLengthSum = MaxML+1; - - { unsigned const baseOFCfreqs[MaxOff+1] = { - 6, 2, 1, 1, 2, 3, 4, 4, - 4, 3, 2, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1 - }; - ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); - optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); - } - - } - - } else { /* new block : scale down accumulated statistics */ - - if (compressedLiterals) - optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); - optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11); - optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11); - optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11); - } - - ZSTD_setBasePrices(optPtr, optLevel); -} - -/* ZSTD_rawLiteralsCost() : - * price of literals (only) in specified segment (which length can be 0). - * does not include price of literalLength symbol */ -static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, - const optState_t* const optPtr, - int optLevel) -{ - DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength); - if (litLength == 0) return 0; - - if (!ZSTD_compressedLiterals(optPtr)) - return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */ - - if (optPtr->priceType == zop_predef) - return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ - - /* dynamic statistics */ - { U32 price = optPtr->litSumBasePrice * litLength; - U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER; - U32 u; - assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER); - for (u=0; u < litLength; u++) { - U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel); - if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax; - price -= litPrice; - } - return price; - } -} - -/* ZSTD_litLengthPrice() : - * cost of literalLength symbol */ -static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) -{ - assert(litLength <= ZSTD_BLOCKSIZE_MAX); - if (optPtr->priceType == zop_predef) - return WEIGHT(litLength, optLevel); - - /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX - * because it isn't representable in the zstd format. - * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. - * In such a case, the block would be all literals. 
- */ - if (litLength == ZSTD_BLOCKSIZE_MAX) - return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); - - /* dynamic statistics */ - { U32 const llCode = ZSTD_LLcode(litLength); - return (LL_bits[llCode] * BITCOST_MULTIPLIER) - + optPtr->litLengthSumBasePrice - - WEIGHT(optPtr->litLengthFreq[llCode], optLevel); - } -} - -/* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence. - * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. - * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() - * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) - */ -FORCE_INLINE_TEMPLATE U32 -ZSTD_getMatchPrice(U32 const offBase, - U32 const matchLength, - const optState_t* const optPtr, - int const optLevel) -{ - U32 price; - U32 const offCode = ZSTD_highbit32(offBase); - U32 const mlBase = matchLength - MINMATCH; - assert(matchLength >= MINMATCH); - - if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */ - return WEIGHT(mlBase, optLevel) - + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */ - - /* dynamic statistics */ - price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); - if ((optLevel<2) /*static*/ && offCode >= 20) - price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */ - - /* match Length */ - { U32 const mlCode = ZSTD_MLcode(mlBase); - price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel)); - } - - price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */ - - DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price); - return price; -} - -/* ZSTD_updateStats() : - * assumption : literals + litLength <= iend */ -static void ZSTD_updateStats(optState_t* const optPtr, - U32 litLength, const BYTE* literals, - U32 offBase, U32 matchLength) -{ - /* literals */ - if (ZSTD_compressedLiterals(optPtr)) { - U32 u; - for (u=0; u < litLength; u++) - optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; - optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; - } - - /* literal Length */ - { U32 const llCode = ZSTD_LLcode(litLength); - optPtr->litLengthFreq[llCode]++; - optPtr->litLengthSum++; - } - - /* offset code : follows storeSeq() numeric representation */ - { U32 const offCode = ZSTD_highbit32(offBase); - assert(offCode <= MaxOff); - optPtr->offCodeFreq[offCode]++; - optPtr->offCodeSum++; - } - - /* match Length */ - { U32 const mlBase = matchLength - MINMATCH; - U32 const mlCode = ZSTD_MLcode(mlBase); - optPtr->matchLengthFreq[mlCode]++; - optPtr->matchLengthSum++; - } -} - - -/* ZSTD_readMINMATCH() : - * function safe only for comparisons - * assumption : memPtr must be at least 4 bytes before end of buffer */ -MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) -{ - switch (length) - { - default : - case 4 : return MEM_read32(memPtr); - case 3 : if (MEM_isLittleEndian()) - return MEM_read32(memPtr)<<8; - else - return MEM_read32(memPtr)>>8; - } -} - - -/* Update hashTable3 up to ip (excluded) - Assumption : always within prefix (i.e. 
not within extDict) */ -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip) -{ - U32* const hashTable3 = ms->hashTable3; - U32 const hashLog3 = ms->hashLog3; - const BYTE* const base = ms->window.base; - U32 idx = *nextToUpdate3; - U32 const target = (U32)(ip - base); - size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3); - assert(hashLog3 > 0); - - while(idx < target) { - hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; - idx++; - } - - *nextToUpdate3 = target; - return hashTable3[hash3]; -} - - -/*-************************************* -* Binary Tree search -***************************************/ -/** ZSTD_insertBt1() : add one or multiple positions to tree. - * @param ip assumed <= iend-8 . - * @param target The target of ZSTD_updateTree_internal() - we are filling to this position - * @return : nb of positions added */ -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_insertBt1( - const ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iend, - U32 const target, - U32 const mls, const int extDict) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32* const hashTable = ms->hashTable; - U32 const hashLog = cParams->hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32* const bt = ms->chainTable; - U32 const btLog = cParams->chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 matchIndex = hashTable[h]; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const base = ms->window.base; - const BYTE* const dictBase = ms->window.dictBase; - const U32 dictLimit = ms->window.dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* match; - const U32 curr = (U32)(ip-base); - const U32 btLow = btMask >= curr ? 0 : curr - btMask; - U32* smallerPtr = bt + 2*(curr&btMask); - U32* largerPtr = smallerPtr + 1; - U32 dummy32; /* to be nullified at the end */ - /* windowLow is based on target because - * we only need positions that will be in the window at the end of the tree update. 
- */ - U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); - U32 matchEndIdx = curr+8+1; - size_t bestLength = 8; - U32 nbCompares = 1U << cParams->searchLog; -#ifdef ZSTD_C_PREDICT - U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0); - U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1); - predictedSmall += (predictedSmall>0); - predictedLarge += (predictedLarge>0); -#endif /* ZSTD_C_PREDICT */ - - DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr); - - assert(curr <= target); - assert(ip <= iend-8); /* required for h calculation */ - hashTable[h] = curr; /* Update Hash Table */ - - assert(windowLow > 0); - for (; nbCompares && (matchIndex >= windowLow); --nbCompares) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - assert(matchIndex < curr); - -#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ - const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ - if (matchIndex == predictedSmall) { - /* no need to check length, result known */ - *smallerPtr = matchIndex; - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - predictedSmall = predictPtr[1] + (predictPtr[1]>0); - continue; - } - if (matchIndex == predictedLarge) { - *largerPtr = matchIndex; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - predictedLarge = predictPtr[0] + (predictPtr[0]>0); - continue; - } -#endif - - if (!extDict || (matchIndex+matchLength >= dictLimit)) { - assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */ - match = base + matchIndex; - matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - bestLength = matchLength; - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - } - - if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ - } - - if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ - smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ - matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; 
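[Editorial note — annotation, not part of the deleted zstd file] The loop that just ended at "*smallerPtr = *largerPtr = 0;" is the sorted-insertion step of zstd's binary-tree match finder: bt is a rolling buffer giving each indexed position two link slots, bt[2*(idx & btMask)] rooting the subtree of lexicographically smaller suffixes and bt[2*(idx & btMask) + 1] the larger ones, and each new position becomes the new root while older nodes are re-threaded beneath it via smallerPtr/largerPtr. The standalone C sketch below demonstrates only that re-threading idea, under simplifying assumptions of ours: plain strcmp() instead of the length-accelerated comparison, a single root instead of one root per hashTable bucket, and a string short enough that no bt slots collide. It is illustrative, not zstd code.

/* sketch_bt.c - toy version of the two-slot binary tree used above.
 * Hypothetical demo, not part of zstd: it sorts the suffixes of a short
 * string by root-inserting each position, mirroring the smallerPtr /
 * largerPtr re-threading of ZSTD_insertBt1 (minus match-length
 * bookkeeping and window limits). */
#include <stdio.h>
#include <string.h>

#define BT_LOG  4
#define BT_MASK ((1u << BT_LOG) - 1)

static unsigned bt[2 << BT_LOG];   /* two link slots per tracked position */

/* Insert suffix src+pos as the new root; index 0 is the "null" link,
 * so valid positions start at 1. Returns the new root. */
static unsigned bt_insert(const char* src, unsigned root, unsigned pos)
{
    unsigned* smallerPtr = &bt[2 * (pos & BT_MASK)];
    unsigned* largerPtr  = smallerPtr + 1;
    unsigned  cur = root;
    while (cur != 0) {
        unsigned* nextPtr = &bt[2 * (cur & BT_MASK)];
        if (strcmp(src + cur, src + pos) < 0) {
            *smallerPtr = cur;        /* cur joins pos's smaller subtree */
            smallerPtr = nextPtr + 1; /* descend into cur's larger children */
            cur = nextPtr[1];
        } else {
            *largerPtr = cur;         /* cur joins pos's larger subtree */
            largerPtr = nextPtr;      /* descend into cur's smaller children */
            cur = nextPtr[0];
        }
    }
    *smallerPtr = *largerPtr = 0;     /* nullify dangling links, as above */
    return pos;                       /* newest position becomes the root */
}

static void bt_walk(const char* src, unsigned node)  /* in-order = sorted */
{
    if (node == 0) return;
    bt_walk(src, bt[2 * (node & BT_MASK)]);
    printf("%u: %s\n", node, src + node);
    bt_walk(src, bt[2 * (node & BT_MASK) + 1]);
}

int main(void)
{
    const char* s = "xbanana";       /* s[0] is padding so positions start at 1 */
    unsigned root = 0, p;
    for (p = 1; s[p] != '\0'; p++)
        root = bt_insert(s, root, p);
    bt_walk(s, root);                /* prints: a, ana, anana, banana, na, nana */
    return 0;
}

Because bt is masked, it is a rolling buffer and older entries are eventually overwritten as positions advance; that is why the real loop above stops at btLow ("beyond tree size, stop the search") instead of following links past the tree's effective size.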
- { U32 positions = 0; - if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */ - assert(matchEndIdx > curr + 8); - return MAX(positions, matchEndIdx - (curr + 8)); - } -} - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_updateTree_internal( - ZSTD_matchState_t* ms, - const BYTE* const ip, const BYTE* const iend, - const U32 mls, const ZSTD_dictMode_e dictMode) -{ - const BYTE* const base = ms->window.base; - U32 const target = (U32)(ip - base); - U32 idx = ms->nextToUpdate; - DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", - idx, target, dictMode); - - while(idx < target) { - U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict); - assert(idx < (U32)(idx + forward)); - idx += forward; - } - assert((size_t)(ip - base) <= (size_t)(U32)(-1)); - assert((size_t)(iend - base) <= (size_t)(U32)(-1)); - ms->nextToUpdate = target; -} - -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { - ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); -} - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 -ZSTD_insertBtAndGetAllMatches ( - ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip, const BYTE* const iLimit, - const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ - const U32 lengthToBeat, - const U32 mls /* template */) -{ - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); - const BYTE* const base = ms->window.base; - U32 const curr = (U32)(ip-base); - U32 const hashLog = cParams->hashLog; - U32 const minMatch = (mls==3) ? 3 : 4; - U32* const hashTable = ms->hashTable; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32 matchIndex = hashTable[h]; - U32* const bt = ms->chainTable; - U32 const btLog = cParams->chainLog - 1; - U32 const btMask= (1U << btLog) - 1; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const dictBase = ms->window.dictBase; - U32 const dictLimit = ms->window.dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - U32 const btLow = (btMask >= curr) ? 0 : curr - btMask; - U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); - U32 const matchLow = windowLow ? windowLow : 1; - U32* smallerPtr = bt + 2*(curr&btMask); - U32* largerPtr = bt + 2*(curr&btMask) + 1; - U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */ - U32 dummy32; /* to be nullified at the end */ - U32 mnum = 0; - U32 nbCompares = 1U << cParams->searchLog; - - const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; - const ZSTD_compressionParameters* const dmsCParams = - dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; - const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; - const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; - U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0; - U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0; - U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? 
windowLow - dmsHighLimit : 0; - U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; - U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; - U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0; - U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; - - size_t bestLength = lengthToBeat-1; - DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr); - - /* check repCode */ - assert(ll0 <= 1); /* necessarily 1 or 0 */ - { U32 const lastR = ZSTD_REP_NUM + ll0; - U32 repCode; - for (repCode = ll0; repCode < lastR; repCode++) { - U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; - U32 const repIndex = curr - repOffset; - U32 repLen = 0; - assert(curr >= dictLimit); - if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */ - /* We must validate the repcode offset because when we're using a dictionary the - * valid offset range shrinks when the dictionary goes out of bounds. - */ - if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) { - repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; - } - } else { /* repIndex < dictLimit || repIndex >= curr */ - const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ? - dmsBase + repIndex - dmsIndexDelta : - dictBase + repIndex; - assert(curr >= windowLow); - if ( dictMode == ZSTD_extDict - && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */ - & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) - && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { - repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; - } - if (dictMode == ZSTD_dictMatchState - && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */ - & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) - && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { - repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; - } } - /* save longer solution */ - if (repLen > bestLength) { - DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", - repCode, ll0, repOffset, repLen); - bestLength = repLen; - matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */ - matches[mnum].len = (U32)repLen; - mnum++; - if ( (repLen > sufficient_len) - | (ip+repLen == iLimit) ) { /* best possible */ - return mnum; - } } } } - - /* HC3 match finder */ - if ((mls == 3) /*static*/ && (bestLength < mls)) { - U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); - if ((matchIndex3 >= matchLow) - & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { - size_t mlen; - if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) { - const BYTE* const match = base + matchIndex3; - mlen = ZSTD_count(ip, match, iLimit); - } else { - const BYTE* const match = dictBase + matchIndex3; - mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); - } - - /* save best solution */ - if 
(mlen >= mls /* == 3 > bestLength */) { - DEBUGLOG(8, "found small match with hlog3, of length %u", - (U32)mlen); - bestLength = mlen; - assert(curr > matchIndex3); - assert(mnum==0); /* no prior solution */ - matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3); - matches[0].len = (U32)mlen; - mnum = 1; - if ( (mlen > sufficient_len) | - (ip+mlen == iLimit) ) { /* best possible length */ - ms->nextToUpdate = curr+1; /* skip insertion */ - return 1; - } } } - /* no dictMatchState lookup: dicts don't have a populated HC3 table */ - } /* if (mls == 3) */ - - hashTable[h] = curr; /* Update Hash Table */ - - for (; nbCompares && (matchIndex >= matchLow); --nbCompares) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - const BYTE* match; - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - assert(curr > matchIndex); - - if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) { - assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ - match = base + matchIndex; - if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ - matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); - } else { - match = dictBase + matchIndex; - assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* prepare for match[matchLength] read */ - } - - if (matchLength > bestLength) { - DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)", - (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); - assert(matchEndIdx > matchIndex); - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - bestLength = matchLength; - matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); - matches[mnum].len = (U32)matchLength; - mnum++; - if ( (matchLength > ZSTD_OPT_NUM) - | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { - if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ - break; /* drop, to preserve bt consistency (miss a little bit of compression) */ - } } - - if (match[matchLength] < ip[matchLength]) { - /* match smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */ - matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */ - } else { - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; - - assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
*/ - if (dictMode == ZSTD_dictMatchState && nbCompares) { - size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); - U32 dictMatchIndex = dms->hashTable[dmsH]; - const U32* const dmsBt = dms->chainTable; - commonLengthSmaller = commonLengthLarger = 0; - for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) { - const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE* match = dmsBase + dictMatchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart); - if (dictMatchIndex+matchLength >= dmsHighLimit) - match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */ - - if (matchLength > bestLength) { - matchIndex = dictMatchIndex + dmsIndexDelta; - DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)", - (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - bestLength = matchLength; - matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); - matches[mnum].len = (U32)matchLength; - mnum++; - if ( (matchLength > ZSTD_OPT_NUM) - | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } } - - if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ - if (match[matchLength] < ip[matchLength]) { - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - commonLengthLarger = matchLength; - dictMatchIndex = nextPtr[0]; - } } } /* if (dictMode == ZSTD_dictMatchState) */ - - assert(matchEndIdx > curr+8); - ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ - return mnum; -} - -typedef U32 (*ZSTD_getAllMatchesFn)( - ZSTD_match_t*, - ZSTD_matchState_t*, - U32*, - const BYTE*, - const BYTE*, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, - U32 const lengthToBeat); - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -U32 ZSTD_btGetAllMatches_internal( - ZSTD_match_t* matches, - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* ip, - const BYTE* const iHighLimit, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, - U32 const lengthToBeat, - const ZSTD_dictMode_e dictMode, - const U32 mls) -{ - assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls); - DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls); - if (ip < ms->window.base + ms->nextToUpdate) - return 0; /* skipped area */ - ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); - return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls); -} - -#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls - -#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \ - static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \ - ZSTD_match_t* matches, \ - ZSTD_matchState_t* ms, \ - U32* nextToUpdate3, \ - const BYTE* ip, \ - const BYTE* const iHighLimit, \ - const U32 rep[ZSTD_REP_NUM], \ - U32 const ll0, \ - U32 const lengthToBeat) \ - { \ - return ZSTD_btGetAllMatches_internal( \ - matches, ms, nextToUpdate3, ip, iHighLimit, \ - 
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \ - } - -#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \ - GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \ - GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \ - GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \ - GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6) - -GEN_ZSTD_BT_GET_ALL_MATCHES(noDict) -GEN_ZSTD_BT_GET_ALL_MATCHES(extDict) -GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState) - -#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \ - { \ - ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \ - ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \ - ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \ - ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \ - } - -static ZSTD_getAllMatchesFn -ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode) -{ - ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = { - ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict), - ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict), - ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState) - }; - U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6); - assert((U32)dictMode < 3); - assert(mls - 3 < 4); - return getAllMatchesFns[(int)dictMode][mls - 3]; -} - -/************************* -* LDM helper functions * -*************************/ - -/* Struct containing info needed to make decision about ldm inclusion */ -typedef struct { - rawSeqStore_t seqStore; /* External match candidates store for this block */ - U32 startPosInBlock; /* Start position of the current match candidate */ - U32 endPosInBlock; /* End position of the current match candidate */ - U32 offset; /* Offset of the match candidate */ -} ZSTD_optLdm_t; - -/* ZSTD_optLdm_skipRawSeqStoreBytes(): - * Moves forward in @rawSeqStore by @nbBytes, - * which will update the fields 'pos' and 'posInSequence'. - */ -static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) -{ - U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); - while (currPos && rawSeqStore->pos < rawSeqStore->size) { - rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; - if (currPos >= currSeq.litLength + currSeq.matchLength) { - currPos -= currSeq.litLength + currSeq.matchLength; - rawSeqStore->pos++; - } else { - rawSeqStore->posInSequence = currPos; - break; - } - } - if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { - rawSeqStore->posInSequence = 0; - } -} - -/* ZSTD_opt_getNextMatchAndUpdateSeqStore(): - * Calculates the beginning and end of the next match in the current block. - * Updates 'pos' and 'posInSequence' of the ldmSeqStore. - */ -static void -ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock, - U32 blockBytesRemaining) -{ - rawSeq currSeq; - U32 currBlockEndPos; - U32 literalsBytesRemaining; - U32 matchBytesRemaining; - - /* Setting match end position to MAX to ensure we never use an LDM during this block */ - if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { - optLdm->startPosInBlock = UINT_MAX; - optLdm->endPosInBlock = UINT_MAX; - return; - } - /* Calculate appropriate bytes left in matchLength and litLength - * after adjusting based on ldmSeqStore->posInSequence */ - currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; - assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); - currBlockEndPos = currPosInBlock + blockBytesRemaining; - literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ? - currSeq.litLength - (U32)optLdm->seqStore.posInSequence : - 0; - matchBytesRemaining = (literalsBytesRemaining == 0) ? 
- currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) : - currSeq.matchLength; - - /* If there are more literal bytes than bytes remaining in block, no ldm is possible */ - if (literalsBytesRemaining >= blockBytesRemaining) { - optLdm->startPosInBlock = UINT_MAX; - optLdm->endPosInBlock = UINT_MAX; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); - return; - } - - /* Matches may be < MINMATCH by this process. In that case, we will reject them - when we are deciding whether or not to add the ldm */ - optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; - optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; - optLdm->offset = currSeq.offset; - - if (optLdm->endPosInBlock > currBlockEndPos) { - /* Match ends after the block ends, we can't use the whole match */ - optLdm->endPosInBlock = currBlockEndPos; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); - } else { - /* Consume nb of bytes equal to size of sequence left */ - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining); - } -} - -/* ZSTD_optLdm_maybeAddMatch(): - * Adds a match if it's long enough, - * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock', - * into 'matches'. Maintains the correct ordering of 'matches'. - */ -static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, - const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) -{ - U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; - /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ - U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; - - /* Ensure that current block position is not outside of the match */ - if (currPosInBlock < optLdm->startPosInBlock - || currPosInBlock >= optLdm->endPosInBlock - || candidateMatchLength < MINMATCH) { - return; - } - - if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { - U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset); - DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u", - candidateOffBase, candidateMatchLength, currPosInBlock); - matches[*nbMatches].len = candidateMatchLength; - matches[*nbMatches].off = candidateOffBase; - (*nbMatches)++; - } -} - -/* ZSTD_optLdm_processMatchCandidate(): - * Wrapper function to update ldm seq store and call ldm functions as necessary. - */ -static void -ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, - ZSTD_match_t* matches, U32* nbMatches, - U32 currPosInBlock, U32 remainingBytes) -{ - if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { - return; - } - - if (currPosInBlock >= optLdm->endPosInBlock) { - if (currPosInBlock > optLdm->endPosInBlock) { - /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily - * at the end of a match from the ldm seq store, and will often be some bytes - * over beyond matchEndPosInBlock. 
As such, we need to correct for these "overshoots" - */ - U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); - } - ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); - } - ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); -} - - -/*-******************************* -* Optimal parser -*********************************/ - -#if 0 /* debug */ - -static void -listStats(const U32* table, int lastEltID) -{ - int const nbElts = lastEltID + 1; - int enb; - for (enb=0; enb < nbElts; enb++) { - (void)table; - /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */ - RAWLOG(2, "%4i,", table[enb]); - } - RAWLOG(2, " \n"); -} - -#endif - -#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel) -#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel) -#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1)) - -FORCE_INLINE_TEMPLATE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t -ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, - seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize, - const int optLevel, - const ZSTD_dictMode_e dictMode) -{ - optState_t* const optStatePtr = &ms->opt; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ms->window.base; - const BYTE* const prefixStart = base + ms->window.dictLimit; - const ZSTD_compressionParameters* const cParams = &ms->cParams; - - ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); - - U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); - U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4; - U32 nextToUpdate3 = ms->nextToUpdate; - - ZSTD_optimal_t* const opt = optStatePtr->priceTable; - ZSTD_match_t* const matches = optStatePtr->matchTable; - ZSTD_optimal_t lastStretch; - ZSTD_optLdm_t optLdm; - - ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t)); - - optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore; - optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; - ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); - - /* init */ - DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u", - (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate); - assert(optLevel <= 2); - ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel); - ip += (ip==prefixStart); - - /* Match Loop */ - while (ip < ilimit) { - U32 cur, last_pos = 0; - - /* find first match */ - { U32 const litlen = (U32)(ip - anchor); - U32 const ll0 = !litlen; - U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); - ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, - (U32)(ip-istart), (U32)(iend-ip)); - if (!nbMatches) { - DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart)); - ip++; - continue; - } - - /* Match found: let's store this solution, and eventually find more candidates. - * During this forward pass, @opt is used to store stretches, - * defined as "a match followed by N literals". - * Note how this is different from a Sequence, which is "N literals followed by a match". - * Storing stretches allows us to store different match predecessors - * for each literal position part of a literals run. 
*/ - - /* initialize opt[0] */ - opt[0].mlen = 0; /* there are only literals so far */ - opt[0].litlen = litlen; - /* No need to include the actual price of the literals before the first match - * because it is static for the duration of the forward pass, and is included - * in every subsequent price. But, we include the literal length because - * the cost variation of litlen depends on the value of litlen. - */ - opt[0].price = LL_PRICE(litlen); - ZSTD_STATIC_ASSERT(sizeof(opt[0].rep[0]) == sizeof(rep[0])); - ZSTD_memcpy(&opt[0].rep, rep, sizeof(opt[0].rep)); - - /* large match -> immediate encoding */ - { U32 const maxML = matches[nbMatches-1].len; - U32 const maxOffBase = matches[nbMatches-1].off; - DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series", - nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart)); - - if (maxML > sufficient_len) { - lastStretch.litlen = 0; - lastStretch.mlen = maxML; - lastStretch.off = maxOffBase; - DEBUGLOG(6, "large match (%u>%u) => immediate encoding", - maxML, sufficient_len); - cur = 0; - last_pos = maxML; - goto _shortestPath; - } } - - /* set prices for first matches starting position == 0 */ - assert(opt[0].price >= 0); - { U32 pos; - U32 matchNb; - for (pos = 1; pos < minMatch; pos++) { - opt[pos].price = ZSTD_MAX_PRICE; - opt[pos].mlen = 0; - opt[pos].litlen = litlen + pos; - } - for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offBase = matches[matchNb].off; - U32 const end = matches[matchNb].len; - for ( ; pos <= end ; pos++ ) { - int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); - int const sequencePrice = opt[0].price + matchPrice; - DEBUGLOG(7, "rPos:%u => set initial price : %.2f", - pos, ZSTD_fCost(sequencePrice)); - opt[pos].mlen = pos; - opt[pos].off = offBase; - opt[pos].litlen = 0; /* end of match */ - opt[pos].price = sequencePrice + LL_PRICE(0); - } - } - last_pos = pos-1; - opt[pos].price = ZSTD_MAX_PRICE; - } - } - - /* check further positions */ - for (cur = 1; cur <= last_pos; cur++) { - const BYTE* const inr = ip + cur; - assert(cur <= ZSTD_OPT_NUM); - DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur); - - /* Fix current position with one literal if cheaper */ - { U32 const litlen = opt[cur-1].litlen + 1; - int const price = opt[cur-1].price - + LIT_PRICE(ip+cur-1) - + LL_INCPRICE(litlen); - assert(price < 1000000000); /* overflow check */ - if (price <= opt[cur].price) { - ZSTD_optimal_t const prevMatch = opt[cur]; - DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, - opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); - opt[cur] = opt[cur-1]; - opt[cur].litlen = litlen; - opt[cur].price = price; - if ( (optLevel >= 1) /* additional check only for higher modes */ - && (prevMatch.litlen == 0) /* replace a match */ - && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */ - && LIKELY(ip + cur < iend) - ) { - /* check next position, in case it would be cheaper */ - int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1); - int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1); - DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f", - cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals)); - if ( (with1literal < withMoreLiterals) - && (with1literal < opt[cur+1].price) ) { - /* update offset history - before it disappears */ - U32 const prev = cur - 
prevMatch.mlen; - repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0); - assert(cur >= prevMatch.mlen); - DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !", - ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals), - newReps.rep[0], newReps.rep[1], newReps.rep[2] ); - opt[cur+1] = prevMatch; /* mlen & offbase */ - ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(repcodes_t)); - opt[cur+1].litlen = 1; - opt[cur+1].price = with1literal; - if (last_pos < cur+1) last_pos = cur+1; - } - } - } else { - DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price)); - } - } - - /* Offset history is not updated during match comparison. - * Do it here, now that the match is selected and confirmed. - */ - ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); - assert(cur >= opt[cur].mlen); - if (opt[cur].litlen == 0) { - /* just finished a match => alter offset history */ - U32 const prev = cur - opt[cur].mlen; - repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0); - ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); - } - - /* last match must start at a minimum distance of 8 from oend */ - if (inr > ilimit) continue; - - if (cur == last_pos) break; - - if ( (optLevel==0) /*static_test*/ - && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { - DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1); - continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ - } - - assert(opt[cur].price >= 0); - { U32 const ll0 = (opt[cur].litlen == 0); - int const previousPrice = opt[cur].price; - int const basePrice = previousPrice + LL_PRICE(0); - U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); - U32 matchNb; - - ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, - (U32)(inr-istart), (U32)(iend-inr)); - - if (!nbMatches) { - DEBUGLOG(7, "rPos:%u : no match found", cur); - continue; - } - - { U32 const longestML = matches[nbMatches-1].len; - DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of longest ML=%u", - inr-istart, cur, nbMatches, longestML); - - if ( (longestML > sufficient_len) - || (cur + longestML >= ZSTD_OPT_NUM) - || (ip + cur + longestML >= iend) ) { - lastStretch.mlen = longestML; - lastStretch.off = matches[nbMatches-1].off; - lastStretch.litlen = 0; - last_pos = cur + longestML; - goto _shortestPath; - } } - - /* set prices using matches found at position == cur */ - for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offset = matches[matchNb].off; - U32 const lastML = matches[matchNb].len; - U32 const startML = (matchNb>0) ? 
matches[matchNb-1].len+1 : minMatch; - U32 mlen; - - DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u", - matchNb, matches[matchNb].off, lastML, opt[cur].litlen); - - for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ - U32 const pos = cur + mlen; - int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); - - if ((pos > last_pos) || (price < opt[pos].price)) { - DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", - pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); - while (last_pos < pos) { - /* fill empty positions, for future comparisons */ - last_pos++; - opt[last_pos].price = ZSTD_MAX_PRICE; - opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */ - } - opt[pos].mlen = mlen; - opt[pos].off = offset; - opt[pos].litlen = 0; - opt[pos].price = price; - } else { - DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", - pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); - if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ - } - } } } - opt[last_pos+1].price = ZSTD_MAX_PRICE; - } /* for (cur = 1; cur <= last_pos; cur++) */ - - lastStretch = opt[last_pos]; - assert(cur >= lastStretch.mlen); - cur = last_pos - lastStretch.mlen; - -_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ - assert(opt[0].mlen == 0); - assert(last_pos >= lastStretch.mlen); - assert(cur == last_pos - lastStretch.mlen); - - if (lastStretch.mlen==0) { - /* no solution : all matches have been converted into literals */ - assert(lastStretch.litlen == (ip - anchor) + last_pos); - ip += last_pos; - continue; - } - assert(lastStretch.off > 0); - - /* Update offset history */ - if (lastStretch.litlen == 0) { - /* finishing on a match : update offset history */ - repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0); - ZSTD_memcpy(rep, &reps, sizeof(repcodes_t)); - } else { - ZSTD_memcpy(rep, lastStretch.rep, sizeof(repcodes_t)); - assert(cur >= lastStretch.litlen); - cur -= lastStretch.litlen; - } - - /* Let's write the shortest path solution. - * It is stored in @opt in reverse order, - * starting from @storeEnd (==cur+2), - * effectively partially @opt overwriting. 
- * Content is changed too: - * - So far, @opt stored stretches, aka a match followed by literals - * - Now, it will store sequences, aka literals followed by a match - */ - { U32 const storeEnd = cur + 2; - U32 storeStart = storeEnd; - U32 stretchPos = cur; - - DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", - last_pos, cur); (void)last_pos; - assert(storeEnd < ZSTD_OPT_SIZE); - DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", - storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off); - if (lastStretch.litlen > 0) { - /* last "sequence" is unfinished: just a bunch of literals */ - opt[storeEnd].litlen = lastStretch.litlen; - opt[storeEnd].mlen = 0; - storeStart = storeEnd-1; - opt[storeStart] = lastStretch; - } else { - opt[storeEnd] = lastStretch; /* note: litlen will be fixed */ - storeStart = storeEnd; - } - while (1) { - ZSTD_optimal_t nextStretch = opt[stretchPos]; - opt[storeStart].litlen = nextStretch.litlen; - DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)", - opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off); - if (nextStretch.mlen == 0) { - /* reaching beginning of segment */ - break; - } - storeStart--; - opt[storeStart] = nextStretch; /* note: litlen will be fixed */ - assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); - stretchPos -= nextStretch.litlen + nextStretch.mlen; - } - - /* save sequences */ - DEBUGLOG(6, "sending selected sequences into seqStore"); - { U32 storePos; - for (storePos=storeStart; storePos <= storeEnd; storePos++) { - U32 const llen = opt[storePos].litlen; - U32 const mlen = opt[storePos].mlen; - U32 const offBase = opt[storePos].off; - U32 const advance = llen + mlen; - DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", - anchor - istart, (unsigned)llen, (unsigned)mlen); - - if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ - assert(storePos == storeEnd); /* must be last sequence */ - ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */ - continue; /* will finish */ - } - - assert(anchor + llen <= iend); - ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); - anchor += advance; - ip = anchor; - } } - DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]); - - /* update all costs */ - ZSTD_setBasePrices(optStatePtr, optLevel); - } - } /* while (ip < ilimit) */ - - /* Return the last literals size */ - return (size_t)(iend - anchor); -} -#endif /* build exclusions */ - -#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -static size_t ZSTD_compressBlock_opt0( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) -{ - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR -static size_t ZSTD_compressBlock_opt2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) -{ - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_compressBlock_btopt"); - return
ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); -} -#endif - - - - -#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR -/* ZSTD_initStats_ultra(): - * make a first compression pass, just to seed stats with more accurate starting values. - * only works on first block, with no dictionary and no ldm. - * this function cannot error out, its narrow contract must be respected. - */ -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, - seqStore_t* seqStore, - U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ - ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); - - DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize); - assert(ms->opt.litLengthSum == 0); /* first block */ - assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */ - assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ - assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */ - - ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ - - /* invalidate first scan from history, only keep entropy stats */ - ZSTD_resetSeqStore(seqStore); - ms->window.base -= srcSize; - ms->window.dictLimit += (U32)srcSize; - ms->window.lowLimit = ms->window.dictLimit; - ms->nextToUpdate = ms->window.dictLimit; - -} - -size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); -} - -size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - U32 const curr = (U32)((const BYTE*)src - ms->window.base); - DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); - - /* 2-passes strategy: - * this strategy makes a first pass over first block to collect statistics - * in order to seed next round's statistics with it. - * After 1st pass, function forgets history, and starts a new block. - * Consequently, this can only work if no data has been previously loaded in tables, - * aka, no dictionary, no prefix, no ldm preprocessing. - * The compression ratio gain is generally small (~0.5% on first block), - * the cost is 2x cpu time on first block. 
*/ - assert(srcSize <= ZSTD_BLOCKSIZE_MAX); - if ( (ms->opt.litLengthSum==0) /* first block */ - && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ - && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ - && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ - && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */ - ) { - ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); - } - - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); -} -#endif - -#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); -} - -size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - const void* src, size_t srcSize) -{ - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict); -} -#endif - -/* note : no btultra2 variant for extDict nor dictMatchState, - * because btultra2 is not meant to work with dictionaries - * and is only specific for the first block (no prefix) */ diff --git a/zstandard_android/src/compress/zstd_opt.h b/zstandard_android/src/compress/zstd_opt.h deleted file mode 100644 index d4e7113..0000000 --- a/zstandard_android/src/compress/zstd_opt.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#ifndef ZSTD_OPT_H -#define ZSTD_OPT_H - -#if defined (__cplusplus) -extern "C" { -#endif - -#include "zstd_compress_internal.h" - -#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ - || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) -/* used in ZSTD_loadDictionaryContent() */ -void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); -#endif - -#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt -#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState -#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict -#else -#define ZSTD_COMPRESSBLOCK_BTOPT NULL -#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL -#endif - -#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR -size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - - /* note : no btultra2 variant for extDict nor dictMatchState, - * because btultra2 is not meant to work with dictionaries - * and is only specific for the first block (no prefix) */ -size_t ZSTD_compressBlock_btultra2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); - -#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra -#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState -#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict -#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2 -#else -#define ZSTD_COMPRESSBLOCK_BTULTRA NULL -#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL -#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL -#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL -#endif - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_OPT_H */ diff --git a/zstandard_android/src/compress/zstdmt_compress.c b/zstandard_android/src/compress/zstdmt_compress.c deleted file mode 100644 index 86ccce3..0000000 --- a/zstandard_android/src/compress/zstdmt_compress.c +++ /dev/null @@ -1,1882 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - - -/* ====== Compiler specifics ====== */ -#if defined(_MSC_VER) -# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ -#endif - - -/* ====== Dependencies ====== */ -#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ -#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */ -#include "../common/mem.h" /* MEM_STATIC */ -#include "../common/pool.h" /* threadpool */ -#include "../common/threading.h" /* mutex */ -#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ -#include "zstd_ldm.h" -#include "zstdmt_compress.h" - -/* Guards code to support resizing the SeqPool. - * We will want to resize the SeqPool to save memory in the future. - * Until then, comment the code out since it is unused. - */ -#define ZSTD_RESIZE_SEQPOOL 0 - -/* ====== Debug ====== */ -#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ - && !defined(_MSC_VER) \ - && !defined(__MINGW32__) - -# include <stdio.h> -# include <unistd.h> -# include <sys/times.h> - -# define DEBUG_PRINTHEX(l,p,n) \ - do { \ - unsigned debug_u; \ - for (debug_u=0; debug_u<(n); debug_u++) \ - RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ - RAWLOG(l, " \n"); \ - } while (0) - -static unsigned long long GetCurrentClockTimeMicroseconds(void) -{ - static clock_t _ticksPerSecond = 0; - if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); - - { struct tms junk; clock_t newTicks = (clock_t) times(&junk); - return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); -} } - -#define MUTEX_WAIT_TIME_DLEVEL 6 -#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) \ - do { \ - if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ - unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ - ZSTD_pthread_mutex_lock(mutex); \ - { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ - unsigned long long const elapsedTime = (afterTime-beforeTime); \ - if (elapsedTime > 1000) { \ - /* or whatever threshold you like; I'm using 1 millisecond here */ \ - DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \ - "Thread took %llu microseconds to acquire mutex %s \n", \ - elapsedTime, #mutex); \ - } } \ - } else { \ - ZSTD_pthread_mutex_lock(mutex); \ - } \ - } while (0) - -#else - -# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) -# define DEBUG_PRINTHEX(l,p,n) do { } while (0) - -#endif - - -/* ===== Buffer Pool ===== */ -/* a single Buffer Pool can be invoked from multiple threads in parallel */ - -typedef struct buffer_s { - void* start; - size_t capacity; -} buffer_t; - -static const buffer_t g_nullBuffer = { NULL, 0 }; - -typedef struct ZSTDMT_bufferPool_s { - ZSTD_pthread_mutex_t poolMutex; - size_t bufferSize; - unsigned totalBuffers; - unsigned nbBuffers; - ZSTD_customMem cMem; - buffer_t* buffers; -} ZSTDMT_bufferPool; - -static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) -{ - DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); - if (!bufPool) return; /* compatibility with free on NULL */ - if (bufPool->buffers) { - unsigned u; - for (u=0; u<bufPool->totalBuffers; u++) { - DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start); - ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); - } - ZSTD_customFree(bufPool->buffers, bufPool->cMem); - } - ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); - ZSTD_customFree(bufPool, bufPool->cMem); -}
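For orientation, the pool above follows a strict create / get / release / free discipline. A minimal usage sketch (illustration only, not part of the deleted file; it assumes the static helpers shown in this hunk, plus the BUF_POOL_MAX_NB_BUFFERS macro defined further down and ZSTD_defaultCMem from the surrounding zstd sources):

    ZSTDMT_bufferPool* pool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(4), ZSTD_defaultCMem);
    ZSTDMT_setBufferSize(pool, 64 KB);        /* every buffer handed out from now on holds >= 64 KB */
    {   buffer_t b = ZSTDMT_getBuffer(pool);  /* pooled buffer reused only if 64 KB <= capacity <= 8 * 64 KB */
        /* ... fill b.start, up to b.capacity bytes ... */
        ZSTDMT_releaseBuffer(pool, b);        /* parked for reuse, not freed */
    }
    ZSTDMT_freeBufferPool(pool);              /* requires all buffers returned first */

The reuse window comes from the (availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize) test in ZSTDMT_getBuffer below: a pooled buffer is recycled only when it is large enough but less than 8x the requested size; otherwise it is scrapped and a fresh one allocated.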
-static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem) -{ - ZSTDMT_bufferPool* const bufPool = - (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem); - if (bufPool==NULL) return NULL; - if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { - ZSTD_customFree(bufPool, cMem); - return NULL; - } - bufPool->buffers = (buffer_t*)ZSTD_customCalloc(maxNbBuffers * sizeof(buffer_t), cMem); - if (bufPool->buffers==NULL) { - ZSTDMT_freeBufferPool(bufPool); - return NULL; - } - bufPool->bufferSize = 64 KB; - bufPool->totalBuffers = maxNbBuffers; - bufPool->nbBuffers = 0; - bufPool->cMem = cMem; - return bufPool; -} - -/* only works at initialization, not during compression */ -static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) -{ - size_t const poolSize = sizeof(*bufPool); - size_t const arraySize = bufPool->totalBuffers * sizeof(buffer_t); - unsigned u; - size_t totalBufferSize = 0; - ZSTD_pthread_mutex_lock(&bufPool->poolMutex); - for (u=0; u<bufPool->totalBuffers; u++) - totalBufferSize += bufPool->buffers[u].capacity; - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - - return poolSize + arraySize + totalBufferSize; -} - -/* ZSTDMT_setBufferSize() : - * all future buffers provided by this buffer pool will have _at least_ this size - * note : it's better for all buffers to have same size, - * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ -static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) -{ - ZSTD_pthread_mutex_lock(&bufPool->poolMutex); - DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize); - bufPool->bufferSize = bSize; - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); -} - - -static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers) -{ - if (srcBufPool==NULL) return NULL; - if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ - return srcBufPool; - /* need a larger buffer pool */ - { ZSTD_customMem const cMem = srcBufPool->cMem; - size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ - ZSTDMT_bufferPool* newBufPool; - ZSTDMT_freeBufferPool(srcBufPool); - newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); - if (newBufPool==NULL) return newBufPool; - ZSTDMT_setBufferSize(newBufPool, bSize); - return newBufPool; - } -} - -/** ZSTDMT_getBuffer() : - * assumption : bufPool must be valid - * @return : a buffer, with start pointer and size - * note: allocation may fail, in this case, start==NULL and size==0 */ -static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) -{ - size_t const bSize = bufPool->bufferSize; - DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); - ZSTD_pthread_mutex_lock(&bufPool->poolMutex); - if (bufPool->nbBuffers) { /* try to use an existing buffer */ - buffer_t const buf = bufPool->buffers[--(bufPool->nbBuffers)]; - size_t const availBufferSize = buf.capacity; - bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; - if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { - /* large enough, but not too much */ - DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", - bufPool->nbBuffers, (U32)buf.capacity); - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - return buf; - } - /* size conditions not respected : scratch this buffer, create new one */ - DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); - ZSTD_customFree(buf.start, bufPool->cMem); - } - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - /* create new buffer */ - DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); - {
buffer_t buffer; - void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); - buffer.start = start; /* note : start can be NULL if malloc fails ! */ - buffer.capacity = (start==NULL) ? 0 : bSize; - if (start==NULL) { - DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); - } else { - DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); - } - return buffer; - } -} - -#if ZSTD_RESIZE_SEQPOOL -/** ZSTDMT_resizeBuffer() : - * assumption : bufPool must be valid - * @return : a buffer that is at least the buffer pool buffer size. - * If a reallocation happens, the data in the input buffer is copied. - */ -static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) -{ - size_t const bSize = bufPool->bufferSize; - if (buffer.capacity < bSize) { - void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); - buffer_t newBuffer; - newBuffer.start = start; - newBuffer.capacity = start == NULL ? 0 : bSize; - if (start != NULL) { - assert(newBuffer.capacity >= buffer.capacity); - ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity); - DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); - return newBuffer; - } - DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); - } - return buffer; -} -#endif - -/* store buffer for later re-use, up to pool capacity */ -static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) -{ - DEBUGLOG(5, "ZSTDMT_releaseBuffer"); - if (buf.start == NULL) return; /* compatible with release on NULL */ - ZSTD_pthread_mutex_lock(&bufPool->poolMutex); - if (bufPool->nbBuffers < bufPool->totalBuffers) { - bufPool->buffers[bufPool->nbBuffers++] = buf; /* stored for later use */ - DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", - (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - return; - } - ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); - /* Reached bufferPool capacity (note: should not happen) */ - DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); - ZSTD_customFree(buf.start, bufPool->cMem); -} - -/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released. - * The 3 additional buffers are as follows: - * 1 buffer for input loading - * 1 buffer for "next input" when submitting current one - * 1 buffer stuck in queue */ -#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) - -/* After a worker releases its rawSeqStore, it is immediately ready for reuse. - * So we only need one seq buffer per worker. 
*/ -#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) - -/* ===== Seq Pool Wrapper ====== */ - -typedef ZSTDMT_bufferPool ZSTDMT_seqPool; - -static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) -{ - return ZSTDMT_sizeof_bufferPool(seqPool); -} - -static rawSeqStore_t bufferToSeq(buffer_t buffer) -{ - rawSeqStore_t seq = kNullRawSeqStore; - seq.seq = (rawSeq*)buffer.start; - seq.capacity = buffer.capacity / sizeof(rawSeq); - return seq; -} - -static buffer_t seqToBuffer(rawSeqStore_t seq) -{ - buffer_t buffer; - buffer.start = seq.seq; - buffer.capacity = seq.capacity * sizeof(rawSeq); - return buffer; -} - -static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) -{ - if (seqPool->bufferSize == 0) { - return kNullRawSeqStore; - } - return bufferToSeq(ZSTDMT_getBuffer(seqPool)); -} - -#if ZSTD_RESIZE_SEQPOOL -static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) -{ - return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); -} -#endif - -static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) -{ - ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); -} - -static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) -{ - ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); -} - -static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) -{ - ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); - if (seqPool == NULL) return NULL; - ZSTDMT_setNbSeq(seqPool, 0); - return seqPool; -} - -static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) -{ - ZSTDMT_freeBufferPool(seqPool); -} - -static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) -{ - return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers)); -}
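The seq pool is the buffer pool reused under a different unit: bufferToSeq and seqToBuffer above only re-express a raw byte buffer as an array of rawSeq entries. A small sketch of the capacity arithmetic (illustration only; the 12-byte figure assumes rawSeq is three U32 fields, as in the surrounding zstd sources):

    buffer_t raw = ZSTDMT_getBuffer(seqPool);   /* say raw.capacity == 1000 bytes */
    rawSeqStore_t seqs = bufferToSeq(raw);      /* 1000 / sizeof(rawSeq) == 1000 / 12 == 83 slots */
    buffer_t back = seqToBuffer(seqs);          /* 83 * 12 == 996 bytes, same start pointer */

The round-trip can shrink the reported capacity by up to sizeof(rawSeq)-1 bytes; ZSTDMT_setNbSeq sidesteps this by always sizing the pool in exact multiples of sizeof(rawSeq).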
- - -/* ===== CCtx Pool ===== */ -/* a single CCtx Pool can be invoked from multiple threads in parallel */ - -typedef struct { - ZSTD_pthread_mutex_t poolMutex; - int totalCCtx; - int availCCtx; - ZSTD_customMem cMem; - ZSTD_CCtx** cctxs; -} ZSTDMT_CCtxPool; - -/* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */ -static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) -{ - if (!pool) return; - ZSTD_pthread_mutex_destroy(&pool->poolMutex); - if (pool->cctxs) { - int cid; - for (cid=0; cid<pool->totalCCtx; cid++) - ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */ - ZSTD_customFree(pool->cctxs, pool->cMem); - } - ZSTD_customFree(pool, pool->cMem); -} - -/* ZSTDMT_createCCtxPool() : - * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ -static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, - ZSTD_customMem cMem) -{ - ZSTDMT_CCtxPool* const cctxPool = - (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem); - assert(nbWorkers > 0); - if (!cctxPool) return NULL; - if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { - ZSTD_customFree(cctxPool, cMem); - return NULL; - } - cctxPool->totalCCtx = nbWorkers; - cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem); - if (!cctxPool->cctxs) { - ZSTDMT_freeCCtxPool(cctxPool); - return NULL; - } - cctxPool->cMem = cMem; - cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); - if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } - cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ - DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); - return cctxPool; -} - -static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, - int nbWorkers) -{ - if (srcPool==NULL) return NULL; - if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ - /* need a larger cctx pool */ - { ZSTD_customMem const cMem = srcPool->cMem; - ZSTDMT_freeCCtxPool(srcPool); - return ZSTDMT_createCCtxPool(nbWorkers, cMem); - } -} - -/* only works during initialization phase, not during compression */ -static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) -{ - ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); - { unsigned const nbWorkers = cctxPool->totalCCtx; - size_t const poolSize = sizeof(*cctxPool); - size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*); - size_t totalCCtxSize = 0; - unsigned u; - for (u=0; u<nbWorkers; u++) { - totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); - } - ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); - assert(nbWorkers > 0); - return poolSize + arraySize + totalCCtxSize; - } -} - -static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) -{ - DEBUGLOG(5, "ZSTDMT_getCCtx"); - ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); - if (cctxPool->availCCtx) { - cctxPool->availCCtx--; - { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx]; - ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); - return cctx; - } } - ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); - DEBUGLOG(5, "create one more CCtx"); - return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ -} - -static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) -{ - if (cctx==NULL) return; /* compatibility with release on NULL */ - ZSTD_pthread_mutex_lock(&pool->poolMutex); - if (pool->availCCtx < pool->totalCCtx) - pool->cctxs[pool->availCCtx++] = cctx; - else { - /* pool overflow : should not happen, since totalCCtx==nbWorkers */ - DEBUGLOG(4, "CCtx pool overflow : free cctx"); - ZSTD_freeCCtx(cctx); - } - ZSTD_pthread_mutex_unlock(&pool->poolMutex); -}
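Taken together, ZSTDMT_getCCtx and ZSTDMT_releaseCCtx give each worker a simple borrow/return protocol: the pool pre-creates exactly one compression context (availCCtx = 1, the single-thread fallback) and lazily creates more only when all pooled ones are in flight. A minimal sketch of the worker-side pattern (illustration only, using the static functions above):

    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(cctxPool);  /* pooled, freshly created, or NULL on OOM */
    if (cctx == NULL) { /* report a memory_allocation error for this job */ }
    else {
        /* ... compress one job with cctx ... */
        ZSTDMT_releaseCCtx(cctxPool, cctx);  /* parked for the next job; freed only on overflow */
    }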
- -/* ==== Serial State ==== */ - -typedef struct { - void const* start; - size_t size; -} range_t; - -typedef struct { - /* All variables in the struct are protected by mutex. */ - ZSTD_pthread_mutex_t mutex; - ZSTD_pthread_cond_t cond; - ZSTD_CCtx_params params; - ldmState_t ldmState; - XXH64_state_t xxhState; - unsigned nextJobID; - /* Protects ldmWindow. - * Must be acquired after the main mutex when acquiring both. - */ - ZSTD_pthread_mutex_t ldmWindowMutex; - ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ - ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ -} serialState_t; - -static int -ZSTDMT_serialState_reset(serialState_t* serialState, - ZSTDMT_seqPool* seqPool, - ZSTD_CCtx_params params, - size_t jobSize, - const void* dict, size_t const dictSize, - ZSTD_dictContentType_e dictContentType) -{ - /* Adjust parameters */ - if (params.ldmParams.enableLdm == ZSTD_ps_enable) { - DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); - ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); - assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); - assert(params.ldmParams.hashRateLog < 32); - } else { - ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams)); - } - serialState->nextJobID = 0; - if (params.fParams.checksumFlag) - XXH64_reset(&serialState->xxhState, 0); - if (params.ldmParams.enableLdm == ZSTD_ps_enable) { - ZSTD_customMem cMem = params.customMem; - unsigned const hashLog = params.ldmParams.hashLog; - size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); - unsigned const bucketLog = - params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; - unsigned const prevBucketLog = - serialState->params.ldmParams.hashLog - - serialState->params.ldmParams.bucketSizeLog; - size_t const numBuckets = (size_t)1 << bucketLog; - /* Size the seq pool tables */ - ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); - /* Reset the window */ - ZSTD_window_init(&serialState->ldmState.window); - /* Resize tables and output space if necessary. */ - if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { - ZSTD_customFree(serialState->ldmState.hashTable, cMem); - serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); - } - if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { - ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); - serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem); - } - if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) - return 1; - /* Zero the tables */ - ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize); - ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets); - - /* Update window state and fill hash table with dict */ - serialState->ldmState.loadedDictEnd = 0; - if (dictSize > 0) { - if (dictContentType == ZSTD_dct_rawContent) { - BYTE const* const dictEnd = (const BYTE*)dict + dictSize; - ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0); - ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams); - serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); - } else { - /* don't even load anything */ - } - } - - /* Initialize serialState's copy of ldmWindow.
*/ - serialState->ldmWindow = serialState->ldmState.window; - } - - serialState->params = params; - serialState->params.jobSize = (U32)jobSize; - return 0; -} - -static int ZSTDMT_serialState_init(serialState_t* serialState) -{ - int initError = 0; - ZSTD_memset(serialState, 0, sizeof(*serialState)); - initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); - initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); - initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); - initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); - return initError; -} - -static void ZSTDMT_serialState_free(serialState_t* serialState) -{ - ZSTD_customMem cMem = serialState->params.customMem; - ZSTD_pthread_mutex_destroy(&serialState->mutex); - ZSTD_pthread_cond_destroy(&serialState->cond); - ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); - ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); - ZSTD_customFree(serialState->ldmState.hashTable, cMem); - ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); -} - -static void ZSTDMT_serialState_update(serialState_t* serialState, - ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, - range_t src, unsigned jobID) -{ - /* Wait for our turn */ - ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); - while (serialState->nextJobID < jobID) { - DEBUGLOG(5, "wait for serialState->cond"); - ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); - } - /* A future job may error and skip our job */ - if (serialState->nextJobID == jobID) { - /* It is now our turn, do any processing necessary */ - if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { - size_t error; - assert(seqStore.seq != NULL && seqStore.pos == 0 && - seqStore.size == 0 && seqStore.capacity > 0); - assert(src.size <= serialState->params.jobSize); - ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0); - error = ZSTD_ldm_generateSequences( - &serialState->ldmState, &seqStore, - &serialState->params.ldmParams, src.start, src.size); - /* We provide a large enough buffer to never fail. */ - assert(!ZSTD_isError(error)); (void)error; - /* Update ldmWindow to match the ldmState.window and signal the main - * thread if it is waiting for a buffer. 
- */ - ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); - serialState->ldmWindow = serialState->ldmState.window; - ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); - ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); - } - if (serialState->params.fParams.checksumFlag && src.size > 0) - XXH64_update(&serialState->xxhState, src.start, src.size); - } - /* Now it is the next jobs turn */ - serialState->nextJobID++; - ZSTD_pthread_cond_broadcast(&serialState->cond); - ZSTD_pthread_mutex_unlock(&serialState->mutex); - - if (seqStore.size > 0) { - ZSTD_referenceExternalSequences(jobCCtx, seqStore.seq, seqStore.size); - assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); - } -} - -static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, - unsigned jobID, size_t cSize) -{ - ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); - if (serialState->nextJobID <= jobID) { - assert(ZSTD_isError(cSize)); (void)cSize; - DEBUGLOG(5, "Skipping past job %u because of error", jobID); - serialState->nextJobID = jobID + 1; - ZSTD_pthread_cond_broadcast(&serialState->cond); - - ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); - ZSTD_window_clear(&serialState->ldmWindow); - ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); - ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); - } - ZSTD_pthread_mutex_unlock(&serialState->mutex); - -} - - -/* ------------------------------------------ */ -/* ===== Worker thread ===== */ -/* ------------------------------------------ */ - -static const range_t kNullRange = { NULL, 0 }; - -typedef struct { - size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ - size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ - ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ - ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ - ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ - ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ - serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ - buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ - range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ - range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ - unsigned jobID; /* set by mtctx, then read by worker => no barrier */ - unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ - unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ - ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ - const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ - unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ - size_t dstFlushed; /* used only by mtctx */ - unsigned frameChecksumNeeded; /* used only by mtctx */ -} ZSTDMT_jobDescription; - -#define JOB_ERROR(e) \ - do { \ - ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ - job->cSize = e; \ - ZSTD_pthread_mutex_unlock(&job->job_mutex); \ - goto _endJob; \ - } while (0) - -/* ZSTDMT_compressionJob() is a POOL_function type */ -static void ZSTDMT_compressionJob(void* jobDescription) -{ - ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; - ZSTD_CCtx_params jobParams = 
job->params; /* do not modify job->params ! copy it, modify the copy */ - ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); - rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); - buffer_t dstBuff = job->dstBuff; - size_t lastCBlockSize = 0; - - /* resources */ - if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); - if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ - dstBuff = ZSTDMT_getBuffer(job->bufPool); - if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); - job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ - } - if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL) - JOB_ERROR(ERROR(memory_allocation)); - - /* Don't compute the checksum for chunks, since we compute it externally, - * but write it in the header. - */ - if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; - /* Don't run LDM for the chunks, since we handle it externally */ - jobParams.ldmParams.enableLdm = ZSTD_ps_disable; - /* Correct nbWorkers to 0. */ - jobParams.nbWorkers = 0; - - - /* init */ - if (job->cdict) { - size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); - assert(job->firstJob); /* only allowed for first job */ - if (ZSTD_isError(initError)) JOB_ERROR(initError); - } else { /* srcStart points at reloaded section */ - U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size; - { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); - if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); - } - if (!job->firstJob) { - size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0); - if (ZSTD_isError(err)) JOB_ERROR(err); - } - { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, - job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ - ZSTD_dtlm_fast, - NULL, /*cdict*/ - &jobParams, pledgedSrcSize); - if (ZSTD_isError(initError)) JOB_ERROR(initError); - } } - - /* Perform serial step as early as possible, but after CCtx initialization */ - ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); - - if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ - size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); - if (ZSTD_isError(hSize)) JOB_ERROR(hSize); - DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); - ZSTD_invalidateRepCodes(cctx); - } - - /* compress */ - { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; - int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); - const BYTE* ip = (const BYTE*) job->src.start; - BYTE* const ostart = (BYTE*)dstBuff.start; - BYTE* op = ostart; - BYTE* oend = op + dstBuff.capacity; - int chunkNb; - if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ - DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); - assert(job->cSize == 0); - for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize); - if (ZSTD_isError(cSize)) JOB_ERROR(cSize); - ip += chunkSize; - op += cSize; assert(op < 
oend); - /* stats */ - ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); - job->cSize += cSize; - job->consumed = chunkSize * chunkNb; - DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", - (U32)cSize, (U32)job->cSize); - ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ - ZSTD_pthread_mutex_unlock(&job->job_mutex); - } - /* last block */ - assert(chunkSize > 0); - assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ - if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { - size_t const lastBlockSize1 = job->src.size & (chunkSize-1); - size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; - size_t const cSize = (job->lastJob) ? - ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) : - ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize); - if (ZSTD_isError(cSize)) JOB_ERROR(cSize); - lastCBlockSize = cSize; - } } - if (!job->firstJob) { - /* Double check that we don't have an ext-dict, because then our - * repcode invalidation doesn't work. - */ - assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); - } - ZSTD_CCtx_trace(cctx, 0); - -_endJob: - ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); - if (job->prefix.size > 0) - DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); - DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); - /* release resources */ - ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); - ZSTDMT_releaseCCtx(job->cctxPool, cctx); - /* report */ - ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); - if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); - job->cSize += lastCBlockSize; - job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ - ZSTD_pthread_cond_signal(&job->job_cond); - ZSTD_pthread_mutex_unlock(&job->job_mutex); -} - - -/* ------------------------------------------ */ -/* ===== Multi-threaded compression ===== */ -/* ------------------------------------------ */ - -typedef struct { - range_t prefix; /* read-only non-owned prefix buffer */ - buffer_t buffer; - size_t filled; -} inBuff_t; - -typedef struct { - BYTE* buffer; /* The round input buffer. All jobs get references - * to pieces of the buffer. ZSTDMT_tryGetInputRange() - * handles handing out job input buffers, and makes - * sure it doesn't overlap with any pieces still in use. - */ - size_t capacity; /* The capacity of buffer. */ - size_t pos; /* The position of the current inBuff in the round - * buffer. Updated past the end if the inBuff once - * the inBuff is sent to the worker thread. - * pos <= capacity. - */ -} roundBuff_t; - -static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; - -#define RSYNC_LENGTH 32 -/* Don't create chunks smaller than the zstd block size. - * This stops us from regressing compression ratio too much, - * and ensures our output fits in ZSTD_compressBound(). - * - * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then - * ZSTD_COMPRESSBOUND() will need to be updated. - */ -#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX -#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) - -typedef struct { - U64 hash; - U64 hitMask; - U64 primePower; -} rsyncState_t; - -struct ZSTDMT_CCtx_s { - POOL_ctx* factory; - ZSTDMT_jobDescription* jobs; - ZSTDMT_bufferPool* bufPool; - ZSTDMT_CCtxPool* cctxPool; - ZSTDMT_seqPool* seqPool; - ZSTD_CCtx_params params; - size_t targetSectionSize; - size_t targetPrefixSize; - int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ - inBuff_t inBuff; - roundBuff_t roundBuff; - serialState_t serial; - rsyncState_t rsync; - unsigned jobIDMask; - unsigned doneJobID; - unsigned nextJobID; - unsigned frameEnded; - unsigned allJobsCompleted; - unsigned long long frameContentSize; - unsigned long long consumed; - unsigned long long produced; - ZSTD_customMem cMem; - ZSTD_CDict* cdictLocal; - const ZSTD_CDict* cdict; - unsigned providedFactory: 1; -};
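jobIDMask only works because the job table size is kept at a power of 2 (asserted below with (nbJobs & (nbJobs - 1)) == 0): monotonically growing job counters are mapped to table slots with a single AND instead of a modulo. A worked example (illustration only, not part of the deleted file):

    /* nbJobs = 8 => jobIDMask = 7 */
    unsigned const slot = mtctx->nextJobID & mtctx->jobIDMask;  /* nextJobID = 9  -> slot 9 & 7 = 1 */
                                                                /* doneJobID = 42 -> slot 42 & 7 = 2 */

Because doneJobID and nextJobID only ever increase and only their low bits select a slot, the table behaves as a ring of in-flight job descriptions.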
-static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) -{ - U32 jobNb; - if (jobTable == NULL) return; - for (jobNb=0; jobNb<nbJobs; jobNb++) { - ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex); - ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond); - } - ZSTD_customFree(jobTable, cMem); -} - -/* ZSTDMT_allocJobsTable() - * allocate and init a job table. - * update *nbJobsPtr to next power of 2 value, as size of table */ -static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, - ZSTD_customMem cMem) -{ - U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; - U32 const nbJobs = 1 << nbJobsLog2; - U32 jobNb; - ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) - ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); - int initError = 0; - if (jobTable==NULL) return NULL; - *nbJobsPtr = nbJobs; - for (jobNb=0; jobNb<nbJobs; jobNb++) { - initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL); - initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL); - } - if (initError != 0) { - ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); - return NULL; - } - return jobTable; -} - -static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) { - U32 nbJobs = nbWorkers + 2; - if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */ - ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); - mtctx->jobIDMask = 0; - mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); - if (mtctx->jobs==NULL) return ERROR(memory_allocation); - assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ - mtctx->jobIDMask = nbJobs - 1; - } - return 0; -} - - -/* ZSTDMT_CCtxParam_setNbWorkers(): - * Internal use only */ -static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) -{ - return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); -} - -MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) -{ - ZSTDMT_CCtx* mtctx; - U32 nbJobs = nbWorkers + 2; - int initError; - DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); - - if (nbWorkers < 1) return NULL; - nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); - if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) - /* invalid custom allocator */ - return NULL; - - mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem); - if (!mtctx) return NULL; - ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); - mtctx->cMem = cMem; - mtctx->allJobsCompleted = 1; - if (pool != NULL) { - mtctx->factory = pool; - mtctx->providedFactory = 1; - } - else { - mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); - mtctx->providedFactory = 0; - } - mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); - assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ - mtctx->jobIDMask = nbJobs - 1; - mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); - mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); - mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); - initError = ZSTDMT_serialState_init(&mtctx->serial); - mtctx->roundBuff = kNullRoundBuff; - if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { - ZSTDMT_freeCCtx(mtctx); - return NULL; - } - DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); - return mtctx; -} - -ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) -{ -#ifdef ZSTD_MULTITHREAD - return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); -#else - (void)nbWorkers; - (void)cMem; - (void)pool; - return NULL; -#endif -} - - -/* ZSTDMT_releaseAllJobResources() : - * note : ensure all workers are killed first !
*/ -static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) -{ - unsigned jobID; - DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); - for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { - /* Copy the mutex/cond out */ - ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; - ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; - - DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - - /* Clear the job description, but keep the mutex/cond */ - ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); - mtctx->jobs[jobID].job_mutex = mutex; - mtctx->jobs[jobID].job_cond = cond; - } - mtctx->inBuff.buffer = g_nullBuffer; - mtctx->inBuff.filled = 0; - mtctx->allJobsCompleted = 1; -} - -static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) -{ - DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); - while (mtctx->doneJobID < mtctx->nextJobID) { - unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; - ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); - while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { - DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ - ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); - } - ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); - mtctx->doneJobID++; - } -} - -size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) -{ - if (mtctx==NULL) return 0; /* compatible with free on NULL */ - if (!mtctx->providedFactory) - POOL_free(mtctx->factory); /* stop and free worker threads */ - ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ - ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); - ZSTDMT_freeBufferPool(mtctx->bufPool); - ZSTDMT_freeCCtxPool(mtctx->cctxPool); - ZSTDMT_freeSeqPool(mtctx->seqPool); - ZSTDMT_serialState_free(&mtctx->serial); - ZSTD_freeCDict(mtctx->cdictLocal); - if (mtctx->roundBuff.buffer) - ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); - ZSTD_customFree(mtctx, mtctx->cMem); - return 0; -} - -size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) -{ - if (mtctx == NULL) return 0; /* supports sizeof NULL */ - return sizeof(*mtctx) - + POOL_sizeof(mtctx->factory) - + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) - + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) - + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) - + ZSTDMT_sizeof_seqPool(mtctx->seqPool) - + ZSTD_sizeof_CDict(mtctx->cdictLocal) - + mtctx->roundBuff.capacity; -} - - -/* ZSTDMT_resize() : - * @return : error code if fails, 0 on success */ -static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) -{ - if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); - FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); - mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers)); - if (mtctx->bufPool == NULL) return ERROR(memory_allocation); - mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); - if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); - mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); - if (mtctx->seqPool == NULL) return ERROR(memory_allocation); - ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); - return 0; -} - - -/*! 
ZSTDMT_updateCParams_whileCompressing() : - * Updates a selected set of compression parameters, remaining compatible with currently active frame. - * New parameters will be applied to next compression job. */ -void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) -{ - U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ - int const compressionLevel = cctxParams->compressionLevel; - DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", - compressionLevel); - mtctx->params.compressionLevel = compressionLevel; - { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - cParams.windowLog = saved_wlog; - mtctx->params.cParams = cParams; - } -} - -/* ZSTDMT_getFrameProgression(): - * tells how much data has been consumed (input) and produced (output) for current frame. - * able to count progression inside worker threads. - * Note : mutex will be acquired during statistics collection inside workers. */ -ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) -{ - ZSTD_frameProgression fps; - DEBUGLOG(5, "ZSTDMT_getFrameProgression"); - fps.ingested = mtctx->consumed + mtctx->inBuff.filled; - fps.consumed = mtctx->consumed; - fps.produced = fps.flushed = mtctx->produced; - fps.currentJobID = mtctx->nextJobID; - fps.nbActiveWorkers = 0; - { unsigned jobNb; - unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); - DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", - mtctx->doneJobID, lastJobNb, mtctx->jobReady); - for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { - unsigned const wJobID = jobNb & mtctx->jobIDMask; - ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; - ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); - { size_t const cResult = jobPtr->cSize; - size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; - size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; - assert(flushed <= produced); - fps.ingested += jobPtr->src.size; - fps.consumed += jobPtr->consumed; - fps.produced += produced; - fps.flushed += flushed; - fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); - } - ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); - } - } - return fps; -} - - -size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) -{ - size_t toFlush; - unsigned const jobID = mtctx->doneJobID; - assert(jobID <= mtctx->nextJobID); - if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ - - /* look into oldest non-fully-flushed job */ - { unsigned const wJobID = jobID & mtctx->jobIDMask; - ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; - ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); - { size_t const cResult = jobPtr->cSize; - size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; - size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; - assert(flushed <= produced); - assert(jobPtr->consumed <= jobPtr->src.size); - toFlush = produced - flushed; - /* if toFlush==0, nothing is available to flush. - * However, jobID is expected to still be active: - * if jobID was already completed and fully flushed, - * ZSTDMT_flushProduced() should have already moved onto next job. - * Therefore, some input has not yet been consumed. 
*/ - if (toFlush==0) { - assert(jobPtr->consumed < jobPtr->src.size); - } - } - ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); - } - - return toFlush; -} - - -/* ------------------------------------------ */ -/* ===== Multi-threaded compression ===== */ -/* ------------------------------------------ */ - -static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) -{ - unsigned jobLog; - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* In Long Range Mode, the windowLog is typically oversized. - * In which case, it's preferable to determine the jobSize - * based on cycleLog instead. */ - jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3); - } else { - jobLog = MAX(20, params->cParams.windowLog + 2); - } - return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); -} - -static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) -{ - switch(strat) - { - case ZSTD_btultra2: - return 9; - case ZSTD_btultra: - case ZSTD_btopt: - return 8; - case ZSTD_btlazy2: - case ZSTD_lazy2: - return 7; - case ZSTD_lazy: - case ZSTD_greedy: - case ZSTD_dfast: - case ZSTD_fast: - default:; - } - return 6; -} - -static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) -{ - assert(0 <= ovlog && ovlog <= 9); - if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); - return ovlog; -} - -static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) -{ - int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); - int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); - assert(0 <= overlapRLog && overlapRLog <= 8); - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* In Long Range Mode, the windowLog is typically oversized. - * In which case, it's preferable to determine the jobSize - * based on chainLog instead. - * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ - ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - - overlapRLog; - } - assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); - DEBUGLOG(4, "overlapLog : %i", params->overlapLog); - DEBUGLOG(4, "overlap size : %i", 1 << ovLog); - return (ovLog==0) ? 
0 : (size_t)1 << ovLog; -} - -/* ====================================== */ -/* ======= Streaming API ======= */ -/* ====================================== */ - -size_t ZSTDMT_initCStream_internal( - ZSTDMT_CCtx* mtctx, - const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, - const ZSTD_CDict* cdict, ZSTD_CCtx_params params, - unsigned long long pledgedSrcSize) -{ - DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)", - (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx); - - /* params supposed partially fully validated at this point */ - assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); - assert(!((dict) && (cdict))); /* either dict or cdict, not both */ - - /* init */ - if (params.nbWorkers != mtctx->params.nbWorkers) - FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , ""); - - if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; - if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; - - DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); - - if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ - ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - mtctx->allJobsCompleted = 1; - } - - mtctx->params = params; - mtctx->frameContentSize = pledgedSrcSize; - if (dict) { - ZSTD_freeCDict(mtctx->cdictLocal); - mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, - ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ - params.cParams, mtctx->cMem); - mtctx->cdict = mtctx->cdictLocal; - if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); - } else { - ZSTD_freeCDict(mtctx->cdictLocal); - mtctx->cdictLocal = NULL; - mtctx->cdict = cdict; - } - - mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params); - DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); - mtctx->targetSectionSize = params.jobSize; - if (mtctx->targetSectionSize == 0) { - mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params); - } - assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX); - - if (params.rsyncable) { - /* Aim for the targetsectionSize as the average job size. */ - U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10); - U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10); - /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our - * expected job size is at least 4x larger. */ - assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2); - DEBUGLOG(4, "rsyncLog = %u", rsyncBits); - mtctx->rsync.hash = 0; - mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; - mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH); - }
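    /* Worked example (editorial illustration, not part of the deleted file):
     * with targetSectionSize = 2 MB, jobSizeKB = 2048, so
     *   rsyncBits = ZSTD_highbit32(2048) + 10 = 11 + 10 = 21,
     *   hitMask   = (1ULL << 21) - 1.
     * Assuming the rolling hash over RSYNC_LENGTH bytes is roughly uniform,
     * (hash & hitMask) == hitMask fires with probability 2^-21, i.e. about
     * once every 2 MB, so rsyncable chunk boundaries land near the target
     * job size on average. */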
- if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */ - DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize); - DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10)); - ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); - { - /* If ldm is enabled we need windowSize space. */ - size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0; - /* Two buffers of slack, plus extra space for the overlap - * This is the minimum slack that LDM works with. One extra because - * flush might waste up to targetSectionSize-1 bytes. Another extra - * for the overlap (if > 0), then one to fill which doesn't overlap - * with the LDM window. - */ - size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); - size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; - /* Compute the total size, and always have enough slack */ - size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); - size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; - size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; - if (mtctx->roundBuff.capacity < capacity) { - if (mtctx->roundBuff.buffer) - ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); - mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem); - if (mtctx->roundBuff.buffer == NULL) { - mtctx->roundBuff.capacity = 0; - return ERROR(memory_allocation); - } - mtctx->roundBuff.capacity = capacity; - } - } - DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); - mtctx->roundBuff.pos = 0; - mtctx->inBuff.buffer = g_nullBuffer; - mtctx->inBuff.filled = 0; - mtctx->inBuff.prefix = kNullRange; - mtctx->doneJobID = 0; - mtctx->nextJobID = 0; - mtctx->frameEnded = 0; - mtctx->allJobsCompleted = 0; - mtctx->consumed = 0; - mtctx->produced = 0; - if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, - dict, dictSize, dictContentType)) - return ERROR(memory_allocation); - return 0; -} - - -/* ZSTDMT_writeLastEmptyBlock() - * Write a single empty block with an end-of-frame to finish a frame. - * Job must be created from streaming variant. - * This function is always successful if expected conditions are fulfilled.
- */ -static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) -{ - assert(job->lastJob == 1); - assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ - assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ - assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ - job->dstBuff = ZSTDMT_getBuffer(job->bufPool); - if (job->dstBuff.start == NULL) { - job->cSize = ERROR(memory_allocation); - return; - } - assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ - job->src = kNullRange; - job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); - assert(!ZSTD_isError(job->cSize)); - assert(job->consumed == 0); -} - -static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) -{ - unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; - int const endFrame = (endOp == ZSTD_e_end); - - if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { - DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); - assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); - return 0; - } - - if (!mtctx->jobReady) { - BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; - DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", - mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); - mtctx->jobs[jobID].src.start = src; - mtctx->jobs[jobID].src.size = srcSize; - assert(mtctx->inBuff.filled >= srcSize); - mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; - mtctx->jobs[jobID].consumed = 0; - mtctx->jobs[jobID].cSize = 0; - mtctx->jobs[jobID].params = mtctx->params; - mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? 
mtctx->cdict : NULL; - mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; - mtctx->jobs[jobID].dstBuff = g_nullBuffer; - mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; - mtctx->jobs[jobID].bufPool = mtctx->bufPool; - mtctx->jobs[jobID].seqPool = mtctx->seqPool; - mtctx->jobs[jobID].serial = &mtctx->serial; - mtctx->jobs[jobID].jobID = mtctx->nextJobID; - mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); - mtctx->jobs[jobID].lastJob = endFrame; - mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); - mtctx->jobs[jobID].dstFlushed = 0; - - /* Update the round buffer pos and clear the input buffer to be reset */ - mtctx->roundBuff.pos += srcSize; - mtctx->inBuff.buffer = g_nullBuffer; - mtctx->inBuff.filled = 0; - /* Set the prefix */ - if (!endFrame) { - size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); - mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; - mtctx->inBuff.prefix.size = newPrefixSize; - } else { /* endFrame==1 => no need for another input buffer */ - mtctx->inBuff.prefix = kNullRange; - mtctx->frameEnded = endFrame; - if (mtctx->nextJobID == 0) { - /* single job exception : checksum is already calculated directly within worker thread */ - mtctx->params.fParams.checksumFlag = 0; - } } - - if ( (srcSize == 0) - && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { - DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); - assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ - ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); - mtctx->nextJobID++; - return 0; - } - } - - DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", - mtctx->nextJobID, - (U32)mtctx->jobs[jobID].src.size, - mtctx->jobs[jobID].lastJob, - mtctx->nextJobID, - jobID); - if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { - mtctx->nextJobID++; - mtctx->jobReady = 0; - } else { - DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); - mtctx->jobReady = 1; - } - return 0; -} - - -/*! ZSTDMT_flushProduced() : - * flush whatever data has been produced but not yet flushed in current job. - * move to next job if current one is fully flushed. - * `output` : `pos` will be updated with amount of data flushed . - * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 
- * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ -static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) -{ - unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; - DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", - blockToFlush, mtctx->doneJobID, mtctx->nextJobID); - assert(output->size >= output->pos); - - ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); - if ( blockToFlush - && (mtctx->doneJobID < mtctx->nextJobID) ) { - assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); - while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ - if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { - DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", - mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); - break; - } - DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", - mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); - ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ - } } - - /* try to flush something */ - { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ - size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */ - size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ - ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); - if (ZSTD_isError(cSize)) { - DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", - mtctx->doneJobID, ZSTD_getErrorName(cSize)); - ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - return cSize; - } - /* add frame checksum if necessary (can only happen once) */ - assert(srcConsumed <= srcSize); - if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ - && mtctx->jobs[wJobID].frameChecksumNeeded ) { - U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); - DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); - MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); - cSize += 4; - mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ - mtctx->jobs[wJobID].frameChecksumNeeded = 0; - } - - if (cSize > 0) { /* compression is ongoing or completed */ - size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); - DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", - (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); - assert(mtctx->doneJobID < mtctx->nextJobID); - assert(cSize >= mtctx->jobs[wJobID].dstFlushed); - assert(mtctx->jobs[wJobID].dstBuff.start != NULL); - if (toFlush > 0) { - ZSTD_memcpy((char*)output->dst + output->pos, - (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, - toFlush); - } - output->pos += toFlush; - mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ - - if ( (srcConsumed == srcSize) /* job is completed */ - && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ - DEBUGLOG(5, "Job 
%u completed (%u bytes), moving to next one", - mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); - DEBUGLOG(5, "dstBuffer released"); - mtctx->jobs[wJobID].dstBuff = g_nullBuffer; - mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ - mtctx->consumed += srcSize; - mtctx->produced += cSize; - mtctx->doneJobID++; - } } - - /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ - if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); - if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ - } - if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ - if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ - if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ - mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ - if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ - return 0; /* internal buffers fully flushed */ -} - -/** - * Returns the range of data used by the earliest job that is not yet complete. - * If the data of the first job is broken up into two segments, we cover both - * sections. - */ -static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) -{ - unsigned const firstJobID = mtctx->doneJobID; - unsigned const lastJobID = mtctx->nextJobID; - unsigned jobID; - - for (jobID = firstJobID; jobID < lastJobID; ++jobID) { - unsigned const wJobID = jobID & mtctx->jobIDMask; - size_t consumed; - - ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); - consumed = mtctx->jobs[wJobID].consumed; - ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); - - if (consumed < mtctx->jobs[wJobID].src.size) { - range_t range = mtctx->jobs[wJobID].prefix; - if (range.size == 0) { - /* Empty prefix */ - range = mtctx->jobs[wJobID].src; - } - /* Job source in multiple segments not supported yet */ - assert(range.start <= mtctx->jobs[wJobID].src.start); - return range; - } - } - return kNullRange; -} - -/** - * Returns non-zero iff buffer and range overlap. 
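This is the standard half-open interval test, with the extra rule that empty or NULL operands never overlap: [a,b) and [c,d) intersect iff a < d and c < b. A self-contained sketch using plain offsets instead of zstd's buffer_t/range_t:

    #include <stddef.h>

    /* Half-open intervals [aStart, aEnd) and [bStart, bEnd). */
    static int intervals_overlap(size_t aStart, size_t aEnd, size_t bStart, size_t bEnd)
    {
        if (aStart == aEnd || bStart == bEnd) return 0;  /* empty never overlaps */
        return aStart < bEnd && bStart < aEnd;
    }
    /* e.g. intervals_overlap(0,10, 9,12) == 1 ; intervals_overlap(0,10, 10,12) == 0 */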
- */ -static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) -{ - BYTE const* const bufferStart = (BYTE const*)buffer.start; - BYTE const* const rangeStart = (BYTE const*)range.start; - - if (rangeStart == NULL || bufferStart == NULL) - return 0; - - { - BYTE const* const bufferEnd = bufferStart + buffer.capacity; - BYTE const* const rangeEnd = rangeStart + range.size; - - /* Empty ranges cannot overlap */ - if (bufferStart == bufferEnd || rangeStart == rangeEnd) - return 0; - - return bufferStart < rangeEnd && rangeStart < bufferEnd; - } -} - -static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) -{ - range_t extDict; - range_t prefix; - - DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); - extDict.start = window.dictBase + window.lowLimit; - extDict.size = window.dictLimit - window.lowLimit; - - prefix.start = window.base + window.dictLimit; - prefix.size = window.nextSrc - (window.base + window.dictLimit); - DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", - (size_t)extDict.start, - (size_t)extDict.start + extDict.size); - DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", - (size_t)prefix.start, - (size_t)prefix.start + prefix.size); - - return ZSTDMT_isOverlapped(buffer, extDict) - || ZSTDMT_isOverlapped(buffer, prefix); -} - -static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) -{ - if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { - ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; - DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); - DEBUGLOG(5, "source [0x%zx, 0x%zx)", - (size_t)buffer.start, - (size_t)buffer.start + buffer.capacity); - ZSTD_PTHREAD_MUTEX_LOCK(mutex); - while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { - DEBUGLOG(5, "Waiting for LDM to finish..."); - ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); - } - DEBUGLOG(6, "Done waiting for LDM to finish"); - ZSTD_pthread_mutex_unlock(mutex); - } -} - -/** - * Attempts to set the inBuff to the next section to fill. - * If any part of the new section is still in use we give up. - * Returns non-zero if the buffer is filled. - */ -static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) -{ - range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); - size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; - size_t const target = mtctx->targetSectionSize; - buffer_t buffer; - - DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); - assert(mtctx->inBuff.buffer.start == NULL); - assert(mtctx->roundBuff.capacity >= target); - - if (spaceLeft < target) { - /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. - * Simply copy the prefix to the beginning in that case. 
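The wrap-around itself is small: when the tail of the round buffer cannot hold another full section, move the still-live prefix down to offset 0 and resume filling right after it (memmove, because source and destination may overlap). A toy sketch under that assumption, with none of the in-use/LDM checks that the real function performs:

    #include <string.h>
    #include <stddef.h>

    typedef struct { char buf[1 << 16]; size_t pos; } roundbuf_t;  /* toy round buffer */

    /* Ensure `target` writable bytes; keep the last `prefixSize` live bytes reachable. */
    static char* roundbuf_reserve(roundbuf_t* rb, size_t target,
                                  const char* prefix, size_t prefixSize)
    {
        if (sizeof(rb->buf) - rb->pos < target) {     /* not enough tail room: wrap */
            memmove(rb->buf, prefix, prefixSize);     /* relocate live prefix to offset 0 */
            rb->pos = prefixSize;
        }
        return rb->buf + rb->pos;                     /* next section starts here */
    }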
- */ - BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; - size_t const prefixSize = mtctx->inBuff.prefix.size; - - buffer.start = start; - buffer.capacity = prefixSize; - if (ZSTDMT_isOverlapped(buffer, inUse)) { - DEBUGLOG(5, "Waiting for buffer..."); - return 0; - } - ZSTDMT_waitForLdmComplete(mtctx, buffer); - ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize); - mtctx->inBuff.prefix.start = start; - mtctx->roundBuff.pos = prefixSize; - } - buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; - buffer.capacity = target; - - if (ZSTDMT_isOverlapped(buffer, inUse)) { - DEBUGLOG(5, "Waiting for buffer..."); - return 0; - } - assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); - - ZSTDMT_waitForLdmComplete(mtctx, buffer); - - DEBUGLOG(5, "Using prefix range [%zx, %zx)", - (size_t)mtctx->inBuff.prefix.start, - (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); - DEBUGLOG(5, "Using source range [%zx, %zx)", - (size_t)buffer.start, - (size_t)buffer.start + buffer.capacity); - - - mtctx->inBuff.buffer = buffer; - mtctx->inBuff.filled = 0; - assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); - return 1; -} - -typedef struct { - size_t toLoad; /* The number of bytes to load from the input. */ - int flush; /* Boolean declaring if we must flush because we found a synchronization point. */ -} syncPoint_t; - -/** - * Searches through the input for a synchronization point. If one is found, we - * will instruct the caller to flush, and return the number of bytes to load. - * Otherwise, we will load as many bytes as possible and instruct the caller - * to continue as normal. - */ -static syncPoint_t -findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) -{ - BYTE const* const istart = (BYTE const*)input.src + input.pos; - U64 const primePower = mtctx->rsync.primePower; - U64 const hitMask = mtctx->rsync.hitMask; - - syncPoint_t syncPoint; - U64 hash; - BYTE const* prev; - size_t pos; - - syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); - syncPoint.flush = 0; - if (!mtctx->params.rsyncable) - /* Rsync is disabled. */ - return syncPoint; - if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE) - /* We don't emit synchronization points if it would produce too small blocks. - * We don't have enough input to find a synchronization point, so don't look. - */ - return syncPoint; - if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) - /* Not enough to compute the hash. - * We will miss any synchronization points in this RSYNC_LENGTH byte - * window. However, since it depends only in the internal buffers, if the - * state is already synchronized, we will remain synchronized. - * Additionally, the probability that we miss a synchronization point is - * low: RSYNC_LENGTH / targetSectionSize. - */ - return syncPoint; - /* Initialize the loop variables. */ - if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) { - /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions - * because they can't possibly be a sync point. So we can start - * part way through the input buffer. 
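Stepping back, the content-defined cut is: keep a rolling hash of the last RSYNC_LENGTH bytes and end the job whenever the low bits of the hash are all ones, which makes cut points depend only on content and land every ~2^k bytes on average for a k-bit hitMask. A self-contained sketch with a generic polynomial rolling hash (illustrative window and multiplier, not zstd's ZSTD_rollingHash_* constants):

    #include <stdint.h>
    #include <stddef.h>

    #define WIN 32                    /* window length; zstd's RSYNC_LENGTH is also 32 */
    static const uint64_t PRIME = 0x9E3779B185EBCA87ULL;   /* illustrative multiplier */

    /* Returns the offset of the first sync point in [WIN, len), or len if none.
     * A position is a sync point when the low `bits` bits of the hash are all 1s,
     * so cuts land every ~2^bits bytes on average. */
    static size_t find_cut(const uint8_t* p, size_t len, unsigned bits)
    {
        uint64_t const hitMask = ((uint64_t)1 << bits) - 1;
        uint64_t primePow = 1, hash = 0;
        size_t i;
        for (i = 0; i < WIN - 1; ++i) primePow *= PRIME;       /* PRIME^(WIN-1) */
        for (i = 0; i < WIN && i < len; ++i) hash = hash * PRIME + p[i];
        for (i = WIN; i < len; ++i) {
            if ((hash & hitMask) == hitMask) return i;         /* cut the job here */
            hash = (hash - p[i - WIN] * primePow) * PRIME + p[i];  /* roll the window */
        }
        return len;
    }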
- */ - pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled; - if (pos >= RSYNC_LENGTH) { - prev = istart + pos - RSYNC_LENGTH; - hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); - } else { - assert(mtctx->inBuff.filled >= RSYNC_LENGTH); - prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; - hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos)); - hash = ZSTD_rollingHash_append(hash, istart, pos); - } - } else { - /* We have enough bytes buffered to initialize the hash, - * and have processed enough bytes to find a sync point. - * Start scanning at the beginning of the input. - */ - assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); - assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH); - pos = 0; - prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; - hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); - if ((hash & hitMask) == hitMask) { - /* We're already at a sync point so don't load any more until - * we're able to flush this sync point. - * This likely happened because the job table was full so we - * couldn't add our job. - */ - syncPoint.toLoad = 0; - syncPoint.flush = 1; - return syncPoint; - } - } - /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll - * through the input. If we hit a synchronization point, then cut the - * job off, and tell the compressor to flush the job. Otherwise, load - * all the bytes and continue as normal. - * If we go too long without a synchronization point (targetSectionSize) - * then a block will be emitted anyways, but this is okay, since if we - * are already synchronized we will remain synchronized. - */ - assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); - for (; pos < syncPoint.toLoad; ++pos) { - BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - /* This assert is very expensive, and Debian compiles with asserts enabled. - * So disable it for now. We can get similar coverage by checking it at the - * beginning & end of the loop. - * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); - */ - hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); - assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); - if ((hash & hitMask) == hitMask) { - syncPoint.toLoad = pos + 1; - syncPoint.flush = 1; - ++pos; /* for assert */ - break; - } - } - assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); - return syncPoint; -} - -size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) -{ - size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; - if (hintInSize==0) hintInSize = mtctx->targetSectionSize; - return hintInSize; -} - -/** ZSTDMT_compressStream_generic() : - * internal use only - exposed to be invoked from zstd_compress.c - * assumption : output and input are valid (pos <= size) - * @return : minimum amount of data remaining to flush, 0 if none */ -size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp) -{ - unsigned forwardInputProgress = 0; - DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", - (U32)endOp, (U32)(input->size - input->pos)); - assert(output->pos <= output->size); - assert(input->pos <= input->size); - - if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { - /* current frame being ended. 
Only flush/end are allowed */ - return ERROR(stage_wrong); - } - - /* fill input buffer */ - if ( (!mtctx->jobReady) - && (input->size > input->pos) ) { /* support NULL input */ - if (mtctx->inBuff.buffer.start == NULL) { - assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ - if (!ZSTDMT_tryGetInputRange(mtctx)) { - /* It is only possible for this operation to fail if there are - * still compression jobs ongoing. - */ - DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); - assert(mtctx->doneJobID != mtctx->nextJobID); - } else - DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); - } - if (mtctx->inBuff.buffer.start != NULL) { - syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); - if (syncPoint.flush && endOp == ZSTD_e_continue) { - endOp = ZSTD_e_flush; - } - assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); - DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", - (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); - ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); - input->pos += syncPoint.toLoad; - mtctx->inBuff.filled += syncPoint.toLoad; - forwardInputProgress = syncPoint.toLoad>0; - } - } - if ((input->pos < input->size) && (endOp == ZSTD_e_end)) { - /* Can't end yet because the input is not fully consumed. - * We are in one of these cases: - * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job. - * - We filled the input buffer: flush this job but don't end the frame. - * - We hit a synchronization point: flush this job but don't end the frame. - */ - assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable); - endOp = ZSTD_e_flush; - } - - if ( (mtctx->jobReady) - || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ - || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ - || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ - size_t const jobSize = mtctx->inBuff.filled; - assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); - FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); - } - - /* check for potential compressed data ready to be flushed */ - { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ - if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ - DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush); - return remainingToFlush; - } -} diff --git a/zstandard_android/src/compress/zstdmt_compress.h b/zstandard_android/src/compress/zstdmt_compress.h deleted file mode 100644 index ed4dc0e..0000000 --- a/zstandard_android/src/compress/zstdmt_compress.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). 
- * You may select, at your option, one of the above-listed licenses. - */ - - #ifndef ZSTDMT_COMPRESS_H - #define ZSTDMT_COMPRESS_H - - #if defined (__cplusplus) - extern "C" { - #endif - - -/* Note : This is an internal API. - * These APIs used to be exposed with ZSTDLIB_API, - * because it used to be the only way to invoke MT compression. - * Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead. - * - * This API requires ZSTD_MULTITHREAD to be defined during compilation, - * otherwise ZSTDMT_createCCtx*() will fail. - */ - -/* === Dependencies === */ -#include "../common/zstd_deps.h" /* size_t */ -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ -#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ - - -/* === Constants === */ -#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */ -# define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256) -#endif -#ifndef ZSTDMT_JOBSIZE_MIN /* a different value can be selected at compile time */ -# define ZSTDMT_JOBSIZE_MIN (512 KB) -#endif -#define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) -#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) - - -/* ======================================================== - * === Private interface, for use by ZSTD_compress.c === - * === Not exposed in libzstd. Never invoke directly === - * ======================================================== */ - -/* === Memory management === */ -typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; -/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ -ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, - ZSTD_customMem cMem, - ZSTD_threadPool *pool); -size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); - -size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); - -/* === Streaming functions === */ - -size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); - -/*! ZSTDMT_initCStream_internal() : - * Private use only. Init streaming operation. - * expects params to be valid. - * must receive dict, or cdict, or none, but not both. - * mtctx can be freshly constructed or reused from a prior compression. - * If mtctx is reused, memory allocations from the prior compression may not be freed, - * even if they are not needed for the current compression. - * @return : 0, or an error code */ -size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx, - const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, - const ZSTD_CDict* cdict, - ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); - -/*! ZSTDMT_compressStream_generic() : - * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() - * depending on flush directive. - * @return : minimum amount of data still to be flushed - * 0 if fully flushed - * or an error code - * note : needs to be init using any ZSTD_initCStream*() variant */ -size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp); - - /*! ZSTDMT_toFlushNow() - * Tell how many bytes are ready to be flushed immediately. - * Probe the oldest active job (not yet entirely flushed) and check its output buffer. - * If return 0, it means there is no active job, - * or, it means oldest job is still active, but everything produced has been flushed so far, - * therefore flushing is limited by speed of oldest job. */ -size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx); - -/*! 
ZSTDMT_updateCParams_whileCompressing() : - * Updates only a selected set of compression parameters, to remain compatible with current frame. - * New parameters will be applied to next compression job. */ -void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams); - -/*! ZSTDMT_getFrameProgression(): - * tells how much data has been consumed (input) and produced (output) for current frame. - * able to count progression inside worker threads. - */ -ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); - - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTDMT_COMPRESS_H */ diff --git a/zstandard_android/src/decompress/huf_decompress.c b/zstandard_android/src/decompress/huf_decompress.c deleted file mode 100644 index f85dd0b..0000000 --- a/zstandard_android/src/decompress/huf_decompress.c +++ /dev/null @@ -1,1944 +0,0 @@ -/* ****************************************************************** - * huff0 huffman decoder, - * part of Finite State Entropy library - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * You can contact the author at : - * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. -****************************************************************** */ - -/* ************************************************************** -* Dependencies -****************************************************************/ -#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ -#include "../common/compiler.h" -#include "../common/bitstream.h" /* BIT_* */ -#include "../common/fse.h" /* to compress headers */ -#include "../common/huf.h" -#include "../common/error_private.h" -#include "../common/zstd_internal.h" -#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */ - -/* ************************************************************** -* Constants -****************************************************************/ - -#define HUF_DECODER_FAST_TABLELOG 11 - -/* ************************************************************** -* Macros -****************************************************************/ - -#ifdef HUF_DISABLE_FAST_DECODE -# define HUF_ENABLE_FAST_DECODE 0 -#else -# define HUF_ENABLE_FAST_DECODE 1 -#endif - -/* These two optional macros force the use one way or another of the two - * Huffman decompression implementations. You can't force in both directions - * at the same time. - */ -#if defined(HUF_FORCE_DECOMPRESS_X1) && \ - defined(HUF_FORCE_DECOMPRESS_X2) -#error "Cannot force the use of the X1 and X2 decoders at the same time!" -#endif - -/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is - * supported at runtime, so we can add the BMI2 target attribute. - * When it is disabled, we will still get BMI2 if it is enabled statically. 
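The HUF_DGEN macro further down expands to the usual runtime-dispatch shape. Hand-expanded here for one hypothetical function so the macro is easier to read (names are illustrative, not the zstd symbols):

    /* Hand-expanded sketch of the HUF_DGEN dispatch pattern (illustrative names). */
    #include <stddef.h>

    static size_t decode_body(void* dst, size_t n) { (void)dst; return n; } /* stand-in body */

    static size_t decode_default(void* dst, size_t n) { return decode_body(dst, n); }
    /* In the real code this variant carries BMI2_TARGET_ATTRIBUTE so the compiler
     * may emit BMI2 instructions; it is only reached after a runtime CPU check. */
    static size_t decode_bmi2(void* dst, size_t n) { return decode_body(dst, n); }

    static size_t decode(void* dst, size_t n, int flags)
    {
        if (flags & 1 /* models HUF_flags_bmi2 */) return decode_bmi2(dst, n);
        return decode_default(dst, n);
    }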
- */ -#if DYNAMIC_BMI2 -# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE -#else -# define HUF_FAST_BMI2_ATTRS -#endif - -#ifdef __cplusplus -# define HUF_EXTERN_C extern "C" -#else -# define HUF_EXTERN_C -#endif -#define HUF_ASM_DECL HUF_EXTERN_C - -#if DYNAMIC_BMI2 -# define HUF_NEED_BMI2_FUNCTION 1 -#else -# define HUF_NEED_BMI2_FUNCTION 0 -#endif - -/* ************************************************************** -* Error Management -****************************************************************/ -#define HUF_isError ERR_isError - - -/* ************************************************************** -* Byte alignment for workSpace management -****************************************************************/ -#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) -#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) - - -/* ************************************************************** -* BMI2 Variant Wrappers -****************************************************************/ -typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize, - const void *cSrc, - size_t cSrcSize, - const HUF_DTable *DTable); - -#if DYNAMIC_BMI2 - -#define HUF_DGEN(fn) \ - \ - static size_t fn##_default( \ - void* dst, size_t dstSize, \ - const void* cSrc, size_t cSrcSize, \ - const HUF_DTable* DTable) \ - { \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - \ - static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \ - void* dst, size_t dstSize, \ - const void* cSrc, size_t cSrcSize, \ - const HUF_DTable* DTable) \ - { \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - \ - static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int flags) \ - { \ - if (flags & HUF_flags_bmi2) { \ - return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ - } - -#else - -#define HUF_DGEN(fn) \ - static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int flags) \ - { \ - (void)flags; \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } - -#endif - - -/*-***************************/ -/* generic DTableDesc */ -/*-***************************/ -typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; - -static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) -{ - DTableDesc dtd; - ZSTD_memcpy(&dtd, table, sizeof(dtd)); - return dtd; -} - -static size_t HUF_initFastDStream(BYTE const* ip) { - BYTE const lastByte = ip[7]; - size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; - size_t const value = MEM_readLEST(ip) | 1; - assert(bitsConsumed <= 8); - assert(sizeof(size_t) == 8); - return value << bitsConsumed; -} - - -/** - * The input/output arguments to the Huffman fast decoding loop: - * - * ip [in/out] - The input pointers, must be updated to reflect what is consumed. - * op [in/out] - The output pointers, must be updated to reflect what is written. - * bits [in/out] - The bitstream containers, must be updated to reflect the current state. - * dt [in] - The decoding table. - * ilowest [in] - The beginning of the valid range of the input. Decoders may read - * down to this pointer. It may be below iend[0]. - * oend [in] - The end of the output stream. op[3] must not cross oend. - * iend [in] - The end of each input stream. ip[i] may cross iend[i], - * as long as it is above ilowest, but that indicates corruption. 
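For reference, the four ip[]/iend[] pairs come from the 6-byte jump table at the front of every 4-stream Huffman block: three little-endian 16-bit stream lengths, with the fourth stream taking whatever remains. A minimal parse assuming only that layout:

    #include <stdint.h>
    #include <stddef.h>

    /* Splits a 4-stream Huffman block: 6-byte jump table, then the streams. */
    static int split_streams(const uint8_t* src, size_t srcSize,
                             const uint8_t* start[4], size_t len[4])
    {
        if (srcSize < 10) return -1;                      /* jump table + 1 byte/stream */
        len[0] = (size_t)src[0] | ((size_t)src[1] << 8);  /* MEM_readLE16(src)   */
        len[1] = (size_t)src[2] | ((size_t)src[3] << 8);  /* MEM_readLE16(src+2) */
        len[2] = (size_t)src[4] | ((size_t)src[5] << 8);  /* MEM_readLE16(src+4) */
        if (len[0] + len[1] + len[2] + 6 > srcSize) return -1;  /* corruption */
        len[3] = srcSize - (len[0] + len[1] + len[2] + 6);
        start[0] = src + 6;
        start[1] = start[0] + len[0];
        start[2] = start[1] + len[1];
        start[3] = start[2] + len[2];
        return 0;
    }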
- */ -typedef struct { - BYTE const* ip[4]; - BYTE* op[4]; - U64 bits[4]; - void const* dt; - BYTE const* ilowest; - BYTE* oend; - BYTE const* iend[4]; -} HUF_DecompressFastArgs; - -typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*); - -/** - * Initializes args for the fast decoding loop. - * @returns 1 on success - * 0 if the fallback implementation should be used. - * Or an error code on failure. - */ -static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) -{ - void const* dt = DTable + 1; - U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; - - const BYTE* const istart = (const BYTE*)src; - - BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize); - - /* The fast decoding loop assumes 64-bit little-endian. - * This condition is false on x32. - */ - if (!MEM_isLittleEndian() || MEM_32bits()) - return 0; - - /* Avoid nullptr addition */ - if (dstSize == 0) - return 0; - assert(dst != NULL); - - /* strict minimum : jump table + 1 byte per stream */ - if (srcSize < 10) - return ERROR(corruption_detected); - - /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers. - * If table log is not correct at this point, fallback to the old decoder. - * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. - */ - if (dtLog != HUF_DECODER_FAST_TABLELOG) - return 0; - - /* Read the jump table. */ - { - size_t const length1 = MEM_readLE16(istart); - size_t const length2 = MEM_readLE16(istart+2); - size_t const length3 = MEM_readLE16(istart+4); - size_t const length4 = srcSize - (length1 + length2 + length3 + 6); - args->iend[0] = istart + 6; /* jumpTable */ - args->iend[1] = args->iend[0] + length1; - args->iend[2] = args->iend[1] + length2; - args->iend[3] = args->iend[2] + length3; - - /* HUF_initFastDStream() requires this, and this small of an input - * won't benefit from the ASM loop anyways. - */ - if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) - return 0; - if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ - } - /* ip[] contains the position that is currently loaded into bits[]. */ - args->ip[0] = args->iend[1] - sizeof(U64); - args->ip[1] = args->iend[2] - sizeof(U64); - args->ip[2] = args->iend[3] - sizeof(U64); - args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64); - - /* op[] contains the output pointers. */ - args->op[0] = (BYTE*)dst; - args->op[1] = args->op[0] + (dstSize+3)/4; - args->op[2] = args->op[1] + (dstSize+3)/4; - args->op[3] = args->op[2] + (dstSize+3)/4; - - /* No point to call the ASM loop for tiny outputs. */ - if (args->op[3] >= oend) - return 0; - - /* bits[] is the bit container. - * It is read from the MSB down to the LSB. - * It is shifted left as it is read, and zeros are - * shifted in. After the lowest valid bit a 1 is - * set, so that CountTrailingZeros(bits[]) can be used - * to count how many bits we've consumed. - */ - args->bits[0] = HUF_initFastDStream(args->ip[0]); - args->bits[1] = HUF_initFastDStream(args->ip[1]); - args->bits[2] = HUF_initFastDStream(args->ip[2]); - args->bits[3] = HUF_initFastDStream(args->ip[3]); - - /* The decoders must be sure to never read beyond ilowest. - * This is lower than iend[0], but allowing decoders to read - * down to ilowest can allow an extra iteration or two in the - * fast loop. 
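The sentinel trick used for bits[] is worth spelling out: after loading, the container is OR-ed with 1 and pre-shifted, so at any later point the count of trailing zeros equals the number of bits consumed, with no separate counter to maintain. A standalone check of that invariant (assumes a compiler with __builtin_ctzll, e.g. GCC/Clang):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t container = (0x0123456789ABCDEFULL | 1); /* load | 1 : plant sentinel */
        unsigned consumed = 0;
        /* consume 11 bits at a time from the MSB side, as the fast loop does */
        while (consumed + 11 < 64) {          /* sentinel must stay in the container */
            container <<= 11;                 /* zeros shift in from the bottom */
            consumed += 11;
            assert((unsigned)__builtin_ctzll(container) == consumed);
        }
        return 0;
    }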
- */ - args->ilowest = istart; - - args->oend = oend; - args->dt = dt; - - return 1; -} - -static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd) -{ - /* Validate that we haven't overwritten. */ - if (args->op[stream] > segmentEnd) - return ERROR(corruption_detected); - /* Validate that we haven't read beyond iend[]. - * Note that ip[] may be < iend[] because the MSB is - * the next bit to read, and we may have consumed 100% - * of the stream, so down to iend[i] - 8 is valid. - */ - if (args->ip[stream] < args->iend[stream] - 8) - return ERROR(corruption_detected); - - /* Construct the BIT_DStream_t. */ - assert(sizeof(size_t) == 8); - bit->bitContainer = MEM_readLEST(args->ip[stream]); - bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); - bit->start = (const char*)args->ilowest; - bit->limitPtr = bit->start + sizeof(size_t); - bit->ptr = (const char*)args->ip[stream]; - - return 0; -} - -/* Calls X(N) for each stream 0, 1, 2, 3. */ -#define HUF_4X_FOR_EACH_STREAM(X) \ - do { \ - X(0); \ - X(1); \ - X(2); \ - X(3); \ - } while (0) - -/* Calls X(N, var) for each stream 0, 1, 2, 3. */ -#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \ - do { \ - X(0, (var)); \ - X(1, (var)); \ - X(2, (var)); \ - X(3, (var)); \ - } while (0) - - -#ifndef HUF_FORCE_DECOMPRESS_X2 - -/*-***************************/ -/* single-symbol decoding */ -/*-***************************/ -typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */ - -/** - * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at - * a time. - */ -static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { - U64 D4; - if (MEM_isLittleEndian()) { - D4 = (U64)((symbol << 8) + nbBits); - } else { - D4 = (U64)(symbol + (nbBits << 8)); - } - assert(D4 < (1U << 16)); - D4 *= 0x0001000100010001ULL; - return D4; -} - -/** - * Increase the tableLog to targetTableLog and rescales the stats. - * If tableLog > targetTableLog this is a no-op. - * @returns New tableLog - */ -static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog) -{ - if (tableLog > targetTableLog) - return tableLog; - if (tableLog < targetTableLog) { - U32 const scale = targetTableLog - tableLog; - U32 s; - /* Increase the weight for all non-zero probability symbols by scale. */ - for (s = 0; s < nbSymbols; ++s) { - huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale); - } - /* Update rankVal to reflect the new weights. - * All weights except 0 get moved to weight + scale. - * Weights [1, scale] are empty. 
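Concretely, growing a tableLog-6 table to the fast decoder's tableLog 11 means scale = 5: every non-zero weight w becomes w + 5, and the counts stored in rankVal shift up by 5 slots, leaving rankVal[1..5] empty. The index shift in isolation:

    /* Shift rank counts up by `scale`, clearing the vacated low slots. */
    static void shift_rank_counts(unsigned* rankVal, unsigned targetTableLog, unsigned scale)
    {
        unsigned s;
        for (s = targetTableLog; s > scale; --s) rankVal[s] = rankVal[s - scale];
        for (s = scale; s > 0; --s) rankVal[s] = 0;
    }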
- */ - for (s = targetTableLog; s > scale; --s) { - rankVal[s] = rankVal[s - scale]; - } - for (s = scale; s > 0; --s) { - rankVal[s] = 0; - } - } - return targetTableLog; -} - -typedef struct { - U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; - U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1]; - U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; - BYTE symbols[HUF_SYMBOLVALUE_MAX + 1]; - BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; -} HUF_ReadDTableX1_Workspace; - -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags) -{ - U32 tableLog = 0; - U32 nbSymbols = 0; - size_t iSize; - void* const dtPtr = DTable + 1; - HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; - HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; - - DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp)); - if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge); - - DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); - /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ - - iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags); - if (HUF_isError(iSize)) return iSize; - - - /* Table header */ - { DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 const maxTableLog = dtd.maxTableLog + 1; - U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG); - tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); - if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ - dtd.tableType = 0; - dtd.tableLog = (BYTE)tableLog; - ZSTD_memcpy(DTable, &dtd, sizeof(dtd)); - } - - /* Compute symbols and rankStart given rankVal: - * - * rankVal already contains the number of values of each weight. - * - * symbols contains the symbols ordered by weight. First are the rankVal[0] - * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on. - * symbols[0] is filled (but unused) to avoid a branch. - * - * rankStart contains the offset where each rank belongs in the DTable. - * rankStart[0] is not filled because there are no entries in the table for - * weight 0. - */ - { int n; - U32 nextRankStart = 0; - int const unroll = 4; - int const nLimit = (int)nbSymbols - unroll + 1; - for (n=0; n<(int)tableLog+1; n++) { - U32 const curr = nextRankStart; - nextRankStart += wksp->rankVal[n]; - wksp->rankStart[n] = curr; - } - for (n=0; n < nLimit; n += unroll) { - int u; - for (u=0; u < unroll; ++u) { - size_t const w = wksp->huffWeight[n+u]; - wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u); - } - } - for (; n < (int)nbSymbols; ++n) { - size_t const w = wksp->huffWeight[n]; - wksp->symbols[wksp->rankStart[w]++] = (BYTE)n; - } - } - - /* fill DTable - * We fill all entries of each weight in order. - * That way length is a constant for each iteration of the outer loop. - * We can switch based on the length to a different inner loop which is - * optimized for that particular case. 
-     */
-    {   U32 w;
-        int symbol = wksp->rankVal[0];
-        int rankStart = 0;
-        for (w=1; w<tableLog+1; ++w) {
-            int const symbolCount = wksp->rankVal[w];
-            int const length = (1 << w) >> 1;
-            int uStart = rankStart;
-            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
-            int s;
-            int u;
-            switch (length) {
-            case 1:
-                for (s=0; s<symbolCount; ++s) {
-                    HUF_DEltX1 D;
-                    D.byte = wksp->symbols[symbol + s];
-                    D.nbBits = nbBits;
-                    dt[uStart] = D;
-                    uStart += 1;
-                }
-                break;
-            case 2:
-                for (s=0; s<symbolCount; ++s) {
-                    HUF_DEltX1 D;
-                    D.byte = wksp->symbols[symbol + s];
-                    D.nbBits = nbBits;
-                    dt[uStart+0] = D;
-                    dt[uStart+1] = D;
-                    uStart += 2;
-                }
-                break;
-            case 4:
-                for (s=0; s<symbolCount; ++s) {
-                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
-                    MEM_write64(dt + uStart, D4);
-                    uStart += 4;
-                }
-                break;
-            case 8:
-                for (s=0; s<symbolCount; ++s) {
-                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
-                    MEM_write64(dt + uStart, D4);
-                    MEM_write64(dt + uStart + 4, D4);
-                    uStart += 8;
-                }
-                break;
-            default:
-                for (s=0; s<symbolCount; ++s) {
-                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
-                    for (u=0; u < length; u += 16) {
-                        MEM_write64(dt + uStart + u + 0, D4);
-                        MEM_write64(dt + uStart + u + 4, D4);
-                        MEM_write64(dt + uStart + u + 8, D4);
-                        MEM_write64(dt + uStart + u + 12, D4);
-                    }
-                    assert(u == length);
-                    uStart += length;
-                }
-                break;
-            }
-            symbol += symbolCount;
-            rankStart += symbolCount * length;
-        }
-    }
-    return iSize;
-}
-
-FORCE_INLINE_TEMPLATE BYTE
-HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(Dstream, dtLog);    /* note : dtLog >= 1 */
-    BYTE const c = dt[val].byte;
-    BIT_skipBits(Dstream, dt[val].nbBits);
-    return c;
-}
-
-#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
-    do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
-
-#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
-    do { \
-        if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-            HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
-    } while (0)
-
-#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
-    do { \
-        if (MEM_64bits()) \
-            HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
-    } while (0)
-
-HINT_INLINE size_t
-HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    if ((pEnd - p) > 3) {
-        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
-            HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
-            HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
-            HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
-            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-        }
-    } else {
-        BIT_reloadDStream(bitDPtr);
-    }
-
-    /* [0-3] symbols remaining */
-    if (MEM_32bits())
-        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
-            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, no need to reload */
-    while (p < pEnd)
-        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-
-    return (size_t)(pEnd-pStart);
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X1_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = ZSTD_maybeNullPtrAdd(op, dstSize);
-    const void* dtPtr = DTable + 1;
-    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
-    BIT_DStream_t bitD;
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    U32 const dtLog = dtd.tableLog;
-
-    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
-
-    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
-
-    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    return dstSize;
-}
-
-/* HUF_decompress4X1_usingDTable_internal_body():
- * Conditions :
- * @dstSize >= 6
- */
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X1_usingDTable_internal_body(
-          void* dst,  size_t
dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - /* Check */ - if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ - - { const BYTE* const istart = (const BYTE*) cSrc; - BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - BYTE* const olimit = oend - 3; - const void* const dtPtr = DTable + 1; - const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; - - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - size_t const length1 = MEM_readLE16(istart); - size_t const length2 = MEM_readLE16(istart+2); - size_t const length3 = MEM_readLE16(istart+4); - size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); - const BYTE* const istart1 = istart + 6; /* jumpTable */ - const BYTE* const istart2 = istart1 + length1; - const BYTE* const istart3 = istart2 + length2; - const BYTE* const istart4 = istart3 + length3; - const size_t segmentSize = (dstSize+3) / 4; - BYTE* const opStart2 = ostart + segmentSize; - BYTE* const opStart3 = opStart2 + segmentSize; - BYTE* const opStart4 = opStart3 + segmentSize; - BYTE* op1 = ostart; - BYTE* op2 = opStart2; - BYTE* op3 = opStart3; - BYTE* op4 = opStart4; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - U32 endSignal = 1; - - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ - assert(dstSize >= 6); /* validated above */ - CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); - CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); - CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); - CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); - - /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ - if ((size_t)(oend - op4) >= sizeof(size_t)) { - for ( ; (endSignal) & (op4 < olimit) ; ) { - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_1(op1, &bitD1); - HUF_DECODE_SYMBOLX1_1(op2, &bitD2); - HUF_DECODE_SYMBOLX1_1(op3, &bitD3); - HUF_DECODE_SYMBOLX1_1(op4, &bitD4); - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_0(op1, &bitD1); - HUF_DECODE_SYMBOLX1_0(op2, &bitD2); - HUF_DECODE_SYMBOLX1_0(op3, &bitD3); - HUF_DECODE_SYMBOLX1_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; - } - } - - /* check corruption */ - /* note : should not be necessary : op# advance in lock step, and we control op4. 
- * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */ - if (op1 > opStart2) return ERROR(corruption_detected); - if (op2 > opStart3) return ERROR(corruption_detected); - if (op3 > opStart4) return ERROR(corruption_detected); - /* note : op4 supposed already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); - - /* check */ - { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endCheck) return ERROR(corruption_detected); } - - /* decoded size */ - return dstSize; - } -} - -#if HUF_NEED_BMI2_FUNCTION -static BMI2_TARGET_ATTRIBUTE -size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable) { - return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); -} -#endif - -static -size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable) { - return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); -} - -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; - -#endif - -static HUF_FAST_BMI2_ATTRS -void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) -{ - U64 bits[4]; - BYTE const* ip[4]; - BYTE* op[4]; - U16 const* const dtable = (U16 const*)args->dt; - BYTE* const oend = args->oend; - BYTE const* const ilowest = args->ilowest; - - /* Copy the arguments to local variables */ - ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); - ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); - ZSTD_memcpy(&op, &args->op, sizeof(op)); - - assert(MEM_isLittleEndian()); - assert(!MEM_32bits()); - - for (;;) { - BYTE* olimit; - int stream; - - /* Assert loop preconditions */ -#ifndef NDEBUG - for (stream = 0; stream < 4; ++stream) { - assert(op[stream] <= (stream == 3 ? oend : op[stream + 1])); - assert(ip[stream] >= ilowest); - } -#endif - /* Compute olimit */ - { - /* Each iteration produces 5 output symbols per stream */ - size_t const oiters = (size_t)(oend - op[3]) / 5; - /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes - * per stream. - */ - size_t const iiters = (size_t)(ip[0] - ilowest) / 7; - /* We can safely run iters iterations before running bounds checks */ - size_t const iters = MIN(oiters, iiters); - size_t const symbols = iters * 5; - - /* We can simply check that op[3] < olimit, instead of checking all - * of our bounds, since we can't hit the other bounds until we've run - * iters iterations, which only happens when op[3] == olimit. - */ - olimit = op[3] + symbols; - - /* Exit fast decoding loop once we reach the end. */ - if (op[3] == olimit) - break; - - /* Exit the decoding loop if any input pointer has crossed the - * previous one. This indicates corruption, and a precondition - * to our loop is that ip[i] >= ip[0]. 
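The olimit computation above reduces to two budgets: each unrolled iteration emits exactly 5 symbols per stream and consumes at most 55 bits, i.e. fewer than 7 whole bytes, per stream, so the provably safe iteration count is the smaller quotient. In isolation:

    #include <stddef.h>

    /* Iterations that need no per-symbol bounds checks: 5 output symbols and
     * at most 55 bits (< 7 bytes) of input are used per stream per iteration. */
    static size_t safe_iterations(size_t outBytesLeft, size_t inBytesLeft)
    {
        size_t const oiters = outBytesLeft / 5;   /* output budget */
        size_t const iiters = inBytesLeft / 7;    /* input budget  */
        return oiters < iiters ? oiters : iiters;
    }
    /* olimit = op[3] + safe_iterations(...) * 5; run unchecked until op[3] == olimit. */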
- */ - for (stream = 1; stream < 4; ++stream) { - if (ip[stream] < ip[stream - 1]) - goto _out; - } - } - -#ifndef NDEBUG - for (stream = 1; stream < 4; ++stream) { - assert(ip[stream] >= ip[stream - 1]); - } -#endif - -#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \ - do { \ - int const index = (int)(bits[(_stream)] >> 53); \ - int const entry = (int)dtable[index]; \ - bits[(_stream)] <<= (entry & 0x3F); \ - op[(_stream)][(_symbol)] = (BYTE)((entry >> 8) & 0xFF); \ - } while (0) - -#define HUF_4X1_RELOAD_STREAM(_stream) \ - do { \ - int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ - int const nbBits = ctz & 7; \ - int const nbBytes = ctz >> 3; \ - op[(_stream)] += 5; \ - ip[(_stream)] -= nbBytes; \ - bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ - bits[(_stream)] <<= nbBits; \ - } while (0) - - /* Manually unroll the loop because compilers don't consistently - * unroll the inner loops, which destroys performance. - */ - do { - /* Decode 5 symbols in each of the 4 streams */ - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4); - - /* Reload each of the 4 the bitstreams */ - HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM); - } while (op[3] < olimit); - -#undef HUF_4X1_DECODE_SYMBOL -#undef HUF_4X1_RELOAD_STREAM - } - -_out: - - /* Save the final values of each of the state variables back to args. */ - ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); - ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); - ZSTD_memcpy(&args->op, &op, sizeof(op)); -} - -/** - * @returns @p dstSize on success (>= 6) - * 0 if the fallback implementation should be used - * An error if an error occurred - */ -static HUF_FAST_BMI2_ATTRS -size_t -HUF_decompress4X1_usingDTable_internal_fast( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable, - HUF_DecompressFastLoopFn loopFn) -{ - void const* dt = DTable + 1; - BYTE const* const ilowest = (BYTE const*)cSrc; - BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize); - HUF_DecompressFastArgs args; - { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); - FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); - if (ret == 0) - return 0; - } - - assert(args.ip[0] >= args.ilowest); - loopFn(&args); - - /* Our loop guarantees that ip[] >= ilowest and that we haven't - * overwritten any op[]. - */ - assert(args.ip[0] >= ilowest); - assert(args.ip[0] >= ilowest); - assert(args.ip[1] >= ilowest); - assert(args.ip[2] >= ilowest); - assert(args.ip[3] >= ilowest); - assert(args.op[3] <= oend); - - assert(ilowest == args.ilowest); - assert(ilowest + 6 == args.iend[0]); - (void)ilowest; - - /* finish bit streams one by one. */ - { size_t const segmentSize = (dstSize+3) / 4; - BYTE* segmentEnd = (BYTE*)dst; - int i; - for (i = 0; i < 4; ++i) { - BIT_DStream_t bit; - if (segmentSize <= (size_t)(oend - segmentEnd)) - segmentEnd += segmentSize; - else - segmentEnd = oend; - FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); - /* Decompress and validate that we've produced exactly the expected length. 
*/ - args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG); - if (args.op[i] != segmentEnd) return ERROR(corruption_detected); - } - } - - /* decoded size */ - assert(dstSize != 0); - return dstSize; -} - -HUF_DGEN(HUF_decompress1X1_usingDTable_internal) - -static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int flags) -{ - HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default; - HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop; - -#if DYNAMIC_BMI2 - if (flags & HUF_flags_bmi2) { - fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2; -# if ZSTD_ENABLE_ASM_X86_64_BMI2 - if (!(flags & HUF_flags_disableAsm)) { - loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; - } -# endif - } else { - return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); - } -#endif - -#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - if (!(flags & HUF_flags_disableAsm)) { - loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; - } -#endif - - if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { - size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); - if (ret != 0) - return ret; - } - return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); -} - -static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int flags) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); -} - -#endif /* HUF_FORCE_DECOMPRESS_X2 */ - - -#ifndef HUF_FORCE_DECOMPRESS_X1 - -/* *************************/ -/* double-symbols decoding */ -/* *************************/ - -typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ -typedef struct { BYTE symbol; } sortedSymbol_t; -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; -typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; - -/** - * Constructs a HUF_DEltX2 in a U32. - */ -static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level) -{ - U32 seq; - DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0); - DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2); - DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3); - DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32)); - if (MEM_isLittleEndian()) { - seq = level == 1 ? symbol : (baseSeq + (symbol << 8)); - return seq + (nbBits << 16) + ((U32)level << 24); - } else { - seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol); - return (seq << 16) + (nbBits << 8) + (U32)level; - } -} - -/** - * Constructs a HUF_DEltX2. - */ -static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level) -{ - HUF_DEltX2 DElt; - U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); - DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val)); - ZSTD_memcpy(&DElt, &val, sizeof(val)); - return DElt; -} - -/** - * Constructs 2 HUF_DEltX2s and packs them into a U64. 
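Each HUF_DEltX2 is four bytes: a two-byte sequence (up to two already-decoded literals), nbBits to consume, and length bytes to emit (1 or 2). On a little-endian target the packing above is plain shifts; a sketch of just the level-2 two-symbol case, with a round-trip check:

    #include <stdint.h>
    #include <assert.h>

    /* Little-endian layout of HUF_DEltX2: sequence (16) | nbBits (8) | length (8). */
    static uint32_t pack_delt2(uint32_t sym2, uint32_t nbBits, uint32_t sym1)
    {
        uint32_t const seq = sym1 + (sym2 << 8);      /* level 2: two symbols */
        return seq + (nbBits << 16) + (2u << 24);     /* length = 2 */
    }

    int main(void)
    {
        uint32_t const d = pack_delt2('b', 9, 'a');
        assert((d & 0xFF) == 'a');          /* first symbol out  */
        assert(((d >> 8) & 0xFF) == 'b');   /* second symbol out */
        assert(((d >> 16) & 0xFF) == 9);    /* bits consumed     */
        assert((d >> 24) == 2);             /* bytes written     */
        return 0;
    }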
- */ -static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level) -{ - U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); - return (U64)DElt + ((U64)DElt << 32); -} - -/** - * Fills the DTable rank with all the symbols from [begin, end) that are each - * nbBits long. - * - * @param DTableRank The start of the rank in the DTable. - * @param begin The first symbol to fill (inclusive). - * @param end The last symbol to fill (exclusive). - * @param nbBits Each symbol is nbBits long. - * @param tableLog The table log. - * @param baseSeq If level == 1 { 0 } else { the first level symbol } - * @param level The level in the table. Must be 1 or 2. - */ -static void HUF_fillDTableX2ForWeight( - HUF_DEltX2* DTableRank, - sortedSymbol_t const* begin, sortedSymbol_t const* end, - U32 nbBits, U32 tableLog, - U16 baseSeq, int const level) -{ - U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */); - const sortedSymbol_t* ptr; - assert(level >= 1 && level <= 2); - switch (length) { - case 1: - for (ptr = begin; ptr != end; ++ptr) { - HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); - *DTableRank++ = DElt; - } - break; - case 2: - for (ptr = begin; ptr != end; ++ptr) { - HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); - DTableRank[0] = DElt; - DTableRank[1] = DElt; - DTableRank += 2; - } - break; - case 4: - for (ptr = begin; ptr != end; ++ptr) { - U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); - ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); - DTableRank += 4; - } - break; - case 8: - for (ptr = begin; ptr != end; ++ptr) { - U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); - ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); - DTableRank += 8; - } - break; - default: - for (ptr = begin; ptr != end; ++ptr) { - U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); - HUF_DEltX2* const DTableRankEnd = DTableRank + length; - for (; DTableRank != DTableRankEnd; DTableRank += 8) { - ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); - } - } - break; - } -} - -/* HUF_fillDTableX2Level2() : - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits, - const U32* rankVal, const int minWeight, const int maxWeight1, - const sortedSymbol_t* sortedSymbols, U32 const* rankStart, - U32 nbBitsBaseline, U16 baseSeq) -{ - /* Fill skipped values (all positions up to rankVal[minWeight]). - * These are positions only get a single symbol because the combined weight - * is too large. 
- */ - if (minWeight>1) { - U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */); - U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1); - int const skipSize = rankVal[minWeight]; - assert(length > 1); - assert((U32)skipSize < length); - switch (length) { - case 2: - assert(skipSize == 1); - ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2)); - break; - case 4: - assert(skipSize <= 4); - ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2)); - break; - default: - { - int i; - for (i = 0; i < skipSize; i += 8) { - ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2)); - ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2)); - } - } - } - } - - /* Fill each of the second level symbols by weight. */ - { - int w; - for (w = minWeight; w < maxWeight1; ++w) { - int const begin = rankStart[w]; - int const end = rankStart[w+1]; - U32 const nbBits = nbBitsBaseline - w; - U32 const totalBits = nbBits + consumedBits; - HUF_fillDTableX2ForWeight( - DTable + rankVal[w], - sortedSymbols + begin, sortedSymbols + end, - totalBits, targetLog, - baseSeq, /* level */ 2); - } - } -} - -static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, - const sortedSymbol_t* sortedList, - const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight, - const U32 nbBitsBaseline) -{ - U32* const rankVal = rankValOrigin[0]; - const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ - const U32 minBits = nbBitsBaseline - maxWeight; - int w; - int const wEnd = (int)maxWeight + 1; - - /* Fill DTable in order of weight. */ - for (w = 1; w < wEnd; ++w) { - int const begin = (int)rankStart[w]; - int const end = (int)rankStart[w+1]; - U32 const nbBits = nbBitsBaseline - w; - - if (targetLog-nbBits >= minBits) { - /* Enough room for a second symbol. */ - int start = rankVal[w]; - U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */); - int minWeight = nbBits + scaleLog; - int s; - if (minWeight < 1) minWeight = 1; - /* Fill the DTable for every symbol of weight w. - * These symbols get at least 1 second symbol. - */ - for (s = begin; s != end; ++s) { - HUF_fillDTableX2Level2( - DTable + start, targetLog, nbBits, - rankValOrigin[nbBits], minWeight, wEnd, - sortedList, rankStart, - nbBitsBaseline, sortedList[s].symbol); - start += length; - } - } else { - /* Only a single symbol. 
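 * (Editorial note: this branch runs when targetLog - nbBits < minBits, so
 * fewer bits remain in the cell than the shortest code needs; no second
 * symbol can fit, and every cell for this weight stores one level-1 entry.)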
*/ - HUF_fillDTableX2ForWeight( - DTable + rankVal[w], - sortedList + begin, sortedList + end, - nbBits, targetLog, - /* baseSeq */ 0, /* level */ 1); - } - } -} - -typedef struct { - rankValCol_t rankVal[HUF_TABLELOG_MAX]; - U32 rankStats[HUF_TABLELOG_MAX + 1]; - U32 rankStart0[HUF_TABLELOG_MAX + 3]; - sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; - BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; - U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; -} HUF_ReadDTableX2_Workspace; - -size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, - const void* src, size_t srcSize, - void* workSpace, size_t wkspSize, int flags) -{ - U32 tableLog, maxW, nbSymbols; - DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 maxTableLog = dtd.maxTableLog; - size_t iSize; - void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ - HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; - U32 *rankStart; - - HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace; - - if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC); - - rankStart = wksp->rankStart0 + 1; - ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats)); - ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0)); - - DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ - if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); - /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ - - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags); - if (HUF_isError(iSize)) return iSize; - - /* check result */ - if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ - if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG; - - /* find maxWeight */ - for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ - - /* Get start index of each weight */ - { U32 w, nextRankStart = 0; - for (w=1; w<maxW+1; w++) { - U32 curr = nextRankStart; - nextRankStart += wksp->rankStats[w]; - rankStart[w] = curr; - } - rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - rankStart[maxW+1] = nextRankStart; - } - - /* sort symbols by weight */ - { U32 s; - for (s=0; s<nbSymbols; s++) { - U32 const w = wksp->weightList[s]; - U32 const r = rankStart[w]++; - wksp->sortedSymbol[r].symbol = (BYTE)s; - } - rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ - } - - /* Build rankVal */ - { U32* const rankVal0 = wksp->rankVal[0]; - { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */ - U32 nextRankVal = 0; - U32 w; - for (w=1; w<maxW+1; w++) { - U32 curr = nextRankVal; - nextRankVal += wksp->rankStats[w] << (w+rescale); - rankVal0[w] = curr; - } } - { U32 const minBits = tableLog+1 - maxW; - U32 consumed; - for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { - U32* const rankValPtr = wksp->rankVal[consumed]; - U32 w; - for (w = 1; w < maxW+1; w++) { - rankValPtr[w] = rankVal0[w] >> consumed; - } } } } - - HUF_fillDTableX2(dt, maxTableLog, - wksp->sortedSymbol, - wksp->rankStart0, wksp->rankVal, maxW, - tableLog+1); - - dtd.tableLog = (BYTE)maxTableLog; - dtd.tableType = 1; - ZSTD_memcpy(DTable, &dtd, sizeof(dtd)); - return iSize; -} - - -FORCE_INLINE_TEMPLATE U32 -HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
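    /* Editorial gloss on the lines below (no new names): dt[val] carries a
     * pre-rendered 2-byte sequence plus the bit cost (nbBits) and the byte
     * count (length). The decoder always copies 2 bytes but advances op by
     * length, which is 1 or 2; when length == 1 the spare byte is harmless
     * because the next symbol's write overwrites it. */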
- ZSTD_memcpy(op, &dt[val].sequence, 2); - BIT_skipBits(DStream, dt[val].nbBits); - return dt[val].length; -} - -FORCE_INLINE_TEMPLATE U32 -HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - ZSTD_memcpy(op, &dt[val].sequence, 1); - if (dt[val].length==1) { - BIT_skipBits(DStream, dt[val].nbBits); - } else { - if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { - BIT_skipBits(DStream, dt[val].nbBits); - if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) - /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ - DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); - } - } - return 1; -} - -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ - do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0) - -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - do { \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ - } while (0) - -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - do { \ - if (MEM_64bits()) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \ - } while (0) - -HINT_INLINE size_t -HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, - const HUF_DEltX2* const dt, const U32 dtLog) -{ - BYTE* const pStart = p; - - /* up to 8 symbols at a time */ - if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) { - if (dtLog <= 11 && MEM_64bits()) { - /* up to 10 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) { - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - } - } else { - /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - } - } - } else { - BIT_reloadDStream(bitDPtr); - } - - /* closer to end : up to 2 symbols at a time */ - if ((size_t)(pEnd - p) >= 2) { - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - while (p <= pEnd-2) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ - } - - if (p < pEnd) - p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); - - return p-pStart; -} - -FORCE_INLINE_TEMPLATE size_t -HUF_decompress1X2_usingDTable_internal_body( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - BIT_DStream_t bitD; - - /* Init */ - CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); - - /* decode */ - { BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, dstSize); - const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ - const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog); - } - - /* check */ - if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); - - /* decoded size */ - return dstSize; -} - -/* HUF_decompress4X2_usingDTable_internal_body(): - * Conditions: - * @dstSize >= 6 - */ -FORCE_INLINE_TEMPLATE size_t 
-HUF_decompress4X2_usingDTable_internal_body( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ - - { const BYTE* const istart = (const BYTE*) cSrc; - BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - BYTE* const olimit = oend - (sizeof(size_t)-1); - const void* const dtPtr = DTable+1; - const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; - - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - size_t const length1 = MEM_readLE16(istart); - size_t const length2 = MEM_readLE16(istart+2); - size_t const length3 = MEM_readLE16(istart+4); - size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); - const BYTE* const istart1 = istart + 6; /* jumpTable */ - const BYTE* const istart2 = istart1 + length1; - const BYTE* const istart3 = istart2 + length2; - const BYTE* const istart4 = istart3 + length3; - size_t const segmentSize = (dstSize+3) / 4; - BYTE* const opStart2 = ostart + segmentSize; - BYTE* const opStart3 = opStart2 + segmentSize; - BYTE* const opStart4 = opStart3 + segmentSize; - BYTE* op1 = ostart; - BYTE* op2 = opStart2; - BYTE* op3 = opStart3; - BYTE* op4 = opStart4; - U32 endSignal = 1; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ - assert(dstSize >= 6 /* validated above */); - CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); - CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); - CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); - CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); - - /* 16-32 symbols per loop (4-8 symbols per stream) */ - if ((size_t)(oend - op4) >= sizeof(size_t)) { - for ( ; (endSignal) & (op4 < olimit); ) { -#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; -#else - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - 
HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal = (U32)LIKELY((U32) - (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); -#endif - } - } - - /* check corruption */ - if (op1 > opStart2) return ERROR(corruption_detected); - if (op2 > opStart3) return ERROR(corruption_detected); - if (op3 > opStart4) return ERROR(corruption_detected); - /* note : op4 already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); - - /* check */ - { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endCheck) return ERROR(corruption_detected); } - - /* decoded size */ - return dstSize; - } -} - -#if HUF_NEED_BMI2_FUNCTION -static BMI2_TARGET_ATTRIBUTE -size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable) { - return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); -} -#endif - -static -size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable) { - return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); -} - -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; - -#endif - -static HUF_FAST_BMI2_ATTRS -void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) -{ - U64 bits[4]; - BYTE const* ip[4]; - BYTE* op[4]; - BYTE* oend[4]; - HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt; - BYTE const* const ilowest = args->ilowest; - - /* Copy the arguments to local registers. */ - ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); - ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); - ZSTD_memcpy(&op, &args->op, sizeof(op)); - - oend[0] = op[1]; - oend[1] = op[2]; - oend[2] = op[3]; - oend[3] = args->oend; - - assert(MEM_isLittleEndian()); - assert(!MEM_32bits()); - - for (;;) { - BYTE* olimit; - int stream; - - /* Assert loop preconditions */ -#ifndef NDEBUG - for (stream = 0; stream < 4; ++stream) { - assert(op[stream] <= oend[stream]); - assert(ip[stream] >= ilowest); - } -#endif - /* Compute olimit */ - { - /* Each loop does 5 table lookups for each of the 4 streams. - * Each table lookup consumes up to 11 bits of input, and produces - * up to 2 bytes of output. - */ - /* We can consume up to 7 bytes of input per iteration per stream. - * We also know that each input pointer is >= ip[0]. So we can run - * iters loops before running out of input. - */ - size_t iters = (size_t)(ip[0] - ilowest) / 7; - /* Each iteration can produce up to 10 bytes of output per stream. - * Each output stream may advance at different rates. So take the - * minimum number of safe iterations among all the output streams.
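 *
 * Editorial worked example (illustrative numbers): if ip[0] sits 70 bytes
 * above ilowest and the tightest output stream has 100 bytes left, then
 * iters = MIN(70/7, 100/10) = 10 and olimit = op[3] + 10*5, so the fast
 * loop may run 10 iterations without any per-symbol bounds checks.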
- */ - for (stream = 0; stream < 4; ++stream) { - size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10; - iters = MIN(iters, oiters); - } - - /* Each iteration produces at least 5 output symbols. So until - * op[3] crosses olimit, we know we haven't executed iters - * iterations yet. This saves us maintaining an iters counter, - * at the expense of computing the remaining # of iterations - * more frequently. - */ - olimit = op[3] + (iters * 5); - - /* Exit the fast decoding loop once we reach the end. */ - if (op[3] == olimit) - break; - - /* Exit the decoding loop if any input pointer has crossed the - * previous one. This indicates corruption, and a precondition - * to our loop is that ip[i] >= ip[0]. - */ - for (stream = 1; stream < 4; ++stream) { - if (ip[stream] < ip[stream - 1]) - goto _out; - } - } - -#ifndef NDEBUG - for (stream = 1; stream < 4; ++stream) { - assert(ip[stream] >= ip[stream - 1]); - } -#endif - -#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \ - do { \ - if ((_decode3) || (_stream) != 3) { \ - int const index = (int)(bits[(_stream)] >> 53); \ - HUF_DEltX2 const entry = dtable[index]; \ - MEM_write16(op[(_stream)], entry.sequence); \ - bits[(_stream)] <<= (entry.nbBits) & 0x3F; \ - op[(_stream)] += (entry.length); \ - } \ - } while (0) - -#define HUF_4X2_RELOAD_STREAM(_stream) \ - do { \ - HUF_4X2_DECODE_SYMBOL(3, 1); \ - { \ - int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \ - int const nbBits = ctz & 7; \ - int const nbBytes = ctz >> 3; \ - ip[(_stream)] -= nbBytes; \ - bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \ - bits[(_stream)] <<= nbBits; \ - } \ - } while (0) - - /* Manually unroll the loop because compilers don't consistently - * unroll the inner loops, which destroys performance. - */ - do { - /* Decode 5 symbols from each of the first 3 streams. - * The final stream will be decoded during the reload phase - * to reduce register pressure. - */ - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0); - HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0); - - /* Decode one symbol from the final stream */ - HUF_4X2_DECODE_SYMBOL(3, 1); - - /* Decode 4 symbols from the final stream & reload bitstreams. - * The final stream is reloaded last, meaning that all 5 symbols - * are decoded from the final stream before it is reloaded. - */ - HUF_4X_FOR_EACH_STREAM(HUF_4X2_RELOAD_STREAM); - } while (op[3] < olimit); - } - -#undef HUF_4X2_DECODE_SYMBOL -#undef HUF_4X2_RELOAD_STREAM - -_out: - - /* Save the final values of each of the state variables back to args. 
*/ - ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); - ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); - ZSTD_memcpy(&args->op, &op, sizeof(op)); -} - - -static HUF_FAST_BMI2_ATTRS size_t -HUF_decompress4X2_usingDTable_internal_fast( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable, - HUF_DecompressFastLoopFn loopFn) { - void const* dt = DTable + 1; - const BYTE* const ilowest = (const BYTE*)cSrc; - BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize); - HUF_DecompressFastArgs args; - { - size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); - FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret == 0) - return 0; - } - - assert(args.ip[0] >= args.ilowest); - loopFn(&args); - - /* note : op4 already verified within main loop */ - assert(args.ip[0] >= ilowest); - assert(args.ip[1] >= ilowest); - assert(args.ip[2] >= ilowest); - assert(args.ip[3] >= ilowest); - assert(args.op[3] <= oend); - - assert(ilowest == args.ilowest); - assert(ilowest + 6 == args.iend[0]); - (void)ilowest; - - /* finish bitStreams one by one */ - { - size_t const segmentSize = (dstSize+3) / 4; - BYTE* segmentEnd = (BYTE*)dst; - int i; - for (i = 0; i < 4; ++i) { - BIT_DStream_t bit; - if (segmentSize <= (size_t)(oend - segmentEnd)) - segmentEnd += segmentSize; - else - segmentEnd = oend; - FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); - args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG); - if (args.op[i] != segmentEnd) - return ERROR(corruption_detected); - } - } - - /* decoded size */ - return dstSize; -} - -static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int flags) -{ - HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default; - HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop; - -#if DYNAMIC_BMI2 - if (flags & HUF_flags_bmi2) { - fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2; -# if ZSTD_ENABLE_ASM_X86_64_BMI2 - if (!(flags & HUF_flags_disableAsm)) { - loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; - } -# endif - } else { - return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); - } -#endif - -#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - if (!(flags & HUF_flags_disableAsm)) { - loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; - } -#endif - - if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) { - size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); - if (ret != 0) - return ret; - } - return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); -} - -HUF_DGEN(HUF_decompress1X2_usingDTable_internal) - -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int flags) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, - workSpace, wkspSize, flags); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); -} - -static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - 
void* workSpace, size_t wkspSize, int flags) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, - workSpace, wkspSize, flags); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); -} - -#endif /* HUF_FORCE_DECOMPRESS_X1 */ - - -/* ***********************************/ -/* Universal decompression selectors */ -/* ***********************************/ - - -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) -typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; -static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] = -{ - /* single, double, quad */ - {{0,0}, {1,1}}, /* Q==0 : impossible */ - {{0,0}, {1,1}}, /* Q==1 : impossible */ - {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */ - {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */ - {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */ - {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */ - {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */ - {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */ - {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */ - {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */ - {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */ - {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */ - {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */ - {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */ - {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */ - {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */ -}; -#endif - -/** HUF_selectDecoder() : - * Tells which decoder is likely to decode faster, - * based on a set of pre-computed metrics. - * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . - * Assumption : 0 < dstSize <= 128 KB */ -U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) -{ - assert(dstSize > 0); - assert(dstSize <= 128*1024); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dstSize; - (void)cSrcSize; - return 0; -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dstSize; - (void)cSrcSize; - return 1; -#else - /* decoder timing evaluation */ - { U32 const Q = (cSrcSize >= dstSize) ? 
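 /* Editorial worked example (illustrative numbers): for dstSize = 64 KB and
  * cSrcSize = 32 KB, Q = 32768*16/65536 = 8 and D256 = 65536>>8 = 256, so
  * DTime0 = 359 + 188*256 = 48487 and DTime1 = 1167 + 109*256 = 29071 plus
  * a ~3% memory penalty; DTime1 still wins, selecting the X2 decoder. */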
15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ - U32 const D256 = (U32)(dstSize >> 8); - U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); - U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); - DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */ - return DTime1 < DTime0; - } -#endif -} - -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int flags) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize, flags); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize, flags); -#else - return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize, flags): - HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize, flags); -#endif - } -} - - -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#endif -} - -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); -} -#endif - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#else - return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); -#endif -} - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize == 0) return ERROR(corruption_detected); - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); -#else - return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : - HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); -#endif - } -} diff --git a/zstandard_android/src/decompress/huf_decompress_amd64.S b/zstandard_android/src/decompress/huf_decompress_amd64.S deleted file mode 100644 index 78da291..0000000 --- a/zstandard_android/src/decompress/huf_decompress_amd64.S +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#include "../common/portability_macros.h" - -#if defined(__ELF__) && defined(__GNUC__) -/* Stack marking - * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart - */ -.section .note.GNU-stack,"",%progbits - -#if defined(__aarch64__) -/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64. - * See: https://github.com/facebook/zstd/issues/3841 - * See: https://gcc.godbolt.org/z/sqr5T4ffK - * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/ - * See: https://reviews.llvm.org/D62609 - */ -.pushsection .note.gnu.property, "a" -.p2align 3 -.long 4 /* size of the name - "GNU\0" */ -.long 0x10 /* size of descriptor */ -.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */ -.asciz "GNU" -.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */ -.long 4 /* pr_datasz - 4 bytes */ -.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ -.p2align 3 /* pr_padding - bring everything to 8 byte alignment */ -.popsection -#endif - -#endif - -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -/* Calling convention: - * - * %rdi contains the first argument: HUF_DecompressAsmArgs*. - * %rbp isn't maintained (no frame pointer). - * %rsp contains the stack pointer that grows down. - * No red-zone is assumed, only addresses >= %rsp are used. - * All register contents are preserved. - * - * TODO: Support Windows calling convention. 
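 *
 * (Editorial note: Windows x64 passes the first argument in %rcx rather
 * than %rdi and requires 32 bytes of caller-reserved shadow space, which
 * is why this fast loop is currently assembled only for SysV-style
 * x86-64 targets.)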
- */ - -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) -.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop -.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop -.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop -.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop -.text - -/* Sets up register mappings for clarity. - * op[], bits[], dtable & ip[0] each get their own register. - * ip[1,2,3] & olimit alias var[]. - * %rax is a scratch register. - */ - -#define op0 rsi -#define op1 rbx -#define op2 rcx -#define op3 rdi - -#define ip0 r8 -#define ip1 r9 -#define ip2 r10 -#define ip3 r11 - -#define bits0 rbp -#define bits1 rdx -#define bits2 r12 -#define bits3 r13 -#define dtable r14 -#define olimit r15 - -/* var[] aliases ip[1,2,3] & olimit - * ip[1,2,3] are saved every iteration. - * olimit is only used in compute_olimit. - */ -#define var0 r15 -#define var1 r9 -#define var2 r10 -#define var3 r11 - -/* 32-bit var registers */ -#define vard0 r15d -#define vard1 r9d -#define vard2 r10d -#define vard3 r11d - -/* Calls X(N) for each stream 0, 1, 2, 3. */ -#define FOR_EACH_STREAM(X) \ - X(0); \ - X(1); \ - X(2); \ - X(3) - -/* Calls X(N, idx) for each stream 0, 1, 2, 3. */ -#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ - X(0, idx); \ - X(1, idx); \ - X(2, idx); \ - X(3, idx) - -/* Define both _HUF_* & HUF_* symbols because MacOS - * C symbols are prefixed with '_' & Linux symbols aren't. - */ -_HUF_decompress4X1_usingDTable_internal_fast_asm_loop: -HUF_decompress4X1_usingDTable_internal_fast_asm_loop: - ZSTD_CET_ENDBRANCH - /* Save all registers - even if they are callee saved for simplicity. 
*/ - push %rax - push %rbx - push %rcx - push %rdx - push %rbp - push %rsi - push %rdi - push %r8 - push %r9 - push %r10 - push %r11 - push %r12 - push %r13 - push %r14 - push %r15 - - /* Read HUF_DecompressAsmArgs* args from %rax */ - movq %rdi, %rax - movq 0(%rax), %ip0 - movq 8(%rax), %ip1 - movq 16(%rax), %ip2 - movq 24(%rax), %ip3 - movq 32(%rax), %op0 - movq 40(%rax), %op1 - movq 48(%rax), %op2 - movq 56(%rax), %op3 - movq 64(%rax), %bits0 - movq 72(%rax), %bits1 - movq 80(%rax), %bits2 - movq 88(%rax), %bits3 - movq 96(%rax), %dtable - push %rax /* argument */ - push 104(%rax) /* ilowest */ - push 112(%rax) /* oend */ - push %olimit /* olimit space */ - - subq $24, %rsp - -.L_4X1_compute_olimit: - /* Computes how many iterations we can do safely - * %r15, %rax may be clobbered - * rbx, rdx must be saved - * op3 & ip0 mustn't be clobbered - */ - movq %rbx, 0(%rsp) - movq %rdx, 8(%rsp) - - movq 32(%rsp), %rax /* rax = oend */ - subq %op3, %rax /* rax = oend - op3 */ - - /* r15 = (oend - op3) / 5 */ - movabsq $-3689348814741910323, %rdx - mulq %rdx - movq %rdx, %r15 - shrq $2, %r15 - - movq %ip0, %rax /* rax = ip0 */ - movq 40(%rsp), %rdx /* rdx = ilowest */ - subq %rdx, %rax /* rax = ip0 - ilowest */ - movq %rax, %rbx /* rbx = ip0 - ilowest */ - - /* rdx = (ip0 - ilowest) / 7 */ - movabsq $2635249153387078803, %rdx - mulq %rdx - subq %rdx, %rbx - shrq %rbx - addq %rbx, %rdx - shrq $2, %rdx - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 - - /* r15 = r15 * 5 */ - leaq (%r15, %r15, 4), %r15 - - /* olimit = op3 + r15 */ - addq %op3, %olimit - - movq 8(%rsp), %rdx - movq 0(%rsp), %rbx - - /* If (op3 + 20 > olimit) */ - movq %op3, %rax /* rax = op3 */ - cmpq %rax, %olimit /* op3 == olimit */ - je .L_4X1_exit - - /* If (ip1 < ip0) go to exit */ - cmpq %ip0, %ip1 - jb .L_4X1_exit - - /* If (ip2 < ip1) go to exit */ - cmpq %ip1, %ip2 - jb .L_4X1_exit - - /* If (ip3 < ip2) go to exit */ - cmpq %ip2, %ip3 - jb .L_4X1_exit - -/* Reads top 11 bits from bits[n] - * Loads dt[bits[n]] into var[n] - */ -#define GET_NEXT_DELT(n) \ - movq $53, %var##n; \ - shrxq %var##n, %bits##n, %var##n; \ - movzwl (%dtable,%var##n,2),%vard##n - -/* var[n] must contain the DTable entry computed with GET_NEXT_DELT - * Moves var[n] to %rax - * bits[n] <<= var[n] & 63 - * op[n][idx] = %rax >> 8 - * %ah is a way to access bits [8, 16) of %rax - */ -#define DECODE_FROM_DELT(n, idx) \ - movq %var##n, %rax; \ - shlxq %var##n, %bits##n, %bits##n; \ - movb %ah, idx(%op##n) - -/* Assumes GET_NEXT_DELT has been called. 
- * Calls DECODE_FROM_DELT then GET_NEXT_DELT - */ -#define DECODE_AND_GET_NEXT(n, idx) \ - DECODE_FROM_DELT(n, idx); \ - GET_NEXT_DELT(n) \ - -/* // ctz & nbBytes is stored in bits[n] - * // nbBits is stored in %rax - * ctz = CTZ[bits[n]] - * nbBits = ctz & 7 - * nbBytes = ctz >> 3 - * op[n] += 5 - * ip[n] -= nbBytes - * // Note: x86-64 is little-endian ==> no bswap - * bits[n] = MEM_readST(ip[n]) | 1 - * bits[n] <<= nbBits - */ -#define RELOAD_BITS(n) \ - bsfq %bits##n, %bits##n; \ - movq %bits##n, %rax; \ - andq $7, %rax; \ - shrq $3, %bits##n; \ - leaq 5(%op##n), %op##n; \ - subq %bits##n, %ip##n; \ - movq (%ip##n), %bits##n; \ - orq $1, %bits##n; \ - shlx %rax, %bits##n, %bits##n - - /* Store clobbered variables on the stack */ - movq %olimit, 24(%rsp) - movq %ip1, 0(%rsp) - movq %ip2, 8(%rsp) - movq %ip3, 16(%rsp) - - /* Call GET_NEXT_DELT for each stream */ - FOR_EACH_STREAM(GET_NEXT_DELT) - - .p2align 6 - -.L_4X1_loop_body: - /* Decode 5 symbols in each of the 4 streams (20 total) - * Must have called GET_NEXT_DELT for each stream - */ - FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0) - FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1) - FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2) - FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3) - FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4) - - /* Load ip[1,2,3] from stack (var[] aliases them) - * ip[] is needed for RELOAD_BITS - * Each will be stored back to the stack after RELOAD - */ - movq 0(%rsp), %ip1 - movq 8(%rsp), %ip2 - movq 16(%rsp), %ip3 - - /* Reload each stream & fetch the next table entry - * to prepare for the next iteration - */ - RELOAD_BITS(0) - GET_NEXT_DELT(0) - - RELOAD_BITS(1) - movq %ip1, 0(%rsp) - GET_NEXT_DELT(1) - - RELOAD_BITS(2) - movq %ip2, 8(%rsp) - GET_NEXT_DELT(2) - - RELOAD_BITS(3) - movq %ip3, 16(%rsp) - GET_NEXT_DELT(3) - - /* If op3 < olimit: continue the loop */ - cmp %op3, 24(%rsp) - ja .L_4X1_loop_body - - /* Reload ip[1,2,3] from stack */ - movq 0(%rsp), %ip1 - movq 8(%rsp), %ip2 - movq 16(%rsp), %ip3 - - /* Re-compute olimit */ - jmp .L_4X1_compute_olimit - -#undef GET_NEXT_DELT -#undef DECODE_FROM_DELT -#undef DECODE -#undef RELOAD_BITS -.L_4X1_exit: - addq $24, %rsp - - /* Restore stack (oend & olimit) */ - pop %rax /* olimit */ - pop %rax /* oend */ - pop %rax /* ilowest */ - pop %rax /* arg */ - - /* Save ip / op / bits */ - movq %ip0, 0(%rax) - movq %ip1, 8(%rax) - movq %ip2, 16(%rax) - movq %ip3, 24(%rax) - movq %op0, 32(%rax) - movq %op1, 40(%rax) - movq %op2, 48(%rax) - movq %op3, 56(%rax) - movq %bits0, 64(%rax) - movq %bits1, 72(%rax) - movq %bits2, 80(%rax) - movq %bits3, 88(%rax) - - /* Restore registers */ - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %r11 - pop %r10 - pop %r9 - pop %r8 - pop %rdi - pop %rsi - pop %rbp - pop %rdx - pop %rcx - pop %rbx - pop %rax - ret - -_HUF_decompress4X2_usingDTable_internal_fast_asm_loop: -HUF_decompress4X2_usingDTable_internal_fast_asm_loop: - ZSTD_CET_ENDBRANCH - /* Save all registers - even if they are callee saved for simplicity. 
*/ - push %rax - push %rbx - push %rcx - push %rdx - push %rbp - push %rsi - push %rdi - push %r8 - push %r9 - push %r10 - push %r11 - push %r12 - push %r13 - push %r14 - push %r15 - - movq %rdi, %rax - movq 0(%rax), %ip0 - movq 8(%rax), %ip1 - movq 16(%rax), %ip2 - movq 24(%rax), %ip3 - movq 32(%rax), %op0 - movq 40(%rax), %op1 - movq 48(%rax), %op2 - movq 56(%rax), %op3 - movq 64(%rax), %bits0 - movq 72(%rax), %bits1 - movq 80(%rax), %bits2 - movq 88(%rax), %bits3 - movq 96(%rax), %dtable - push %rax /* argument */ - push %rax /* olimit */ - push 104(%rax) /* ilowest */ - - movq 112(%rax), %rax - push %rax /* oend3 */ - - movq %op3, %rax - push %rax /* oend2 */ - - movq %op2, %rax - push %rax /* oend1 */ - - movq %op1, %rax - push %rax /* oend0 */ - - /* Scratch space */ - subq $8, %rsp - -.L_4X2_compute_olimit: - /* Computes how many iterations we can do safely - * %r15, %rax may be clobbered - * rdx must be saved - * op[1,2,3,4] & ip0 mustn't be clobbered - */ - movq %rdx, 0(%rsp) - - /* We can consume up to 7 input bytes each iteration. */ - movq %ip0, %rax /* rax = ip0 */ - movq 40(%rsp), %rdx /* rdx = ilowest */ - subq %rdx, %rax /* rax = ip0 - ilowest */ - movq %rax, %r15 /* r15 = ip0 - ilowest */ - - /* rdx = rax / 7 */ - movabsq $2635249153387078803, %rdx - mulq %rdx - subq %rdx, %r15 - shrq %r15 - addq %r15, %rdx - shrq $2, %rdx - - /* r15 = (ip0 - ilowest) / 7 */ - movq %rdx, %r15 - - /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ - movq 8(%rsp), %rax /* rax = oend0 */ - subq %op0, %rax /* rax = oend0 - op0 */ - movq 16(%rsp), %rdx /* rdx = oend1 */ - subq %op1, %rdx /* rdx = oend1 - op1 */ - - cmpq %rax, %rdx - cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - - movq 24(%rsp), %rax /* rax = oend2 */ - subq %op2, %rax /* rax = oend2 - op2 */ - - cmpq %rax, %rdx - cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - - movq 32(%rsp), %rax /* rax = oend3 */ - subq %op3, %rax /* rax = oend3 - op3 */ - - cmpq %rax, %rdx - cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - - movabsq $-3689348814741910323, %rax - mulq %rdx - shrq $3, %rdx /* rdx = rdx / 10 */ - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 - - /* olimit = op3 + 5 * r15 */ - movq %r15, %rax - leaq (%op3, %rax, 4), %olimit - addq %rax, %olimit - - movq 0(%rsp), %rdx - - /* If (op3 + 10 > olimit) */ - movq %op3, %rax /* rax = op3 */ - cmpq %rax, %olimit /* op3 == olimit */ - je .L_4X2_exit - - /* If (ip1 < ip0) go to exit */ - cmpq %ip0, %ip1 - jb .L_4X2_exit - - /* If (ip2 < ip1) go to exit */ - cmpq %ip1, %ip2 - jb .L_4X2_exit - - /* If (ip3 < ip2) go to exit */ - cmpq %ip2, %ip3 - jb .L_4X2_exit - -#define DECODE(n, idx) \ - movq %bits##n, %rax; \ - shrq $53, %rax; \ - movzwl 0(%dtable,%rax,4),%r8d; \ - movzbl 2(%dtable,%rax,4),%r15d; \ - movzbl 3(%dtable,%rax,4),%eax; \ - movw %r8w, (%op##n); \ - shlxq %r15, %bits##n, %bits##n; \ - addq %rax, %op##n - -#define RELOAD_BITS(n) \ - bsfq %bits##n, %bits##n; \ - movq %bits##n, %rax; \ - shrq $3, %bits##n; \ - andq $7, %rax; \ - subq %bits##n, %ip##n; \ - movq (%ip##n), %bits##n; \ - orq $1, %bits##n; \ - shlxq %rax, %bits##n, %bits##n - - - movq %olimit, 48(%rsp) - - .p2align 6 - -.L_4X2_loop_body: - /* We clobber r8, so store it on the stack */ - movq %r8, 0(%rsp) - - /* Decode 5 symbols from each of the 4 streams (20 symbols total). 
*/ - FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) - FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) - FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) - FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) - FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) - - /* Reload r8 */ - movq 0(%rsp), %r8 - - FOR_EACH_STREAM(RELOAD_BITS) - - cmp %op3, 48(%rsp) - ja .L_4X2_loop_body - jmp .L_4X2_compute_olimit - -#undef DECODE -#undef RELOAD_BITS -.L_4X2_exit: - addq $8, %rsp - /* Restore stack (oend & olimit) */ - pop %rax /* oend0 */ - pop %rax /* oend1 */ - pop %rax /* oend2 */ - pop %rax /* oend3 */ - pop %rax /* ilowest */ - pop %rax /* olimit */ - pop %rax /* arg */ - - /* Save ip / op / bits */ - movq %ip0, 0(%rax) - movq %ip1, 8(%rax) - movq %ip2, 16(%rax) - movq %ip3, 24(%rax) - movq %op0, 32(%rax) - movq %op1, 40(%rax) - movq %op2, 48(%rax) - movq %op3, 56(%rax) - movq %bits0, 64(%rax) - movq %bits1, 72(%rax) - movq %bits2, 80(%rax) - movq %bits3, 88(%rax) - - /* Restore registers */ - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %r11 - pop %r10 - pop %r9 - pop %r8 - pop %rdi - pop %rsi - pop %rbp - pop %rdx - pop %rcx - pop %rbx - pop %rax - ret - -#endif diff --git a/zstandard_android/src/decompress/zstd_decompress.c b/zstandard_android/src/decompress/zstd_decompress.c deleted file mode 100644 index 26c9457..0000000 --- a/zstandard_android/src/decompress/zstd_decompress.c +++ /dev/null @@ -1,2407 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - -/* *************************************************************** -* Tuning parameters -*****************************************************************/ -/*! - * HEAPMODE : - * Select how default decompression function ZSTD_decompress() allocates its context, - * on stack (0), or into heap (1, default; requires malloc()). - * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected. - */ -#ifndef ZSTD_HEAPMODE -# define ZSTD_HEAPMODE 1 -#endif - -/*! -* LEGACY_SUPPORT : -* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+) -*/ -#ifndef ZSTD_LEGACY_SUPPORT -# define ZSTD_LEGACY_SUPPORT 0 -#endif - -/*! - * MAXWINDOWSIZE_DEFAULT : - * maximum window size accepted by DStream __by default__. - * Frames requiring more memory will be rejected. - * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize(). - */ -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT -# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) -#endif - -/*! - * NO_FORWARD_PROGRESS_MAX : - * maximum allowed nb of calls to ZSTD_decompressStream() - * without any forward progress - * (defined as: no byte read from input, and no byte flushed to output) - * before triggering an error. 
- */ -#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX -# define ZSTD_NO_FORWARD_PROGRESS_MAX 16 -#endif - - -/*-******************************************************* -* Dependencies -*********************************************************/ -#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ -#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ -#include "../common/error_private.h" -#include "../common/zstd_internal.h" /* blockProperties_t */ -#include "../common/mem.h" /* low level memory routines */ -#include "../common/bits.h" /* ZSTD_highbit32 */ -#define FSE_STATIC_LINKING_ONLY -#include "../common/fse.h" -#include "../common/huf.h" -#include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */ -#include "zstd_decompress_internal.h" /* ZSTD_DCtx */ -#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ -#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ - -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) -# include "../legacy/zstd_legacy.h" -#endif - - - -/************************************* - * Multiple DDicts Hashset internals * - *************************************/ - -#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 -#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. - * Currently, that means a 0.75 load factor. - * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded - * the load factor of the ddict hash set. - */ - -#define DDICT_HASHSET_TABLE_BASE_SIZE 64 -#define DDICT_HASHSET_RESIZE_FACTOR 2 - -/* Hash function to determine starting position of dict insertion within the table - * Returns an index between [0, hashSet->ddictPtrTableSize] - */ -static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) { - const U64 hash = XXH64(&dictID, sizeof(U32), 0); - /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */ - return hash & (hashSet->ddictPtrTableSize - 1); -} - -/* Adds DDict to a hashset without resizing it. - * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. - * Returns 0 if successful, or a zstd error code if something went wrong. - */ -static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) { - const U32 dictID = ZSTD_getDictID_fromDDict(ddict); - size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); - const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; - RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!"); - DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); - while (hashSet->ddictPtrTable[idx] != NULL) { - /* Replace existing ddict if inserting ddict with same dictID */ - if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) { - DEBUGLOG(4, "DictID already exists, replacing rather than adding"); - hashSet->ddictPtrTable[idx] = ddict; - return 0; - } - idx &= idxRangeMask; - idx++; - } - DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); - hashSet->ddictPtrTable[idx] = ddict; - hashSet->ddictPtrCount++; - return 0; -} - -/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and - * rehashes all values, allocates new table, frees old table. - * Returns 0 on success, otherwise a zstd error code. 
- */ -static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { - size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR; - const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem); - const ZSTD_DDict** oldTable = hashSet->ddictPtrTable; - size_t oldTableSize = hashSet->ddictPtrTableSize; - size_t i; - - DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize); - RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!"); - hashSet->ddictPtrTable = newTable; - hashSet->ddictPtrTableSize = newTableSize; - hashSet->ddictPtrCount = 0; - for (i = 0; i < oldTableSize; ++i) { - if (oldTable[i] != NULL) { - FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), ""); - } - } - ZSTD_customFree((void*)oldTable, customMem); - DEBUGLOG(4, "Finished re-hash"); - return 0; -} - -/* Fetches a DDict with the given dictID - * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. - */ -static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) { - size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); - const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; - DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); - for (;;) { - size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]); - if (currDictID == dictID || currDictID == 0) { - /* currDictID == 0 implies a NULL ddict entry */ - break; - } else { - idx &= idxRangeMask; /* Goes to start of table when we reach the end */ - idx++; - } - } - DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); - return hashSet->ddictPtrTable[idx]; -} - -/* Allocates space for and returns a ddict hash set - * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with. - * Returns NULL if allocation failed. - */ -static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { - ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem); - DEBUGLOG(4, "Allocating new hash set"); - if (!ret) - return NULL; - ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem); - if (!ret->ddictPtrTable) { - ZSTD_customFree(ret, customMem); - return NULL; - } - ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; - ret->ddictPtrCount = 0; - return ret; -} - -/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. - * Note: The ZSTD_DDict* within the table are NOT freed. - */ -static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { - DEBUGLOG(4, "Freeing ddict hash set"); - if (hashSet && hashSet->ddictPtrTable) { - ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem); - } - if (hashSet) { - ZSTD_customFree(hashSet, customMem); - } -} - -/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. - * Returns 0 on success, or a ZSTD error. 
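 * (Editorial note on the load-factor guard below: C evaluates * and / left
 * to right, so the test reads ((ddictPtrCount * 4) / ddictPtrTableSize) * 3
 * != 0; it first fires once count*4 reaches the table size, e.g. at 16
 * entries for the 64-slot base table, after which the table doubles.)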
- */ -static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) { - DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize); - if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) { - FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), ""); - } - FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), ""); - return 0; -} - -/*-************************************************************* -* Context management -***************************************************************/ -size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) -{ - if (dctx==NULL) return 0; /* support sizeof NULL */ - return sizeof(*dctx) - + ZSTD_sizeof_DDict(dctx->ddictLocal) - + dctx->inBuffSize + dctx->outBuffSize; -} - -size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } - - -static size_t ZSTD_startingInputLength(ZSTD_format_e format) -{ - size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format); - /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ - assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); - return startingInputLength; -} - -static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx) -{ - assert(dctx->streamStage == zdss_init); - dctx->format = ZSTD_f_zstd1; - dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; - dctx->outBufferMode = ZSTD_bm_buffered; - dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; - dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; - dctx->disableHufAsm = 0; - dctx->maxBlockSizeParam = 0; -} - -static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) -{ - dctx->staticSize = 0; - dctx->ddict = NULL; - dctx->ddictLocal = NULL; - dctx->dictEnd = NULL; - dctx->ddictIsCold = 0; - dctx->dictUses = ZSTD_dont_use; - dctx->inBuff = NULL; - dctx->inBuffSize = 0; - dctx->outBuffSize = 0; - dctx->streamStage = zdss_init; -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) - dctx->legacyContext = NULL; - dctx->previousLegacyVersion = 0; -#endif - dctx->noForwardProgress = 0; - dctx->oversizedDuration = 0; - dctx->isFrameDecompression = 1; -#if DYNAMIC_BMI2 - dctx->bmi2 = ZSTD_cpuSupportsBmi2(); -#endif - dctx->ddictSet = NULL; - ZSTD_DCtx_resetParameters(dctx); -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - dctx->dictContentEndForFuzzing = NULL; -#endif -} - -ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) -{ - ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; - - if ((size_t)workspace & 7) return NULL; /* 8-aligned */ - if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ - - ZSTD_initDCtx_internal(dctx); - dctx->staticSize = workspaceSize; - dctx->inBuff = (char*)(dctx+1); - return dctx; -} - -static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { - if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; - - { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem); - if (!dctx) return NULL; - dctx->customMem = customMem; - ZSTD_initDCtx_internal(dctx); - return dctx; - } -} - -ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -{ - return ZSTD_createDCtx_internal(customMem); -} - -ZSTD_DCtx* ZSTD_createDCtx(void) -{ - DEBUGLOG(3, "ZSTD_createDCtx"); - return ZSTD_createDCtx_internal(ZSTD_defaultCMem); -} - -static void ZSTD_clearDict(ZSTD_DCtx* 
dctx) -{ - ZSTD_freeDDict(dctx->ddictLocal); - dctx->ddictLocal = NULL; - dctx->ddict = NULL; - dctx->dictUses = ZSTD_dont_use; -} - -size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) -{ - if (dctx==NULL) return 0; /* support free on NULL */ - RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx"); - { ZSTD_customMem const cMem = dctx->customMem; - ZSTD_clearDict(dctx); - ZSTD_customFree(dctx->inBuff, cMem); - dctx->inBuff = NULL; -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (dctx->legacyContext) - ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); -#endif - if (dctx->ddictSet) { - ZSTD_freeDDictHashSet(dctx->ddictSet, cMem); - dctx->ddictSet = NULL; - } - ZSTD_customFree(dctx, cMem); - return 0; - } -} - -/* no longer useful */ -void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) -{ - size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); - ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ -} - -/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on - * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then - * accordingly sets the ddict to be used to decompress the frame. - * - * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is. - * - * ZSTD_d_refMultipleDDicts must be enabled for this function to be called. - */ -static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) { - assert(dctx->refMultipleDDicts && dctx->ddictSet); - DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame"); - if (dctx->ddict) { - const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID); - if (frameDDict) { - DEBUGLOG(4, "DDict found!"); - ZSTD_clearDict(dctx); - dctx->dictID = dctx->fParams.dictID; - dctx->ddict = frameDDict; - dctx->dictUses = ZSTD_use_indefinitely; - } - } -} - - -/*-************************************************************* - * Frame header decoding - ***************************************************************/ - -/*! ZSTD_isFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. - * Note 3 : Skippable Frame Identifiers are considered valid. */ -unsigned ZSTD_isFrame(const void* buffer, size_t size) -{ - if (size < ZSTD_FRAMEIDSIZE) return 0; - { U32 const magic = MEM_readLE32(buffer); - if (magic == ZSTD_MAGICNUMBER) return 1; - if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; - } -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(buffer, size)) return 1; -#endif - return 0; -} - -/*! ZSTD_isSkippableFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - */ -unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size) -{ - if (size < ZSTD_FRAMEIDSIZE) return 0; - { U32 const magic = MEM_readLE32(buffer); - if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; - } - return 0; -} - -/** ZSTD_frameHeaderSize_internal() : - * srcSize must be large enough to reach header size fields. - * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. 
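 * note (editorial, matching the extraction below and RFC 8878) : the
 * frame-header-descriptor byte decomposes as bits 0-1 = dictID field size
 * code, bit 2 = checksum flag, bit 3 = reserved (must be 0), bit 5 =
 * single-segment flag, bits 6-7 = frame-content-size field code, which is
 * exactly what `fhd & 3`, `(fhd >> 5) & 1` and `fhd >> 6` read out.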
- * @return : size of the Frame Header - * or an error code, which can be tested with ZSTD_isError() */ -static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) -{ - size_t const minInputSize = ZSTD_startingInputLength(format); - RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, ""); - - { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; - U32 const dictID= fhd & 3; - U32 const singleSegment = (fhd >> 5) & 1; - U32 const fcsId = fhd >> 6; - return minInputSize + !singleSegment - + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] - + (singleSegment && !fcsId); - } -} - -/** ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_frameHeaderSize_prefix. - * @return : size of the Frame Header, - * or an error code (if srcSize is too small) */ -size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) -{ - return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); -} - - -/** ZSTD_getFrameHeader_advanced() : - * decode Frame Header, or require larger `srcSize`. - * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, -** or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) -{ - const BYTE* ip = (const BYTE*)src; - size_t const minInputSize = ZSTD_startingInputLength(format); - - DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize); - - if (srcSize > 0) { - /* note : technically could be considered an assert(), since it's an invalid entry */ - RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0"); - } - if (srcSize < minInputSize) { - if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) { - /* when receiving less than @minInputSize bytes, - * control these bytes at least correspond to a supported magic number - * in order to error out early if they don't. 
- **/ - size_t const toCopy = MIN(4, srcSize); - unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER); - assert(src != NULL); - ZSTD_memcpy(hbuf, src, toCopy); - if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) { - /* not a zstd frame : let's check if it's a skippable frame */ - MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START); - ZSTD_memcpy(hbuf, src, toCopy); - if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) { - RETURN_ERROR(prefix_unknown, - "first bytes don't correspond to any supported magic number"); - } } } - return minInputSize; - } - - ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */ - if ( (format != ZSTD_f_zstd1_magicless) - && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { - if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { - /* skippable frame */ - if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) - return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ - ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); - zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); - zfhPtr->frameType = ZSTD_skippableFrame; - return 0; - } - RETURN_ERROR(prefix_unknown, ""); - } - - /* ensure there is enough `srcSize` to fully read/decode frame header */ - { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); - if (srcSize < fhsize) return fhsize; - zfhPtr->headerSize = (U32)fhsize; - } - - { BYTE const fhdByte = ip[minInputSize-1]; - size_t pos = minInputSize; - U32 const dictIDSizeCode = fhdByte&3; - U32 const checksumFlag = (fhdByte>>2)&1; - U32 const singleSegment = (fhdByte>>5)&1; - U32 const fcsID = fhdByte>>6; - U64 windowSize = 0; - U32 dictID = 0; - U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; - RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported, - "reserved bits, must be zero"); - - if (!singleSegment) { - BYTE const wlByte = ip[pos++]; - U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; - RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, ""); - windowSize = (1ULL << windowLog); - windowSize += (windowSize >> 3) * (wlByte&7); - } - switch(dictIDSizeCode) - { - default: - assert(0); /* impossible */ - ZSTD_FALLTHROUGH; - case 0 : break; - case 1 : dictID = ip[pos]; pos++; break; - case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; - case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; - } - switch(fcsID) - { - default: - assert(0); /* impossible */ - ZSTD_FALLTHROUGH; - case 0 : if (singleSegment) frameContentSize = ip[pos]; break; - case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; - case 2 : frameContentSize = MEM_readLE32(ip+pos); break; - case 3 : frameContentSize = MEM_readLE64(ip+pos); break; - } - if (singleSegment) windowSize = frameContentSize; - - zfhPtr->frameType = ZSTD_frame; - zfhPtr->frameContentSize = frameContentSize; - zfhPtr->windowSize = windowSize; - zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - zfhPtr->dictID = dictID; - zfhPtr->checksumFlag = checksumFlag; - } - return 0; -} - -/** ZSTD_getFrameHeader() : - * decode Frame Header, or require larger `srcSize`. - * note : this function does not consume input, it only reads it. 
- * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) -{ - return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); -} - -/** ZSTD_getFrameContentSize() : - * compatible with legacy mode - * @return : decompressed size of the single frame pointed to by `src` if known, otherwise - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) -{ -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (ZSTD_isLegacy(src, srcSize)) { - unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); - return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; - } -#endif - { ZSTD_frameHeader zfh; - if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) - return ZSTD_CONTENTSIZE_ERROR; - if (zfh.frameType == ZSTD_skippableFrame) { - return 0; - } else { - return zfh.frameContentSize; - } } -} - -static size_t readSkippableFrameSize(void const* src, size_t srcSize) -{ - size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE; - U32 sizeU32; - - RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); - - sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); - RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, - frameParameter_unsupported, ""); - { size_t const skippableSize = skippableHeaderSize + sizeU32; - RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); - return skippableSize; - } -} - -/*! ZSTD_readSkippableFrame() : - * Retrieves content of a skippable frame, and writes it to dst buffer. - * - * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, - * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested - * in the magicVariant. - * - * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. - * - * @return : number of bytes written or a ZSTD error. 
- */ -size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, - unsigned* magicVariant, /* optional, can be NULL */ - const void* src, size_t srcSize) -{ - RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); - - { U32 const magicNumber = MEM_readLE32(src); - size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); - size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; - - /* check input validity */ - RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); - RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); - RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); - - /* deliver payload */ - if (skippableContentSize > 0 && dst != NULL) - ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); - if (magicVariant != NULL) - *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; - return skippableContentSize; - } -} - -/** ZSTD_findDecompressedSize() : - * `srcSize` must be the exact length of some number of ZSTD compressed and/or - * skippable frames - * note: compatible with legacy mode - * @return : decompressed size of the frames contained */ -unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) -{ - unsigned long long totalDstSize = 0; - - while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) { - U32 const magicNumber = MEM_readLE32(src); - - if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { - size_t const skippableSize = readSkippableFrameSize(src, srcSize); - if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR; - assert(skippableSize <= srcSize); - - src = (const BYTE *)src + skippableSize; - srcSize -= skippableSize; - continue; - } - - { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize); - if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs; - - if (totalDstSize + fcs < totalDstSize) - return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */ - totalDstSize += fcs; - } - /* skip to next frame */ - { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR; - assert(frameSrcSize <= srcSize); - - src = (const BYTE *)src + frameSrcSize; - srcSize -= frameSrcSize; - } - } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ - - if (srcSize) return ZSTD_CONTENTSIZE_ERROR; - - return totalDstSize; -} - -/** ZSTD_getDecompressedSize() : - * compatible with legacy mode - * @return : decompressed size if known, 0 otherwise - note : 0 can mean any of the following : - - frame content is empty - - decompressed size field is not present in frame header - - frame header unknown / not supported - - frame header not complete (`srcSize` too small) */ -unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) -{ - unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN); - return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; -} - - -/** ZSTD_decodeFrameHeader() : - * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). - * If multiple DDict references are enabled, also will choose the correct DDict to use. 
- * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) -{ - size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format); - if (ZSTD_isError(result)) return result; /* invalid header */ - RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small"); - - /* Reference DDict requested by frame if dctx references multiple ddicts */ - if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) { - ZSTD_DCtx_selectFrameDDict(dctx); - } - -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - /* Skip the dictID check in fuzzing mode, because it makes the search - * harder. - */ - RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID), - dictionary_wrong, ""); -#endif - dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0; - if (dctx->validateChecksum) XXH64_reset(&dctx->xxhState, 0); - dctx->processedCSize += headerSize; - return 0; -} - -static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) -{ - ZSTD_frameSizeInfo frameSizeInfo; - frameSizeInfo.compressedSize = ret; - frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; - return frameSizeInfo; -} - -static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format) -{ - ZSTD_frameSizeInfo frameSizeInfo; - ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); - -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) - return ZSTD_findFrameSizeInfoLegacy(src, srcSize); -#endif - - if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE) - && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { - frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); - assert(ZSTD_isError(frameSizeInfo.compressedSize) || - frameSizeInfo.compressedSize <= srcSize); - return frameSizeInfo; - } else { - const BYTE* ip = (const BYTE*)src; - const BYTE* const ipstart = ip; - size_t remainingSize = srcSize; - size_t nbBlocks = 0; - ZSTD_frameHeader zfh; - - /* Extract Frame Header */ - { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); - if (ZSTD_isError(ret)) - return ZSTD_errorFrameSizeInfo(ret); - if (ret > 0) - return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); - } - - ip += zfh.headerSize; - remainingSize -= zfh.headerSize; - - /* Iterate over each block */ - while (1) { - blockProperties_t blockProperties; - size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); - if (ZSTD_isError(cBlockSize)) - return ZSTD_errorFrameSizeInfo(cBlockSize); - - if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) - return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); - - ip += ZSTD_blockHeaderSize + cBlockSize; - remainingSize -= ZSTD_blockHeaderSize + cBlockSize; - nbBlocks++; - - if (blockProperties.lastBlock) break; - } - - /* Final frame content checksum */ - if (zfh.checksumFlag) { - if (remainingSize < 4) - return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); - ip += 4; - } - - frameSizeInfo.nbBlocks = nbBlocks; - frameSizeInfo.compressedSize = (size_t)(ip - ipstart); - frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) - ? 
zfh.frameContentSize - : (unsigned long long)nbBlocks * zfh.blockSizeMax; - return frameSizeInfo; - } -} - -static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); - return frameSizeInfo.compressedSize; -} - -/** ZSTD_findFrameCompressedSize() : - * See docs in zstd.h - * Note: compatible with legacy mode */ -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) -{ - return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1); -} - -/** ZSTD_decompressBound() : - * compatible with legacy mode - * `src` must point to the start of a ZSTD frame or a skippable frame - * `srcSize` must be at least as large as the frame contained - * @return : the maximum decompressed size of the compressed source - */ -unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) -{ - unsigned long long bound = 0; - /* Iterate over each frame */ - while (srcSize > 0) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); - size_t const compressedSize = frameSizeInfo.compressedSize; - unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; - if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) - return ZSTD_CONTENTSIZE_ERROR; - assert(srcSize >= compressedSize); - src = (const BYTE*)src + compressedSize; - srcSize -= compressedSize; - bound += decompressedBound; - } - return bound; -} - -size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) -{ - size_t margin = 0; - unsigned maxBlockSize = 0; - - /* Iterate over each frame */ - while (srcSize > 0) { - ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); - size_t const compressedSize = frameSizeInfo.compressedSize; - unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; - ZSTD_frameHeader zfh; - - FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); - if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) - return ERROR(corruption_detected); - - if (zfh.frameType == ZSTD_frame) { - /* Add the frame header to our margin */ - margin += zfh.headerSize; - /* Add the checksum to our margin */ - margin += zfh.checksumFlag ? 4 : 0; - /* Add 3 bytes per block */ - margin += 3 * frameSizeInfo.nbBlocks; - - /* Compute the max block size */ - maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); - } else { - assert(zfh.frameType == ZSTD_skippableFrame); - /* Add the entire skippable frame size to our margin. */ - margin += compressedSize; - } - - assert(srcSize >= compressedSize); - src = (const BYTE*)src + compressedSize; - srcSize -= compressedSize; - } - - /* Add the max block size back to the margin. */ - margin += maxBlockSize; - - return margin; -} - -/*-************************************************************* - * Frame decoding - ***************************************************************/ - -/** ZSTD_insertBlock() : - * insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
*/ -size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) -{ - DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize); - ZSTD_checkContinuity(dctx, blockStart, blockSize); - dctx->previousDstEnd = (const char*)blockStart + blockSize; - return blockSize; -} - - -static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_copyRawBlock"); - RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, ""); - if (dst == NULL) { - if (srcSize == 0) return 0; - RETURN_ERROR(dstBuffer_null, ""); - } - ZSTD_memmove(dst, src, srcSize); - return srcSize; -} - -static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, - BYTE b, - size_t regenSize) -{ - RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, ""); - if (dst == NULL) { - if (regenSize == 0) return 0; - RETURN_ERROR(dstBuffer_null, ""); - } - ZSTD_memset(dst, b, regenSize); - return regenSize; -} - -static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming) -{ -#if ZSTD_TRACE - if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) { - ZSTD_Trace trace; - ZSTD_memset(&trace, 0, sizeof(trace)); - trace.version = ZSTD_VERSION_NUMBER; - trace.streaming = streaming; - if (dctx->ddict) { - trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict); - trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict); - trace.dictionaryIsCold = dctx->ddictIsCold; - } - trace.uncompressedSize = (size_t)uncompressedSize; - trace.compressedSize = (size_t)compressedSize; - trace.dctx = dctx; - ZSTD_trace_decompress_end(dctx->traceCtx, &trace); - } -#else - (void)dctx; - (void)uncompressedSize; - (void)compressedSize; - (void)streaming; -#endif -} - - -/*! ZSTD_decompressFrame() : - * @dctx must be properly initialized - * will update *srcPtr and *srcSizePtr, - * to make *srcPtr progress by one frame. */ -static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void** srcPtr, size_t *srcSizePtr) -{ - const BYTE* const istart = (const BYTE*)(*srcPtr); - const BYTE* ip = istart; - BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = dstCapacity != 0 ? 
ostart + dstCapacity : ostart; - BYTE* op = ostart; - size_t remainingSrcSize = *srcSizePtr; - - DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr); - - /* check */ - RETURN_ERROR_IF( - remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, - srcSize_wrong, ""); - - /* Frame Header */ - { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( - ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); - if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; - RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, - srcSize_wrong, ""); - FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); - ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; - } - - /* Shrink the blockSizeMax if enabled */ - if (dctx->maxBlockSizeParam != 0) - dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam); - - /* Loop on each block */ - while (1) { - BYTE* oBlockEnd = oend; - size_t decodedSize; - blockProperties_t blockProperties; - size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); - if (ZSTD_isError(cBlockSize)) return cBlockSize; - - ip += ZSTD_blockHeaderSize; - remainingSrcSize -= ZSTD_blockHeaderSize; - RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); - - if (ip >= op && ip < oBlockEnd) { - /* We are decompressing in-place. Limit the output pointer so that we - * don't overwrite the block that we are currently reading. This will - * fail decompression if the input & output pointers aren't spaced - * far enough apart. - * - * This is important to set, even when the pointers are far enough - * apart, because ZSTD_decompressBlock_internal() can decide to store - * literals in the output buffer, after the block it is decompressing. - * Since we don't want anything to overwrite our input, we have to tell - * ZSTD_decompressBlock_internal to never write past ip. - * - * See ZSTD_allocateLiteralsBuffer() for reference. - */ - oBlockEnd = op + (ip - op); - } - - switch(blockProperties.blockType) - { - case bt_compressed: - assert(dctx->isFrameDecompression == 1); - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming); - break; - case bt_raw : - /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. 
*/ - decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); - break; - case bt_rle : - decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); - break; - case bt_reserved : - default: - RETURN_ERROR(corruption_detected, "invalid block type"); - } - FORWARD_IF_ERROR(decodedSize, "Block decompression failure"); - DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize); - if (dctx->validateChecksum) { - XXH64_update(&dctx->xxhState, op, decodedSize); - } - if (decodedSize) /* support dst = NULL,0 */ { - op += decodedSize; - } - assert(ip != NULL); - ip += cBlockSize; - remainingSrcSize -= cBlockSize; - if (blockProperties.lastBlock) break; - } - - if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { - RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, - corruption_detected, ""); - } - if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ - RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); - if (!dctx->forceIgnoreChecksum) { - U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); - U32 checkRead; - checkRead = MEM_readLE32(ip); - RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); - } - ip += 4; - remainingSrcSize -= 4; - } - ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); - /* Allow caller to get size read */ - DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %zi, consuming %zi bytes of input", op-ostart, ip - (const BYTE*)*srcPtr); - *srcPtr = ip; - *srcSizePtr = remainingSrcSize; - return (size_t)(op-ostart); -} - -static -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict, size_t dictSize, - const ZSTD_DDict* ddict) -{ - void* const dststart = dst; - int moreThan1Frame = 0; - - DEBUGLOG(5, "ZSTD_decompressMultiFrame"); - assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ - - if (ddict) { - dict = ZSTD_DDict_dictContent(ddict); - dictSize = ZSTD_DDict_dictSize(ddict); - } - - while (srcSize >= ZSTD_startingInputLength(dctx->format)) { - -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) - if (dctx->format == ZSTD_f_zstd1 && ZSTD_isLegacy(src, srcSize)) { - size_t decodedSize; - size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); - if (ZSTD_isError(frameSize)) return frameSize; - RETURN_ERROR_IF(dctx->staticSize, memory_allocation, - "legacy support is not compatible with static dctx"); - - decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); - if (ZSTD_isError(decodedSize)) return decodedSize; - - { - unsigned long long const expectedSize = ZSTD_getFrameContentSize(src, srcSize); - RETURN_ERROR_IF(expectedSize == ZSTD_CONTENTSIZE_ERROR, corruption_detected, "Corrupted frame header!"); - if (expectedSize != ZSTD_CONTENTSIZE_UNKNOWN) { - RETURN_ERROR_IF(expectedSize != decodedSize, corruption_detected, - "Frame header size does not match decoded size!"); - } - } - - assert(decodedSize <= dstCapacity); - dst = (BYTE*)dst + decodedSize; - dstCapacity -= decodedSize; - - src = (const BYTE*)src + frameSize; - srcSize -= frameSize; - - continue; - } -#endif - - if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) { - U32 const magicNumber = MEM_readLE32(src); - DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber); - if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { - 
/* skippable frame detected : skip it */ - size_t const skippableSize = readSkippableFrameSize(src, srcSize); - FORWARD_IF_ERROR(skippableSize, "invalid skippable frame"); - assert(skippableSize <= srcSize); - - src = (const BYTE *)src + skippableSize; - srcSize -= skippableSize; - continue; /* check next frame */ - } } - - if (ddict) { - /* we were called from ZSTD_decompress_usingDDict */ - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); - } else { - /* this will initialize correctly with no dict if dict == NULL, so - * use this in all cases but ddict */ - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); - } - ZSTD_checkContinuity(dctx, dst, dstCapacity); - - { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, - &src, &srcSize); - RETURN_ERROR_IF( - (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) - && (moreThan1Frame==1), - srcSize_wrong, - "At least one frame successfully completed, " - "but following bytes are garbage: " - "it's more likely to be a srcSize error, " - "specifying more input bytes than size of frame(s). " - "Note: one could be unlucky, it might be a corruption error instead, " - "happening right at the place where we expect zstd magic bytes. " - "But this is _much_ less likely than a srcSize field error."); - if (ZSTD_isError(res)) return res; - assert(res <= dstCapacity); - if (res != 0) - dst = (BYTE*)dst + res; - dstCapacity -= res; - } - moreThan1Frame = 1; - } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ - - RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); - - return (size_t)((BYTE*)dst - (BYTE*)dststart); -} - -size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict, size_t dictSize) -{ - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); -} - - -static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) -{ - switch (dctx->dictUses) { - default: - assert(0 /* Impossible */); - ZSTD_FALLTHROUGH; - case ZSTD_dont_use: - ZSTD_clearDict(dctx); - return NULL; - case ZSTD_use_indefinitely: - return dctx->ddict; - case ZSTD_use_once: - dctx->dictUses = ZSTD_dont_use; - return dctx->ddict; - } -} - -size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); -} - - -size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ -#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) - size_t regenSize; - ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); - RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); - regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); - ZSTD_freeDCtx(dctx); - return regenSize; -#else /* stack mode */ - ZSTD_DCtx dctx; - ZSTD_initDCtx_internal(&dctx); - return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); -#endif -} - - -/*-************************************** -* Advanced Streaming Decompression API -* Bufferless and synchronous -****************************************/ -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } - -/** - * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we - * allow taking a partial block as the input. Currently only raw uncompressed blocks can - * be streamed. 
- * - * For blocks that can be streamed, this allows us to reduce the latency until we produce - * output, and avoid copying the input. - * - * @param inputSize - The total amount of input that the caller currently has. - */ -static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) { - if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock)) - return dctx->expected; - if (dctx->bType != bt_raw) - return dctx->expected; - return BOUNDED(1, inputSize, dctx->expected); -} - -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { - switch(dctx->stage) - { - default: /* should not happen */ - assert(0); - ZSTD_FALLTHROUGH; - case ZSTDds_getFrameHeaderSize: - ZSTD_FALLTHROUGH; - case ZSTDds_decodeFrameHeader: - return ZSTDnit_frameHeader; - case ZSTDds_decodeBlockHeader: - return ZSTDnit_blockHeader; - case ZSTDds_decompressBlock: - return ZSTDnit_block; - case ZSTDds_decompressLastBlock: - return ZSTDnit_lastBlock; - case ZSTDds_checkChecksum: - return ZSTDnit_checksum; - case ZSTDds_decodeSkippableHeader: - ZSTD_FALLTHROUGH; - case ZSTDds_skipFrame: - return ZSTDnit_skippableFrame; - } -} - -static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } - -/** ZSTD_decompressContinue() : - * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) - * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) - * or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize); - /* Sanity check */ - RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed"); - ZSTD_checkContinuity(dctx, dst, dstCapacity); - - dctx->processedCSize += srcSize; - - switch (dctx->stage) - { - case ZSTDds_getFrameHeaderSize : - assert(src != NULL); - if (dctx->format == ZSTD_f_zstd1) { /* allows header */ - assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */ - if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - ZSTD_memcpy(dctx->headerBuffer, src, srcSize); - dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */ - dctx->stage = ZSTDds_decodeSkippableHeader; - return 0; - } } - dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); - if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; - ZSTD_memcpy(dctx->headerBuffer, src, srcSize); - dctx->expected = dctx->headerSize - srcSize; - dctx->stage = ZSTDds_decodeFrameHeader; - return 0; - - case ZSTDds_decodeFrameHeader: - assert(src != NULL); - ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); - FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), ""); - dctx->expected = ZSTD_blockHeaderSize; - dctx->stage = ZSTDds_decodeBlockHeader; - return 0; - - case ZSTDds_decodeBlockHeader: - { blockProperties_t bp; - size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); - if (ZSTD_isError(cBlockSize)) return cBlockSize; - RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum"); - dctx->expected = cBlockSize; - dctx->bType = bp.blockType; - dctx->rleSize = bp.origSize; - if (cBlockSize) { - dctx->stage = 
bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; - return 0; - } - /* empty block */ - if (bp.lastBlock) { - if (dctx->fParams.checksumFlag) { - dctx->expected = 4; - dctx->stage = ZSTDds_checkChecksum; - } else { - dctx->expected = 0; /* end of frame */ - dctx->stage = ZSTDds_getFrameHeaderSize; - } - } else { - dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ - dctx->stage = ZSTDds_decodeBlockHeader; - } - return 0; - } - - case ZSTDds_decompressLastBlock: - case ZSTDds_decompressBlock: - DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); - { size_t rSize; - switch(dctx->bType) - { - case bt_compressed: - DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); - assert(dctx->isFrameDecompression == 1); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming); - dctx->expected = 0; /* Streaming not supported */ - break; - case bt_raw : - assert(srcSize <= dctx->expected); - rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); - FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); - assert(rSize == srcSize); - dctx->expected -= rSize; - break; - case bt_rle : - rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); - dctx->expected = 0; /* Streaming not supported */ - break; - case bt_reserved : /* should never happen */ - default: - RETURN_ERROR(corruption_detected, "invalid block type"); - } - FORWARD_IF_ERROR(rSize, ""); - RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); - DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); - dctx->decodedSize += rSize; - if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize); - dctx->previousDstEnd = (char*)dst + rSize; - - /* Stay on the same stage until we are finished streaming the block. 
*/ - if (dctx->expected > 0) { - return rSize; - } - - if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ - DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); - RETURN_ERROR_IF( - dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN - && dctx->decodedSize != dctx->fParams.frameContentSize, - corruption_detected, ""); - if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ - dctx->expected = 4; - dctx->stage = ZSTDds_checkChecksum; - } else { - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); - dctx->expected = 0; /* ends here */ - dctx->stage = ZSTDds_getFrameHeaderSize; - } - } else { - dctx->stage = ZSTDds_decodeBlockHeader; - dctx->expected = ZSTD_blockHeaderSize; - } - return rSize; - } - - case ZSTDds_checkChecksum: - assert(srcSize == 4); /* guaranteed by dctx->expected */ - { - if (dctx->validateChecksum) { - U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); - U32 const check32 = MEM_readLE32(src); - DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); - RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); - } - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); - dctx->expected = 0; - dctx->stage = ZSTDds_getFrameHeaderSize; - return 0; - } - - case ZSTDds_decodeSkippableHeader: - assert(src != NULL); - assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); - assert(dctx->format != ZSTD_f_zstd1_magicless); - ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ - dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ - dctx->stage = ZSTDds_skipFrame; - return 0; - - case ZSTDds_skipFrame: - dctx->expected = 0; - dctx->stage = ZSTDds_getFrameHeaderSize; - return 0; - - default: - assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ - } -} - - -static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) -{ - dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); - dctx->prefixStart = dict; - dctx->previousDstEnd = (const char*)dict + dictSize; -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - dctx->dictContentBeginForFuzzing = dctx->prefixStart; - dctx->dictContentEndForFuzzing = dctx->previousDstEnd; -#endif - return 0; -} - -/*! ZSTD_loadDEntropy() : - * dict : must point at beginning of a valid zstd dictionary. 
- * @return : size of entropy tables read */ -size_t -ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, - const void* const dict, size_t const dictSize) -{ - const BYTE* dictPtr = (const BYTE*)dict; - const BYTE* const dictEnd = dictPtr + dictSize; - - RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small"); - assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */ - dictPtr += 8; /* skip header = magic + dictID */ - - ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable)); - ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable)); - ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE); - { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */ - size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable); -#ifdef HUF_FORCE_DECOMPRESS_X1 - /* in minimal huffman, we always use X1 variants */ - size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, - dictPtr, dictEnd - dictPtr, - workspace, workspaceSize, /* flags */ 0); -#else - size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, - dictPtr, (size_t)(dictEnd - dictPtr), - workspace, workspaceSize, /* flags */ 0); -#endif - RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); - dictPtr += hSize; - } - - { short offcodeNCount[MaxOff+1]; - unsigned offcodeMaxValue = MaxOff, offcodeLog; - size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); - RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, ""); - RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); - ZSTD_buildFSETable( entropy->OFTable, - offcodeNCount, offcodeMaxValue, - OF_base, OF_bits, - offcodeLog, - entropy->workspace, sizeof(entropy->workspace), - /* bmi2 */0); - dictPtr += offcodeHeaderSize; - } - - { short matchlengthNCount[MaxML+1]; - unsigned matchlengthMaxValue = MaxML, matchlengthLog; - size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); - RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, ""); - RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); - ZSTD_buildFSETable( entropy->MLTable, - matchlengthNCount, matchlengthMaxValue, - ML_base, ML_bits, - matchlengthLog, - entropy->workspace, sizeof(entropy->workspace), - /* bmi2 */ 0); - dictPtr += matchlengthHeaderSize; - } - - { short litlengthNCount[MaxLL+1]; - unsigned litlengthMaxValue = MaxLL, litlengthLog; - size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); - RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); - RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); - RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); - ZSTD_buildFSETable( entropy->LLTable, - litlengthNCount, litlengthMaxValue, - LL_base, LL_bits, - litlengthLog, - entropy->workspace, sizeof(entropy->workspace), - /* 
bmi2 */ 0); - dictPtr += litlengthHeaderSize; - } - - RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); - { int i; - size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); - for (i=0; i<3; i++) { - U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; - RETURN_ERROR_IF(rep==0 || rep > dictContentSize, - dictionary_corrupted, ""); - entropy->rep[i] = rep; - } } - - return (size_t)(dictPtr - (const BYTE*)dict); -} - -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) -{ - if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); - { U32 const magic = MEM_readLE32(dict); - if (magic != ZSTD_MAGIC_DICTIONARY) { - return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ - } } - dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); - - /* load entropy tables */ - { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); - RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); - dict = (const char*)dict + eSize; - dictSize -= eSize; - } - dctx->litEntropy = dctx->fseEntropy = 1; - - /* reference dictionary content */ - return ZSTD_refDictContent(dctx, dict, dictSize); -} - -size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) -{ - assert(dctx != NULL); -#if ZSTD_TRACE - dctx->traceCtx = (ZSTD_trace_decompress_begin != NULL) ? ZSTD_trace_decompress_begin(dctx) : 0; -#endif - dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ - dctx->stage = ZSTDds_getFrameHeaderSize; - dctx->processedCSize = 0; - dctx->decodedSize = 0; - dctx->previousDstEnd = NULL; - dctx->prefixStart = NULL; - dctx->virtualStart = NULL; - dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ - dctx->litEntropy = dctx->fseEntropy = 0; - dctx->dictID = 0; - dctx->bType = bt_reserved; - dctx->isFrameDecompression = 1; - ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); - ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ - dctx->LLTptr = dctx->entropy.LLTable; - dctx->MLTptr = dctx->entropy.MLTable; - dctx->OFTptr = dctx->entropy.OFTable; - dctx->HUFptr = dctx->entropy.hufTable; - return 0; -} - -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) -{ - FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); - if (dict && dictSize) - RETURN_ERROR_IF( - ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), - dictionary_corrupted, ""); - return 0; -} - - -/* ====== ZSTD_DDict ====== */ - -size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) -{ - DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict"); - assert(dctx != NULL); - if (ddict) { - const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict); - size_t const dictSize = ZSTD_DDict_dictSize(ddict); - const void* const dictEnd = dictStart + dictSize; - dctx->ddictIsCold = (dctx->dictEnd != dictEnd); - DEBUGLOG(4, "DDict is %s", - dctx->ddictIsCold ? "~cold~" : "hot!"); - } - FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); - if (ddict) { /* NULL ddict is equivalent to no dictionary */ - ZSTD_copyDDictParameters(dctx, ddict); - } - return 0; -} - -/*! ZSTD_getDictID_fromDict() : - * Provides the dictID stored within dictionary. - * if @return == 0, the dictionary is not conformant with Zstandard specification. - * It can still be loaded, but as a content-only dictionary. 
*/ -unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) -{ - if (dictSize < 8) return 0; - if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; - return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); -} - -/*! ZSTD_getDictID_fromFrame() : - * Provides the dictID required to decompress frame stored within `src`. - * If @return == 0, the dictID could not be decoded. - * This could be for one of the following reasons : - * - The frame does not require a dictionary (most common case). - * - The frame was built with dictID intentionally removed. - * Needed dictionary is a hidden piece of information. - * Note : this use case also happens when using a non-conformant dictionary. - * - `srcSize` is too small, and as a result, frame header could not be decoded. - * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. - * - This is not a Zstandard frame. - * When identifying the exact failure cause, it's possible to use - * ZSTD_getFrameHeader(), which will provide a more precise error code. */ -unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) -{ - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; - size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); - if (ZSTD_isError(hError)) return 0; - return zfp.dictID; -} - - -/*! ZSTD_decompress_usingDDict() : -* Decompression using a pre-digested Dictionary -* Use dictionary without significant overhead. */ -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_DDict* ddict) -{ - /* pass content and size in case legacy frames are encountered */ - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, - NULL, 0, - ddict); -} - - -/*===================================== -* Streaming decompression -*====================================*/ - -ZSTD_DStream* ZSTD_createDStream(void) -{ - DEBUGLOG(3, "ZSTD_createDStream"); - return ZSTD_createDCtx_internal(ZSTD_defaultCMem); -} - -ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) -{ - return ZSTD_initStaticDCtx(workspace, workspaceSize); -} - -ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) -{ - return ZSTD_createDCtx_internal(customMem); -} - -size_t ZSTD_freeDStream(ZSTD_DStream* zds) -{ - return ZSTD_freeDCtx(zds); -} - - -/* *** Initialization *** */ - -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } - -size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType) -{ - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); - ZSTD_clearDict(dctx); - if (dict && dictSize != 0) { - dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); - RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); - dctx->ddict = dctx->ddictLocal; - dctx->dictUses = ZSTD_use_indefinitely; - } - return 0; -} - -size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) -{ - return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); -} - -size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) -{ - return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); -} - -size_t 
ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) -{ - FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); - dctx->dictUses = ZSTD_use_once; - return 0; -} - -size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) -{ - return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent); -} - - -/* ZSTD_initDStream_usingDict() : - * return : expected size, aka ZSTD_startingInputLength(). - * this function cannot fail */ -size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) -{ - DEBUGLOG(4, "ZSTD_initDStream_usingDict"); - FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); - return ZSTD_startingInputLength(zds->format); -} - -/* note : this variant can't fail */ -size_t ZSTD_initDStream(ZSTD_DStream* zds) -{ - DEBUGLOG(4, "ZSTD_initDStream"); - FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); - FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); - return ZSTD_startingInputLength(zds->format); -} - -/* ZSTD_initDStream_usingDDict() : - * ddict will just be referenced, and must outlive decompression session - * this function cannot fail */ -size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) -{ - DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); - FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); - FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); - return ZSTD_startingInputLength(dctx->format); -} - -/* ZSTD_resetDStream() : - * return : expected size, aka ZSTD_startingInputLength(). 
- * this function cannot fail */ -size_t ZSTD_resetDStream(ZSTD_DStream* dctx) -{ - DEBUGLOG(4, "ZSTD_resetDStream"); - FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); - return ZSTD_startingInputLength(dctx->format); -} - - -size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) -{ - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); - ZSTD_clearDict(dctx); - if (ddict) { - dctx->ddict = ddict; - dctx->dictUses = ZSTD_use_indefinitely; - if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) { - if (dctx->ddictSet == NULL) { - dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); - if (!dctx->ddictSet) { - RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!"); - } - } - assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */ - FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), ""); - } - } - return 0; -} - -/* ZSTD_DCtx_setMaxWindowSize() : - * note : no direct equivalence in ZSTD_DCtx_setParameter, - * since this version sets windowSize, and the other sets windowLog */ -size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) -{ - ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); - size_t const min = (size_t)1 << bounds.lowerBound; - size_t const max = (size_t)1 << bounds.upperBound; - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); - RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); - RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); - dctx->maxWindowSize = maxWindowSize; - return 0; -} - -size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) -{ - return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format); -} - -ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) -{ - ZSTD_bounds bounds = { 0, 0, 0 }; - switch(dParam) { - case ZSTD_d_windowLogMax: - bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN; - bounds.upperBound = ZSTD_WINDOWLOG_MAX; - return bounds; - case ZSTD_d_format: - bounds.lowerBound = (int)ZSTD_f_zstd1; - bounds.upperBound = (int)ZSTD_f_zstd1_magicless; - ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); - return bounds; - case ZSTD_d_stableOutBuffer: - bounds.lowerBound = (int)ZSTD_bm_buffered; - bounds.upperBound = (int)ZSTD_bm_stable; - return bounds; - case ZSTD_d_forceIgnoreChecksum: - bounds.lowerBound = (int)ZSTD_d_validateChecksum; - bounds.upperBound = (int)ZSTD_d_ignoreChecksum; - return bounds; - case ZSTD_d_refMultipleDDicts: - bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; - bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; - return bounds; - case ZSTD_d_disableHuffmanAssembly: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_d_maxBlockSize: - bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; - bounds.upperBound = ZSTD_BLOCKSIZE_MAX; - return bounds; - - default:; - } - bounds.error = ERROR(parameter_unsupported); - return bounds; -} - -/* ZSTD_dParam_withinBounds: - * @return 1 if value is within dParam bounds, - * 0 otherwise */ -static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) -{ - ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam); - if (ZSTD_isError(bounds.error)) return 0; - if (value < bounds.lowerBound) return 0; - if (value > bounds.upperBound) return 0; - return 1; -} - -#define CHECK_DBOUNDS(p,v) { \ - RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ -} - -size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, 
ZSTD_dParameter param, int* value) -{ - switch (param) { - case ZSTD_d_windowLogMax: - *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize); - return 0; - case ZSTD_d_format: - *value = (int)dctx->format; - return 0; - case ZSTD_d_stableOutBuffer: - *value = (int)dctx->outBufferMode; - return 0; - case ZSTD_d_forceIgnoreChecksum: - *value = (int)dctx->forceIgnoreChecksum; - return 0; - case ZSTD_d_refMultipleDDicts: - *value = (int)dctx->refMultipleDDicts; - return 0; - case ZSTD_d_disableHuffmanAssembly: - *value = (int)dctx->disableHufAsm; - return 0; - case ZSTD_d_maxBlockSize: - *value = dctx->maxBlockSizeParam; - return 0; - default:; - } - RETURN_ERROR(parameter_unsupported, ""); -} - -size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) -{ - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); - switch(dParam) { - case ZSTD_d_windowLogMax: - if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; - CHECK_DBOUNDS(ZSTD_d_windowLogMax, value); - dctx->maxWindowSize = ((size_t)1) << value; - return 0; - case ZSTD_d_format: - CHECK_DBOUNDS(ZSTD_d_format, value); - dctx->format = (ZSTD_format_e)value; - return 0; - case ZSTD_d_stableOutBuffer: - CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); - dctx->outBufferMode = (ZSTD_bufferMode_e)value; - return 0; - case ZSTD_d_forceIgnoreChecksum: - CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value); - dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; - return 0; - case ZSTD_d_refMultipleDDicts: - CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value); - if (dctx->staticSize != 0) { - RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!"); - } - dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; - return 0; - case ZSTD_d_disableHuffmanAssembly: - CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); - dctx->disableHufAsm = value != 0; - return 0; - case ZSTD_d_maxBlockSize: - if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value); - dctx->maxBlockSizeParam = value; - return 0; - default:; - } - RETURN_ERROR(parameter_unsupported, ""); -} - -size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) -{ - if ( (reset == ZSTD_reset_session_only) - || (reset == ZSTD_reset_session_and_parameters) ) { - dctx->streamStage = zdss_init; - dctx->noForwardProgress = 0; - dctx->isFrameDecompression = 1; - } - if ( (reset == ZSTD_reset_parameters) - || (reset == ZSTD_reset_session_and_parameters) ) { - RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); - ZSTD_clearDict(dctx); - ZSTD_DCtx_resetParameters(dctx); - } - return 0; -} - - -size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) -{ - return ZSTD_sizeof_DCtx(dctx); -} - -static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax) -{ - size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax); - /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block - * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing - * the block at the beginning of the output buffer, and maintain a full window. - * - * We need another blockSize worth of buffer so that we can store split - * literals at the end of the block without overwriting the extDict window. 
- */ - unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2); - unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); - size_t const minRBSize = (size_t) neededSize; - RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, - frameParameter_windowTooLarge, ""); - return minRBSize; -} - -size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) -{ - return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX); -} - -size_t ZSTD_estimateDStreamSize(size_t windowSize) -{ - size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - size_t const inBuffSize = blockSize; /* no block can be larger */ - size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); - return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; -} - -size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) -{ - U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ - ZSTD_frameHeader zfh; - size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); - if (ZSTD_isError(err)) return err; - RETURN_ERROR_IF(err>0, srcSize_wrong, ""); - RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, - frameParameter_windowTooLarge, ""); - return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); -} - - -/* ***** Decompression ***** */ - -static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) -{ - return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; -} - -static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) -{ - if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) - zds->oversizedDuration++; - else - zds->oversizedDuration = 0; -} - -static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) -{ - return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; -} - -/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ -static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) -{ - ZSTD_outBuffer const expect = zds->expectedOutBuffer; - /* No requirement when ZSTD_obm_stable is not enabled. */ - if (zds->outBufferMode != ZSTD_bm_stable) - return 0; - /* Any buffer is allowed in zdss_init, this must be the same for every other call until - * the context is reset. - */ - if (zds->streamStage == zdss_init) - return 0; - /* The buffer must match our expectation exactly. */ - if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) - return 0; - RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!"); -} - -/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() - * and updates the stage and the output buffer state. This call is extracted so it can be - * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. - * NOTE: You must break after calling this function since the streamStage is modified. - */ -static size_t ZSTD_decompressContinueStream( - ZSTD_DStream* zds, char** op, char* oend, - void const* src, size_t srcSize) { - int const isSkipFrame = ZSTD_isSkipFrame(zds); - if (zds->outBufferMode == ZSTD_bm_buffered) { - size_t const dstSize = isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart; - size_t const decodedSize = ZSTD_decompressContinue(zds, - zds->outBuff + zds->outStart, dstSize, src, srcSize); - FORWARD_IF_ERROR(decodedSize, ""); - if (!decodedSize && !isSkipFrame) { - zds->streamStage = zdss_read; - } else { - zds->outEnd = zds->outStart + decodedSize; - zds->streamStage = zdss_flush; - } - } else { - /* Write directly into the output buffer */ - size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op); - size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); - FORWARD_IF_ERROR(decodedSize, ""); - *op += decodedSize; - /* Flushing is not needed. */ - zds->streamStage = zdss_read; - assert(*op <= oend); - assert(zds->outBufferMode == ZSTD_bm_stable); - } - return 0; -} - -size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) -{ - const char* const src = (const char*)input->src; - const char* const istart = input->pos != 0 ? src + input->pos : src; - const char* const iend = input->size != 0 ? src + input->size : src; - const char* ip = istart; - char* const dst = (char*)output->dst; - char* const ostart = output->pos != 0 ? dst + output->pos : dst; - char* const oend = output->size != 0 ? dst + output->size : dst; - char* op = ostart; - U32 someMoreWork = 1; - - DEBUGLOG(5, "ZSTD_decompressStream"); - RETURN_ERROR_IF( - input->pos > input->size, - srcSize_wrong, - "forbidden. in: pos: %u vs size: %u", - (U32)input->pos, (U32)input->size); - RETURN_ERROR_IF( - output->pos > output->size, - dstSize_tooSmall, - "forbidden. out: pos: %u vs size: %u", - (U32)output->pos, (U32)output->size); - DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); - FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); - - while (someMoreWork) { - switch(zds->streamStage) - { - case zdss_init : - DEBUGLOG(5, "stage zdss_init => transparent reset "); - zds->streamStage = zdss_loadHeader; - zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) - zds->legacyVersion = 0; -#endif - zds->hostageByte = 0; - zds->expectedOutBuffer = *output; - ZSTD_FALLTHROUGH; - - case zdss_loadHeader : - DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) - if (zds->legacyVersion) { - RETURN_ERROR_IF(zds->staticSize, memory_allocation, - "legacy support is incompatible with static dctx"); - { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); - if (hint==0) zds->streamStage = zdss_init; - return hint; - } } -#endif - { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); - if (zds->refMultipleDDicts && zds->ddictSet) { - ZSTD_DCtx_selectFrameDDict(zds); - } - if (ZSTD_isError(hSize)) { -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) - U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); - if (legacyVersion) { - ZSTD_DDict const* const ddict = ZSTD_getDDict(zds); - const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL; - size_t const dictSize = ddict ? 
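ZSTD_decompressStream(), whose body follows, is the heart of the streaming decoder; the caller-side loop it expects is the standard idiom below (a sketch using only stable zstd.h API; the function name is illustrative):

#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

/* The caller's side of the zdss_* state machine: feed file chunks, drain
 * every input byte, write whatever comes out. A final return value of 0
 * from ZSTD_decompressStream() means the frame ended exactly at end of input. */
static int zstd_decompress_file(ZSTD_DCtx* dctx, FILE* fin, FILE* fout)
{
    size_t const inCap  = ZSTD_DStreamInSize();
    size_t const outCap = ZSTD_DStreamOutSize();
    void* const inBuf  = malloc(inCap);
    void* const outBuf = malloc(outCap);
    size_t readSz, ret = 1;
    if (inBuf == NULL || outBuf == NULL) { free(inBuf); free(outBuf); return -1; }
    while ((readSz = fread(inBuf, 1, inCap, fin)) != 0) {
        ZSTD_inBuffer input = { inBuf, readSz, 0 };
        while (input.pos < input.size) {           /* consume every input byte */
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            ret = ZSTD_decompressStream(dctx, &output, &input);
            if (ZSTD_isError(ret)) goto done;
            fwrite(outBuf, 1, output.pos, fout);
        }
    }
done:
    free(inBuf); free(outBuf);
    return (ZSTD_isError(ret) || ret != 0) ? -1 : 0;   /* ret > 0: truncated input */
}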
ZSTD_DDict_dictSize(ddict) : 0; - DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion); - RETURN_ERROR_IF(zds->staticSize, memory_allocation, - "legacy support is incompatible with static dctx"); - FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext, - zds->previousLegacyVersion, legacyVersion, - dict, dictSize), ""); - zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; - { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); - if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ - return hint; - } } -#endif - return hSize; /* error */ - } - if (hSize != 0) { /* need more input */ - size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ - size_t const remainingInput = (size_t)(iend-ip); - assert(iend >= ip); - if (toLoad > remainingInput) { /* not enough input to load full header */ - if (remainingInput > 0) { - ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); - zds->lhSize += remainingInput; - } - input->pos = input->size; - /* check first few bytes */ - FORWARD_IF_ERROR( - ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format), - "First few bytes detected incorrect" ); - /* return hint input size */ - return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ - } - assert(ip != NULL); - ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; - break; - } } - - /* check for single-pass mode opportunity */ - if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN - && zds->fParams.frameType != ZSTD_skippableFrame - && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { - size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format); - if (cSize <= (size_t)(iend-istart)) { - /* shortcut : using single-pass mode */ - size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); - if (ZSTD_isError(decompressedSize)) return decompressedSize; - DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()"); - assert(istart != NULL); - ip = istart + cSize; - op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ - zds->expected = 0; - zds->streamStage = zdss_init; - someMoreWork = 0; - break; - } } - - /* Check output buffer is large enough for ZSTD_odm_stable. 
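The single-pass shortcut above fires when the header advertises frameContentSize and the caller's buffers cover the whole frame; a caller can opt into it by pre-sizing the destination from the header (sketch, stable API; the helper name and the 2 GiB cap are mine):

#include <zstd.h>
#include <stdlib.h>

/* Sketch: when the frame declares its content size, allocate the full
 * destination up front so the streaming decoder can take its single-pass
 * shortcut (no internal output buffer, no extra copy). */
static void* alloc_for_whole_frame(const void* src, size_t srcSize, size_t* dstSizeOut)
{
    unsigned long long const sz = ZSTD_getFrameContentSize(src, srcSize);
    if (sz == ZSTD_CONTENTSIZE_ERROR || sz == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;
    if (sz > (1ULL << 31)) return NULL;       /* arbitrary sanity cap for this sketch */
    *dstSizeOut = (size_t)sz;
    return malloc(sz ? (size_t)sz : 1);       /* avoid malloc(0) portability quirks */
}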
*/ - if (zds->outBufferMode == ZSTD_bm_stable - && zds->fParams.frameType != ZSTD_skippableFrame - && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN - && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { - RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); - } - - /* Consume header (see ZSTDds_decodeFrameHeader) */ - DEBUGLOG(4, "Consume header"); - FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); - - if (zds->format == ZSTD_f_zstd1 - && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); - zds->stage = ZSTDds_skipFrame; - } else { - FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); - zds->expected = ZSTD_blockHeaderSize; - zds->stage = ZSTDds_decodeBlockHeader; - } - - /* control buffer memory usage */ - DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", - (U32)(zds->fParams.windowSize >>10), - (U32)(zds->maxWindowSize >> 10) ); - zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); - RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, - frameParameter_windowTooLarge, ""); - if (zds->maxBlockSizeParam != 0) - zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam); - - /* Adapt buffer sizes to frame header instructions */ - { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); - size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered - ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) - : 0; - - ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); - - { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); - int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); - - if (tooSmall || tooLarge) { - size_t const bufferSize = neededInBuffSize + neededOutBuffSize; - DEBUGLOG(4, "inBuff : from %u to %u", - (U32)zds->inBuffSize, (U32)neededInBuffSize); - DEBUGLOG(4, "outBuff : from %u to %u", - (U32)zds->outBuffSize, (U32)neededOutBuffSize); - if (zds->staticSize) { /* static DCtx */ - DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); - assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ - RETURN_ERROR_IF( - bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), - memory_allocation, ""); - } else { - ZSTD_customFree(zds->inBuff, zds->customMem); - zds->inBuffSize = 0; - zds->outBuffSize = 0; - zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem); - RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); - } - zds->inBuffSize = neededInBuffSize; - zds->outBuff = zds->inBuff + zds->inBuffSize; - zds->outBuffSize = neededOutBuffSize; - } } } - zds->streamStage = zdss_read; - ZSTD_FALLTHROUGH; - - case zdss_read: - DEBUGLOG(5, "stage zdss_read"); - { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)); - DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); - if (neededInSize==0) { /* end of frame */ - zds->streamStage = zdss_init; - someMoreWork = 0; - break; - } - if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ - FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); - assert(ip != NULL); - ip += neededInSize; - /* Function modifies 
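The check above enforces the ZSTD_d_stableOutBuffer contract: the caller promises one fixed output region for the whole frame, the decoder writes into it directly, and ZSTD_checkOutBuffer() rejects any change of dst/size between calls. A hedged usage sketch (the parameter is experimental, ZSTD_STATIC_LINKING_ONLY; the function name is illustrative):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: register one stable output buffer, then pass the SAME ZSTD_outBuffer
 * object on every call; only its pos field may advance, and only the decoder
 * advances it. dst must be large enough for the whole frame. */
static size_t decompress_into_stable(ZSTD_DCtx* dctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    size_t ret = ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
    if (ZSTD_isError(ret)) return ret;
    do {
        ret = ZSTD_decompressStream(dctx, &out, &in);  /* same `out` object each call */
    } while (ret != 0 && !ZSTD_isError(ret) && in.pos < in.size);
    return ret;   /* 0 = frame done; out.pos = bytes produced */
}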
the stage so we must break */ - break; - } } - if (ip==iend) { someMoreWork = 0; break; } /* no more input */ - zds->streamStage = zdss_load; - ZSTD_FALLTHROUGH; - - case zdss_load: - { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); - size_t const toLoad = neededInSize - zds->inPos; - int const isSkipFrame = ZSTD_isSkipFrame(zds); - size_t loadedSize; - /* At this point we shouldn't be decompressing a block that we can stream. */ - assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); - if (isSkipFrame) { - loadedSize = MIN(toLoad, (size_t)(iend-ip)); - } else { - RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, - corruption_detected, - "should never happen"); - loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); - } - if (loadedSize != 0) { - /* ip may be NULL */ - ip += loadedSize; - zds->inPos += loadedSize; - } - if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ - - /* decode loaded input */ - zds->inPos = 0; /* input is consumed */ - FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); - /* Function modifies the stage so we must break */ - break; - } - case zdss_flush: - { - size_t const toFlushSize = zds->outEnd - zds->outStart; - size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); - - op = op ? op + flushedSize : op; - - zds->outStart += flushedSize; - if (flushedSize == toFlushSize) { /* flush completed */ - zds->streamStage = zdss_read; - if ( (zds->outBuffSize < zds->fParams.frameContentSize) - && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { - DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", - (int)(zds->outBuffSize - zds->outStart), - (U32)zds->fParams.blockSizeMax); - zds->outStart = zds->outEnd = 0; - } - break; - } } - /* cannot complete flush */ - someMoreWork = 0; - break; - - default: - assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ - } } - - /* result */ - input->pos = (size_t)(ip - (const char*)(input->src)); - output->pos = (size_t)(op - (char*)(output->dst)); - - /* Update the expected output buffer for ZSTD_obm_stable. 
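The flush/hostage logic above gives ZSTD_decompressStream() its return convention: 0 means the frame is fully decoded and flushed, a positive value is a hint of how many more input bytes to supply, and errors are detected with ZSTD_isError(). The _simpleArgs wrapper deleted a little further below exposes the same convention without structs, which is what FFI layers need; a hedged sketch (experimental API, helper name mine):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressStream_simpleArgs is experimental */
#include <zstd.h>

/* Sketch: drive one frame through the struct-free wrapper; positions travel
 * as plain size_t in/out parameters instead of ZSTD_inBuffer/ZSTD_outBuffer. */
static size_t drain_one_frame(ZSTD_DCtx* dctx,
                              void* dst, size_t dstCap,
                              const void* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0, hint;
    do {
        hint = ZSTD_decompressStream_simpleArgs(dctx, dst, dstCap, &dstPos,
                                                src, srcSize, &srcPos);
        if (ZSTD_isError(hint)) return hint;
    } while (hint != 0 && srcPos < srcSize && dstPos < dstCap);
    return hint;   /* 0 = frame fully decoded and flushed */
}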
*/ - zds->expectedOutBuffer = *output; - - if ((ip==istart) && (op==ostart)) { /* no forward progress */ - zds->noForwardProgress ++; - if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { - RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); - RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); - assert(0); - } - } else { - zds->noForwardProgress = 0; - } - { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); - if (!nextSrcSizeHint) { /* frame fully decoded */ - if (zds->outEnd == zds->outStart) { /* output fully flushed */ - if (zds->hostageByte) { - if (input->pos >= input->size) { - /* can't release hostage (not present) */ - zds->streamStage = zdss_read; - return 1; - } - input->pos++; /* release hostage */ - } /* zds->hostageByte */ - return 0; - } /* zds->outEnd == zds->outStart */ - if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ - input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ - zds->hostageByte=1; - } - return 1; - } /* nextSrcSizeHint==0 */ - nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ - assert(zds->inPos <= nextSrcSizeHint); - nextSrcSizeHint -= zds->inPos; /* part already loaded*/ - return nextSrcSizeHint; - } -} - -size_t ZSTD_decompressStream_simpleArgs ( - ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos) -{ - ZSTD_outBuffer output; - ZSTD_inBuffer input; - output.dst = dst; - output.size = dstCapacity; - output.pos = *dstPos; - input.src = src; - input.size = srcSize; - input.pos = *srcPos; - { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; - } -} diff --git a/zstandard_android/src/decompress/zstd_decompress_block.c b/zstandard_android/src/decompress/zstd_decompress_block.c deleted file mode 100644 index f865692..0000000 --- a/zstandard_android/src/decompress/zstd_decompress_block.c +++ /dev/null @@ -1,2215 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -/* zstd_decompress_block : - * this module takes care of decompressing _compressed_ block */ - -/*-******************************************************* -* Dependencies -*********************************************************/ -#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ -#include "../common/compiler.h" /* prefetch */ -#include "../common/cpu.h" /* bmi2 */ -#include "../common/mem.h" /* low level memory routines */ -#define FSE_STATIC_LINKING_ONLY -#include "../common/fse.h" -#include "../common/huf.h" -#include "../common/zstd_internal.h" -#include "zstd_decompress_internal.h" /* ZSTD_DCtx */ -#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ -#include "zstd_decompress_block.h" -#include "../common/bits.h" /* ZSTD_highbit32 */ - -/*_******************************************************* -* Macros -**********************************************************/ - -/* These two optional macros force the use one way or another of the two - * ZSTD_decompressSequences implementations. You can't force in both directions - * at the same time. - */ -#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) -#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!" -#endif - - -/*_******************************************************* -* Memory operations -**********************************************************/ -static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); } - - -/*-************************************************************* - * Block decoding - ***************************************************************/ - -static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx) -{ - size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX; - assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX); - return blockSizeMax; -} - -/*! ZSTD_getcBlockSize() : - * Provides the size of compressed block from block header `src` */ -size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, - blockProperties_t* bpPtr) -{ - RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); - - { U32 const cBlockHeader = MEM_readLE24(src); - U32 const cSize = cBlockHeader >> 3; - bpPtr->lastBlock = cBlockHeader & 1; - bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); - bpPtr->origSize = cSize; /* only useful for RLE */ - if (bpPtr->blockType == bt_rle) return 1; - RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); - return cSize; - } -} - -/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ -static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, - const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) -{ - size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); - assert(litSize <= blockSizeMax); - assert(dctx->isFrameDecompression || streaming == not_streaming); - assert(expectedWriteSize <= blockSizeMax); - if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) { - /* If we aren't streaming, we can just put the literals after the output - * of the current block. We don't need to worry about overwriting the - * extDict of our window, because it doesn't exist. - * So if we have space after the end of the block, just put it there. 
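ZSTD_getcBlockSize() below decodes the 3-byte block header; the layout it parses, restated standalone per RFC 8878 §3.1.1.2 (a self-contained sketch, names mine):

#include <stdint.h>

/* Block_Header, little-endian 24 bits:
 *   bit 0      Last_Block
 *   bits 1-2   Block_Type (0=Raw, 1=RLE, 2=Compressed, 3=Reserved/invalid)
 *   bits 3-23  Block_Size (for RLE this is the REGENERATED size; only 1 byte
 *              of payload is stored, which is why the code returns 1 for bt_rle) */
typedef struct { unsigned lastBlock; unsigned blockType; uint32_t size; } block_hdr;

static block_hdr parse_block_header(const uint8_t p[3])
{
    uint32_t const h = (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
    block_hdr bh;
    bh.lastBlock = h & 1;
    bh.blockType = (h >> 1) & 3;
    bh.size      = h >> 3;
    return bh;
}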
- */ - dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH; - dctx->litBufferEnd = dctx->litBuffer + litSize; - dctx->litBufferLocation = ZSTD_in_dst; - } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) { - /* Literals fit entirely within the extra buffer, put them there to avoid - * having to split the literals. - */ - dctx->litBuffer = dctx->litExtraBuffer; - dctx->litBufferEnd = dctx->litBuffer + litSize; - dctx->litBufferLocation = ZSTD_not_in_dst; - } else { - assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE); - /* Literals must be split between the output block and the extra lit - * buffer. We fill the extra lit buffer with the tail of the literals, - * and put the rest of the literals at the end of the block, with - * WILDCOPY_OVERLENGTH of buffer room to allow for overreads. - * This MUST not write more than our maxBlockSize beyond dst, because in - * streaming mode, that could overwrite part of our extDict window. - */ - if (splitImmediately) { - /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ - dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; - dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; - } else { - /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */ - dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; - dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; - } - dctx->litBufferLocation = ZSTD_split; - assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize); - } -} - -/*! ZSTD_decodeLiteralsBlock() : - * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored - * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current - * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being - * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. - * - * @return : nb of bytes read from src (< srcSize ) - * note : symbol not declared but exposed for fullbench */ -static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ - void* dst, size_t dstCapacity, const streaming_operation streaming) -{ - DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); - RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); - - { const BYTE* const istart = (const BYTE*) src; - symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); - size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); - - switch(litEncType) - { - case set_repeat: - DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); - RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); - ZSTD_FALLTHROUGH; - - case set_compressed: - RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3"); - { size_t lhSize, litSize, litCSize; - U32 singleStream=0; - U32 const lhlCode = (istart[0] >> 2) & 3; - U32 const lhc = MEM_readLE32(istart); - size_t hufSuccess; - size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); - int const flags = 0 - | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) - | (dctx->disableHufAsm ? 
HUF_flags_disableAsm : 0); - switch(lhlCode) - { - case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ - /* 2 - 2 - 10 - 10 */ - singleStream = !lhlCode; - lhSize = 3; - litSize = (lhc >> 4) & 0x3FF; - litCSize = (lhc >> 14) & 0x3FF; - break; - case 2: - /* 2 - 2 - 14 - 14 */ - lhSize = 4; - litSize = (lhc >> 4) & 0x3FFF; - litCSize = lhc >> 18; - break; - case 3: - /* 2 - 2 - 18 - 18 */ - lhSize = 5; - litSize = (lhc >> 4) & 0x3FFFF; - litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); - break; - } - RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); - if (!singleStream) - RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, - "Not enough literals (%zu) for the 4-streams mode (min %u)", - litSize, MIN_LITERALS_FOR_4_STREAMS); - RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); - RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); - - /* prefetch huffman table if cold */ - if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { - PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable)); - } - - if (litEncType==set_repeat) { - if (singleStream) { - hufSuccess = HUF_decompress1X_usingDTable( - dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, flags); - } else { - assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); - hufSuccess = HUF_decompress4X_usingDTable( - dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, flags); - } - } else { - if (singleStream) { -#if defined(HUF_FORCE_DECOMPRESS_X2) - hufSuccess = HUF_decompress1X_DCtx_wksp( - dctx->entropy.hufTable, dctx->litBuffer, litSize, - istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), flags); -#else - hufSuccess = HUF_decompress1X1_DCtx_wksp( - dctx->entropy.hufTable, dctx->litBuffer, litSize, - istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), flags); -#endif - } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp( - dctx->entropy.hufTable, dctx->litBuffer, litSize, - istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), flags); - } - } - if (dctx->litBufferLocation == ZSTD_split) - { - assert(litSize > ZSTD_LITBUFFEREXTRASIZE); - ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); - ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); - dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; - dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; - assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax); - } - - RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); - - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - dctx->litEntropy = 1; - if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; - return litCSize + lhSize; - } - - case set_basic: - { size_t litSize, lhSize; - U32 const lhlCode = ((istart[0]) >> 2) & 3; - size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); - switch(lhlCode) - { - case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ - lhSize = 1; - litSize = istart[0] >> 3; - break; - case 1: - lhSize = 2; - litSize = MEM_readLE16(istart) >> 4; - break; - case 3: - lhSize = 3; - RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= 
MIN_CBLOCK_SIZE == 2; here we need lhSize = 3"); - litSize = MEM_readLE24(istart) >> 4; - break; - } - - RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); - RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); - if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ - RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); - if (dctx->litBufferLocation == ZSTD_split) - { - ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE); - ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); - } - else - { - ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize); - } - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize+litSize; - } - /* direct reference into compressed stream */ - dctx->litPtr = istart+lhSize; - dctx->litSize = litSize; - dctx->litBufferEnd = dctx->litPtr + litSize; - dctx->litBufferLocation = ZSTD_not_in_dst; - return lhSize+litSize; - } - - case set_rle: - { U32 const lhlCode = ((istart[0]) >> 2) & 3; - size_t litSize, lhSize; - size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); - switch(lhlCode) - { - case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ - lhSize = 1; - litSize = istart[0] >> 3; - break; - case 1: - lhSize = 2; - RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3"); - litSize = MEM_readLE16(istart) >> 4; - break; - case 3: - lhSize = 3; - RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4"); - litSize = MEM_readLE24(istart) >> 4; - break; - } - RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); - RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); - if (dctx->litBufferLocation == ZSTD_split) - { - ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE); - ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE); - } - else - { - ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize); - } - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize+1; - } - default: - RETURN_ERROR(corruption_detected, "impossible"); - } - } -} - -/* Hidden declaration for fullbench */ -size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, - const void* src, size_t srcSize, - void* dst, size_t dstCapacity); -size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, - const void* src, size_t srcSize, - void* dst, size_t dstCapacity) -{ - dctx->isFrameDecompression = 0; - return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming); -} - -/* Default FSE distribution tables. 
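The set_basic/set_rle paths above parse the short literals headers; the size formats they decode, restated standalone (sketch per RFC 8878 §3.1.1.3.1.1; the compressed variants use the wider 3-5 byte forms handled in set_compressed, and the helper name is mine):

#include <stdint.h>

/* Raw/RLE Literals_Section_Header: 2 bits type, 2 bits Size_Format, then a
 * size of 5, 12, or 20 bits. Returns the header length in bytes. */
static size_t parse_raw_lits_header(const uint8_t* p, size_t* litSize)
{
    unsigned const sizeFormat = (p[0] >> 2) & 3;
    switch (sizeFormat) {
    case 0: case 2:                        /* 1-byte header, 5-bit size */
        *litSize = p[0] >> 3;
        return 1;
    case 1:                                /* 2-byte header, 12-bit size */
        *litSize = ((uint32_t)p[0] | ((uint32_t)p[1] << 8)) >> 4;
        return 2;
    default:                               /* 3-byte header, 20-bit size */
        *litSize = ((uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16)) >> 4;
        return 3;
    }
}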
- * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
- * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
- * They were generated programmatically with following method :
- * - start from default distributions, present in /lib/common/zstd_internal.h
- * - generate tables normally, using ZSTD_buildFSETable()
- * - printout the content of tables
- * - prettify output, report below, test with fuzzer to ensure it's correct */
-
-/* Default FSE distribution table for Literal Lengths */
-static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
-     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-     /* nextState, nbAddBits, nbBits, baseVal */
-     /* [ 64 pre-computed entries, not recoverable from this excerpt;
-        see upstream lib/decompress/zstd_decompress_block.c ] */
-};   /* LL_defaultDTable */
-
-/* Default FSE distribution table for Offset Codes */
-static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
-     {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-     /* [ 32 pre-computed entries, not recoverable from this excerpt ] */
-};   /* OF_defaultDTable */
-
-/* Default FSE distribution table for Match Lengths */
-static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
-     {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-     /* [ 64 pre-computed entries, not recoverable from this excerpt ] */
-};   /* ML_defaultDTable */
-
-static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits)
-{
-    void* ptr = dt;
-    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
-    ZSTD_seqSymbol* const cell = dt + 1;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->nbBits = 0;
-    cell->nextState = 0;
-    assert(nbAddBits < 255);
-    cell->nbAdditionalBits = nbAddBits;
-    cell->baseValue = baseValue;
-}
-
-
-/* ZSTD_buildFSETable() :
- * generate FSE decoding table for one symbol (ll, ml or off)
- * cannot fail if input is valid =>
- * all inputs are presumed validated at this stage */
-FORCE_INLINE_TEMPLATE
-void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
-            const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U8* nbAdditionalBits,
-            unsigned tableLog, void* wksp, size_t wkspSize)
-{
-    ZSTD_seqSymbol* const tableDecode = dt+1;
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-
-    U16* symbolNext = (U16*)wksp;
-    BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
-    U32 highThreshold = tableSize - 1;
-
-
-    /* Sanity Checks */
-    assert(maxSymbolValue <= MaxSeq);
-    assert(tableLog <= MaxFSELog);
-    assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
-    (void)wkspSize;
-    /* Init, lay down lowprob symbols */
-    {   ZSTD_seqSymbol_header DTableH;
-        DTableH.tableLog = tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].baseValue = s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    assert(normalizedCounter[s]>=0);
-                    symbolNext[s] = (U16)normalizedCounter[s];
-        }   }   }
-        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    assert(tableSize <= 512);
-    /* Specialized symbol spreading for the case when there are
-     * no low probability (-1 count) symbols. When compressing
-     * small blocks we avoid low probability symbols to hit this
-     * case, since header decoding speed matters more.
-     */
-    if (highThreshold == tableSize - 1) {
-        size_t const tableMask = tableSize-1;
-        size_t const step = FSE_TABLESTEP(tableSize);
-        /* First lay down the symbols in order.
-         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
-         * misses since small blocks generally have small table logs, so nearly
-         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
-         * our buffer to handle the over-write.
-         */
-        {
-            U64 const add = 0x0101010101010101ull;
-            size_t pos = 0;
-            U64 sv = 0;
-            U32 s;
-            for (s=0; s<maxSV1; ++s, sv += add) {
-                int i;
-                int const n = normalizedCounter[s];
-                MEM_write64(spread + pos, sv);
-                for (i = 8; i < n; i += 8) {
-                    MEM_write64(spread + pos + i, sv);
-                }
-                assert(n>=0);
-                pos += (size_t)n;
-            }
-        }
-        /* Now we spread those positions across the table.
-         * The benefit of doing it in two stages is that we avoid the
-         * variable size inner loop, which caused lots of branch misses.
-         * Now we can run through all the positions without any branch misses.
-         * We unroll the loop twice, since that is what empirically worked best.
-         */
-        {
-            size_t position = 0;
-            size_t s;
-            size_t const unroll = 2;
-            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
-            for (s = 0; s < (size_t)tableSize; s += unroll) {
-                size_t u;
-                for (u = 0; u < unroll; ++u) {
-                    size_t const uPosition = (position + (u * step)) & tableMask;
-                    tableDecode[uPosition].baseValue = spread[s + u];
-                }
-                position = (position + (unroll * step)) & tableMask;
-            }
-            assert(position == 0);
-        }
-    } else {
-        U32 const tableMask = tableSize-1;
-        U32 const step = FSE_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            int const n = normalizedCounter[s];
-            for (i=0; i<n; i++) {
-                tableDecode[position].baseValue = s;
-                position = (position + step) & tableMask;
-                while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */
-        }   }
-        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {
-        U32 u;
-        for (u=0; u<tableSize; u++) {
-            U32 const symbol = tableDecode[u].baseValue;
-            U32 const nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
-            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-            assert(nbAdditionalBits[symbol] < 255);
-            tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
-            tableDecode[u].baseValue = baseValue[symbol];
-        }
-    }
-}
-
-/* Avoids the FORCE_INLINE of the _body() function. */
-static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
-            const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U8* nbAdditionalBits,
-            unsigned tableLog, void* wksp, size_t wkspSize)
-{
-    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
-            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
-}
-
-#if DYNAMIC_BMI2
-BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
-            const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U8* nbAdditionalBits,
-            unsigned tableLog, void* wksp, size_t wkspSize)
-{
-    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
-            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
-}
-#endif
-
-void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
-            const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U8* nbAdditionalBits,
-            unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
-{
-#if DYNAMIC_BMI2
-    if (bmi2) {
-        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
-                baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
-        return;
-    }
-#endif
-    (void)bmi2;
-    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
-            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
-}
-
-
-/*! ZSTD_buildSeqTable() :
- * @return : nb bytes read from src,
- *           or an error code if it fails */
-static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
-                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
-                                 const void* src, size_t srcSize,
-                                 const U32* baseValue, const U8* nbAdditionalBits,
-                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
-                                 int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
-                                 int bmi2)
-{
-    switch(type)
-    {
-    case set_rle :
-        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
-        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
-        { U32 const symbol = *(const BYTE*)src;
-          U32 const baseline = baseValue[symbol];
-          U8 const nbBits = nbAdditionalBits[symbol];
-          ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
-        }
-        *DTablePtr = DTableSpace;
-        return 1;
-    case set_basic :
-        *DTablePtr = defaultTable;
-        return 0;
-    case set_repeat:
-        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
-        /* prefetch FSE table if used */
-        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
-            const void* const pStart = *DTablePtr;
-            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
-            PREFETCH_AREA(pStart, pSize);
-        }
-        return 0;
-    case set_compressed :
-        { unsigned tableLog;
-          S16 norm[MaxSeq+1];
-          size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
-          RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
-          RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
-          ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
-          *DTablePtr = DTableSpace;
-          return headerSize;
-        }
-    default :
-        assert(0);
-        RETURN_ERROR(GENERIC, "impossible");
-    }
-}
-
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
-                             const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip = istart;
-    int nbSeq;
-    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
-
-    /* check */
-    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
-
-    /* SeqHead */
-    nbSeq = *ip++;
-    if (nbSeq > 0x7F) {
-        if (nbSeq == 0xFF) {
-            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
-            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
-            ip+=2;
-        } else {
-            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
-            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-        }
-    }
-    *nbSeqPtr = nbSeq;
-
-    if (nbSeq == 0) {
-        /* No sequence : section ends immediately */
-        RETURN_ERROR_IF(ip != iend, corruption_detected,
-            "extraneous data present in the Sequences section");
-        return (size_t)(ip - istart);
-    }
-
-    /* FSE table descriptors */
-    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
-    RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */
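The spread loops above rely on step = FSE_TABLESTEP(tableSize) visiting every cell of a power-of-two table exactly once before wrapping. A tiny self-contained demonstration of that property (assumes nothing from zstd; the constant mirrors FSE_TABLESTEP's definition):

#include <stdio.h>

int main(void)
{
    unsigned const tableSize = 32, mask = tableSize - 1;
    unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;  /* 16+4+3 = 23 */
    unsigned pos = 0, n;
    for (n = 0; n < tableSize; n++) {
        printf("%u ", pos);     /* 23 is odd, hence coprime with 32: each cell prints once */
        pos = (pos + step) & mask;
    }
    printf("\n");               /* pos has wrapped back to exactly 0 here */
    return 0;
}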
-    { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
-      symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
-      symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
-      ip++;
-
-      /* Build DTables */
-      { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
-                                                  LLtype, MaxLL, LLFSELog,
-                                                  ip, iend-ip,
-                                                  LL_base, LL_bits,
-                                                  LL_defaultDTable, dctx->fseEntropy,
-                                                  dctx->ddictIsCold, nbSeq,
-                                                  dctx->workspace, sizeof(dctx->workspace),
-                                                  ZSTD_DCtx_get_bmi2(dctx));
-        RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
-        ip += llhSize;
-      }
-
-      { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
-                                                  OFtype, MaxOff, OffFSELog,
-                                                  ip, iend-ip,
-                                                  OF_base, OF_bits,
-                                                  OF_defaultDTable, dctx->fseEntropy,
-                                                  dctx->ddictIsCold, nbSeq,
-                                                  dctx->workspace, sizeof(dctx->workspace),
-                                                  ZSTD_DCtx_get_bmi2(dctx));
-        RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
-        ip += ofhSize;
-      }
-
-      { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
-                                                  MLtype, MaxML, MLFSELog,
-                                                  ip, iend-ip,
-                                                  ML_base, ML_bits,
-                                                  ML_defaultDTable, dctx->fseEntropy,
-                                                  dctx->ddictIsCold, nbSeq,
-                                                  dctx->workspace, sizeof(dctx->workspace),
-                                                  ZSTD_DCtx_get_bmi2(dctx));
-        RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
-        ip += mlhSize;
-      }
-    }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-} seq_t;
-
-typedef struct {
-    size_t state;
-    const ZSTD_seqSymbol* table;
-} ZSTD_fseState;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    ZSTD_fseState stateLL;
-    ZSTD_fseState stateOffb;
-    ZSTD_fseState stateML;
-    size_t prevOffset[ZSTD_REP_NUM];
-} seqState_t;
-
-/*! ZSTD_overlapCopy8() :
- *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
- *  If the offset is < 8 then the offset is spread to at least 8 bytes.
- *
- *  Precondition: *ip <= *op
- *  Postcondition: *op - *ip >= 8
- */
-HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
-    assert(*ip <= *op);
-    if (offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[offset];
-        (*op)[0] = (*ip)[0];
-        (*op)[1] = (*ip)[1];
-        (*op)[2] = (*ip)[2];
-        (*op)[3] = (*ip)[3];
-        *ip += dec32table[offset];
-        ZSTD_copy4(*op+4, *ip);
-        *ip -= sub2;
-    } else {
-        ZSTD_copy8(*op, *ip);
-    }
-    *ip += 8;
-    *op += 8;
-    assert(*op - *ip >= 8);
-}
-
-/*! ZSTD_safecopy() :
- *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
- *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
- *  This function is only called in the uncommon case where the sequence is near the end of the block. It
- *  should be fast for a single long sequence, but can be slow for several short sequences.
- *
- *  @param ovtype controls the overlap detection
- *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
- *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
- *           The src buffer must be before the dst buffer.
- */ -static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { - ptrdiff_t const diff = op - ip; - BYTE* const oend = op + length; - - assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || - (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); - - if (length < 8) { - /* Handle short lengths. */ - while (op < oend) *op++ = *ip++; - return; - } - if (ovtype == ZSTD_overlap_src_before_dst) { - /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ - assert(length >= 8); - ZSTD_overlapCopy8(&op, &ip, diff); - length -= 8; - assert(op - ip >= 8); - assert(op <= oend); - } - - if (oend <= oend_w) { - /* No risk of overwrite. */ - ZSTD_wildcopy(op, ip, length, ovtype); - return; - } - if (op <= oend_w) { - /* Wildcopy until we get close to the end. */ - assert(oend > oend_w); - ZSTD_wildcopy(op, ip, oend_w - op, ovtype); - ip += oend_w - op; - op += oend_w - op; - } - /* Handle the leftovers. */ - while (op < oend) *op++ = *ip++; -} - -/* ZSTD_safecopyDstBeforeSrc(): - * This version allows overlap with dst before src, or handles the non-overlap case with dst after src - * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ -static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) { - ptrdiff_t const diff = op - ip; - BYTE* const oend = op + length; - - if (length < 8 || diff > -8) { - /* Handle short lengths, close overlaps, and dst not before src. */ - while (op < oend) *op++ = *ip++; - return; - } - - if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { - ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); - ip += oend - WILDCOPY_OVERLENGTH - op; - op += oend - WILDCOPY_OVERLENGTH - op; - } - - /* Handle the leftovers. */ - while (op < oend) *op++ = *ip++; -} - -/* ZSTD_execSequenceEnd(): - * This version handles cases that are near the end of the output buffer. It requires - * more careful checks to make sure there is no overflow. By separating out these hard - * and unlikely cases, we can speed up the common cases. - * - * NOTE: This function needs to be fast for a single long sequence, but doesn't need - * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). 
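The copy helpers above exist because LZ77 match copies may self-overlap: when offset < length the copy must re-read bytes it has just written (run replication), which memcpy()/memmove() do not provide, and because the fast "wildcopy" path trades precision for speed by writing in fixed chunks past the requested length. A minimal restatement of both ideas (self-contained sketch; the 15-byte overshoot mirrors zstd's WILDCOPY_OVERLENGTH slack convention, and the names are mine):

#include <stddef.h>
#include <string.h>

/* Byte-wise overlapping copy: replicates a run when offset < length,
 * exactly the behavior LZ77 decoding needs. */
static void lz_overlap_copy(unsigned char* op, size_t offset, size_t length)
{
    const unsigned char* ip = op - offset;
    while (length--) *op++ = *ip++;
}

/* Wildcopy idea: copy in fixed 16-byte chunks, possibly overshooting by up
 * to 15 bytes; callers must reserve that slack after the destination. */
static void wildcopy_sketch(unsigned char* dst, const unsigned char* src, ptrdiff_t length)
{
    unsigned char* const oend = dst + length;
    do { memcpy(dst, src, 16); dst += 16; src += 16; } while (dst < oend);
}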
- */ -FORCE_NOINLINE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_execSequenceEnd(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) -{ - BYTE* const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - const BYTE* const iLitEnd = *litPtr + sequence.litLength; - const BYTE* match = oLitEnd - sequence.offset; - BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; - - /* bounds checks : careful of address space overflow in 32-bit mode */ - RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); - RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); - assert(op < op + sequenceLength); - assert(oLitEnd < op + sequenceLength); - - /* copy literals */ - ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); - op = oLitEnd; - *litPtr = iLitEnd; - - /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { - /* offset beyond prefix */ - RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); - match = dictEnd - (prefixStart - match); - if (match + sequence.matchLength <= dictEnd) { - ZSTD_memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } - } - ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); - return sequenceLength; -} - -/* ZSTD_execSequenceEndSplitLitBuffer(): - * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. 
- */ -FORCE_NOINLINE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, - BYTE* const oend, const BYTE* const oend_w, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) -{ - BYTE* const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - const BYTE* const iLitEnd = *litPtr + sequence.litLength; - const BYTE* match = oLitEnd - sequence.offset; - - - /* bounds checks : careful of address space overflow in 32-bit mode */ - RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); - RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); - assert(op < op + sequenceLength); - assert(oLitEnd < op + sequenceLength); - - /* copy literals */ - RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer"); - ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength); - op = oLitEnd; - *litPtr = iLitEnd; - - /* copy Match */ - if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { - /* offset beyond prefix */ - RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); - match = dictEnd - (prefixStart - match); - if (match + sequence.matchLength <= dictEnd) { - ZSTD_memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } - } - ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); - return sequenceLength; -} - -HINT_INLINE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_execSequence(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) -{ - BYTE* const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ - const BYTE* const iLitEnd = *litPtr + sequence.litLength; - const BYTE* match = oLitEnd - sequence.offset; - - assert(op != NULL /* Precondition */); - assert(oend_w < oend /* No underflow */); - -#if defined(__aarch64__) - /* prefetch sequence starting from match that will be used for copy later */ - PREFETCH_L1(match); -#endif - /* Handle edge cases in a slow path: - * - Read beyond end of literals - * - Match end is within WILDCOPY_OVERLIMIT of oend - * - 32-bit mode and the match length overflows - */ - if (UNLIKELY( - iLitEnd > litLimit || - oMatchEnd > oend_w || - (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) - return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); - - /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ - assert(op <= oLitEnd /* No overflow */); - assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); - assert(oMatchEnd <= oend /* No underflow */); - assert(iLitEnd <= litLimit /* Literal 
length is in bounds */); - assert(oLitEnd <= oend_w /* Can wildcopy literals */); - assert(oMatchEnd <= oend_w /* Can wildcopy matches */); - - /* Copy Literals: - * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. - * We likely don't need the full 32-byte wildcopy. - */ - assert(WILDCOPY_OVERLENGTH >= 16); - ZSTD_copy16(op, (*litPtr)); - if (UNLIKELY(sequence.litLength > 16)) { - ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap); - } - op = oLitEnd; - *litPtr = iLitEnd; /* update for next sequence */ - - /* Copy Match */ - if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { - /* offset beyond prefix -> go into extDict */ - RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); - match = dictEnd + (match - prefixStart); - if (match + sequence.matchLength <= dictEnd) { - ZSTD_memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } - } - /* Match within prefix of 1 or more bytes */ - assert(op <= oMatchEnd); - assert(oMatchEnd <= oend_w); - assert(match >= prefixStart); - assert(sequence.matchLength >= 1); - - /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy - * without overlap checking. - */ - if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { - /* We bet on a full wildcopy for matches, since we expect matches to be - * longer than literals (in general). In silesia, ~10% of matches are longer - * than 16 bytes. - */ - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); - return sequenceLength; - } - assert(sequence.offset < WILDCOPY_VECLEN); - - /* Copy 8 bytes and spread the offset to be >= 8. */ - ZSTD_overlapCopy8(&op, &match, sequence.offset); - - /* If the match length is > 8 bytes, then continue with the wildcopy. 
*/ - if (sequence.matchLength > 8) { - assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); - } - return sequenceLength; -} - -HINT_INLINE -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, - BYTE* const oend, const BYTE* const oend_w, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) -{ - BYTE* const oLitEnd = op + sequence.litLength; - size_t const sequenceLength = sequence.litLength + sequence.matchLength; - BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ - const BYTE* const iLitEnd = *litPtr + sequence.litLength; - const BYTE* match = oLitEnd - sequence.offset; - - assert(op != NULL /* Precondition */); - assert(oend_w < oend /* No underflow */); - /* Handle edge cases in a slow path: - * - Read beyond end of literals - * - Match end is within WILDCOPY_OVERLIMIT of oend - * - 32-bit mode and the match length overflows - */ - if (UNLIKELY( - iLitEnd > litLimit || - oMatchEnd > oend_w || - (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) - return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); - - /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ - assert(op <= oLitEnd /* No overflow */); - assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); - assert(oMatchEnd <= oend /* No underflow */); - assert(iLitEnd <= litLimit /* Literal length is in bounds */); - assert(oLitEnd <= oend_w /* Can wildcopy literals */); - assert(oMatchEnd <= oend_w /* Can wildcopy matches */); - - /* Copy Literals: - * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. - * We likely don't need the full 32-byte wildcopy. - */ - assert(WILDCOPY_OVERLENGTH >= 16); - ZSTD_copy16(op, (*litPtr)); - if (UNLIKELY(sequence.litLength > 16)) { - ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); - } - op = oLitEnd; - *litPtr = iLitEnd; /* update for next sequence */ - - /* Copy Match */ - if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { - /* offset beyond prefix -> go into extDict */ - RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); - match = dictEnd + (match - prefixStart); - if (match + sequence.matchLength <= dictEnd) { - ZSTD_memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } - /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } } - /* Match within prefix of 1 or more bytes */ - assert(op <= oMatchEnd); - assert(oMatchEnd <= oend_w); - assert(match >= prefixStart); - assert(sequence.matchLength >= 1); - - /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy - * without overlap checking. - */ - if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { - /* We bet on a full wildcopy for matches, since we expect matches to be - * longer than literals (in general). In silesia, ~10% of matches are longer - * than 16 bytes. - */ - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); - return sequenceLength; - } - assert(sequence.offset < WILDCOPY_VECLEN); - - /* Copy 8 bytes and spread the offset to be >= 8. 
*/ - ZSTD_overlapCopy8(&op, &match, sequence.offset); - - /* If the match length is > 8 bytes, then continue with the wildcopy. */ - if (sequence.matchLength > 8) { - assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); - } - return sequenceLength; -} - - -static void -ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) -{ - const void* ptr = dt; - const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; - DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); - DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", - (U32)DStatePtr->state, DTableH->tableLog); - BIT_reloadDStream(bitD); - DStatePtr->table = dt + 1; -} - -FORCE_INLINE_TEMPLATE void -ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits) -{ - size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = nextState + lowBits; -} - -/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum - * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32 - * bits before reloading. This value is the maximum number of bytes we read - * after reloading when we are decoding long offsets. - */ -#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ - (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ - ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ - : 0) - -typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; - -/** - * ZSTD_decodeSequence(): - * @p longOffsets : tells the decoder to reload more bit while decoding large offsets - * only used in 32-bit mode - * @return : Sequence (litL + matchL + offset) - */ -FORCE_INLINE_TEMPLATE seq_t -ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq) -{ - seq_t seq; - /* - * ZSTD_seqSymbol is a 64 bits wide structure. - * It can be loaded in one operation - * and its fields extracted by simply shifting or bit-extracting on aarch64. - * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh - * operations that cause performance drop. This can be avoided by using this - * ZSTD_memcpy hack. 
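ZSTD_initFseState()/ZSTD_updateFseStateWithDInfo() above implement the classic FSE decode step: the state is an index into the decode table, and each update reads the entry's nbBits fresh bits and jumps to nextState + bits. Stripped of zstd's types and bit-stream machinery (sketch; the struct and the abstracted bit reader are mine):

/* One FSE decoder step: newState = entry.nextState + next nbBits bits. */
typedef struct { unsigned short nextState; unsigned char nbBits; } fse_entry_sketch;

static size_t fse_step(size_t state, const fse_entry_sketch* table,
                       unsigned (*read_bits)(void* src, unsigned n), void* bitsrc)
{
    fse_entry_sketch const e = table[state];
    return (size_t)e.nextState + read_bits(bitsrc, e.nbBits);
}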
- */ -#if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__)) - ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS; - ZSTD_seqSymbol* const llDInfo = &llDInfoS; - ZSTD_seqSymbol* const mlDInfo = &mlDInfoS; - ZSTD_seqSymbol* const ofDInfo = &ofDInfoS; - ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); - ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); - ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); -#else - const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; - const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; - const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; -#endif - seq.matchLength = mlDInfo->baseValue; - seq.litLength = llDInfo->baseValue; - { U32 const ofBase = ofDInfo->baseValue; - BYTE const llBits = llDInfo->nbAdditionalBits; - BYTE const mlBits = mlDInfo->nbAdditionalBits; - BYTE const ofBits = ofDInfo->nbAdditionalBits; - BYTE const totalBits = llBits+mlBits+ofBits; - - U16 const llNext = llDInfo->nextState; - U16 const mlNext = mlDInfo->nextState; - U16 const ofNext = ofDInfo->nextState; - U32 const llnbBits = llDInfo->nbBits; - U32 const mlnbBits = mlDInfo->nbBits; - U32 const ofnbBits = ofDInfo->nbBits; - - assert(llBits <= MaxLLBits); - assert(mlBits <= MaxMLBits); - assert(ofBits <= MaxOff); - /* - * As gcc has better branch and block analyzers, sometimes it is only - * valuable to mark likeliness for clang, it gives around 3-4% of - * performance. - */ - - /* sequence */ - { size_t offset; - if (ofBits > 1) { - ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); - ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); - ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); - if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - /* Always read extra bits, this keeps the logic simple, - * avoids branches, and avoids accidentally reading 0 bits. - */ - U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - BIT_reloadDStream(&seqState->DStream); - offset += BIT_readBitsFast(&seqState->DStream, extraBits); - } else { - offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); - } - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } else { - U32 const ll0 = (llDInfo->baseValue == 0); - if (LIKELY((ofBits == 0))) { - offset = seqState->prevOffset[ll0]; - seqState->prevOffset[1] = seqState->prevOffset[!ll0]; - seqState->prevOffset[0] = offset; - } else { - offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); - { size_t temp = (offset==3) ? 
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */ - if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } } } - seq.offset = offset; - } - - if (mlBits > 0) - seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); - - if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ - ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - - if (llBits > 0) - seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); - - if (MEM_32bits()) - BIT_reloadDStream(&seqState->DStream); - - DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - - if (!isLastSeq) { - /* don't update FSE state for last Sequence */ - ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ - BIT_reloadDStream(&seqState->DStream); - } - } - - return seq; -} - -#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) -#if DEBUGLEVEL >= 1 -static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) -{ - size_t const windowSize = dctx->fParams.windowSize; - /* No dictionary used. */ - if (dctx->dictContentEndForFuzzing == NULL) return 0; - /* Dictionary is our prefix. */ - if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; - /* Dictionary is not our ext-dict. */ - if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; - /* Dictionary is not within our window size. */ - if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; - /* Dictionary is active. */ - return 1; -} -#endif - -static void ZSTD_assertValidSequence( - ZSTD_DCtx const* dctx, - BYTE const* op, BYTE const* oend, - seq_t const seq, - BYTE const* prefixStart, BYTE const* virtualStart) -{ -#if DEBUGLEVEL >= 1 - if (dctx->isFrameDecompression) { - size_t const windowSize = dctx->fParams.windowSize; - size_t const sequenceSize = seq.litLength + seq.matchLength; - BYTE const* const oLitEnd = op + seq.litLength; - DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - assert(op <= oend); - assert((size_t)(oend - op) >= sequenceSize); - assert(sequenceSize <= ZSTD_blockSizeMax(dctx)); - if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { - size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); - /* Offset must be within the dictionary. */ - assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); - assert(seq.offset <= windowSize + dictSize); - } else { - /* Offset must be within our window. 
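The branch just above implements zstd's repeat-offset rules; restated standalone (sketch per RFC 8878 §3.1.1.5, with rep[] newest-first like seqState->prevOffset and a helper name of mine): offset values 1-3 select a recent offset, shifted by one position when litLength == 0, and the post-shift value 3 means "newest offset minus one".

#include <stddef.h>

static size_t resolve_repeat_offset(size_t rep[3], unsigned v /*1..3*/, int litLengthIsZero)
{
    unsigned const idx = v - 1 + (litLengthIsZero ? 1u : 0u);
    size_t offset;
    if (idx == 0) return rep[0];                   /* most frequent: history unchanged */
    offset = (idx == 3) ? rep[0] - 1 : rep[idx];   /* a result of 0 signals corruption */
    if (idx != 1) rep[2] = rep[1];                 /* rotate chosen offset to the front */
    rep[1] = rep[0];
    rep[0] = offset;
    return offset;
}

The `temp -= !temp` line in the real code serves the corruption case: a zero offset wraps to an enormous value, which ZSTD_execSequence() then rejects with corruption_detected.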
- */
-            assert(seq.offset <= windowSize);
-        }
-    }
-#else
-    (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
-#endif
-}
-#endif
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-
-
-FORCE_INLINE_TEMPLATE size_t
-DONT_VECTORIZE
-ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize, int nbSeq,
-                         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* litBufferEnd = dctx->litBufferEnd;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
-    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq);
-
-    /* Literals are split between internal buffer & output buffer */
-    if (nbSeq) {
-        seqState_t seqState;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        RETURN_ERROR_IF(
-            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
-            corruption_detected, "");
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-        assert(dst != NULL);
-
-        ZSTD_STATIC_ASSERT(
-                BIT_DStream_unfinished < BIT_DStream_completed &&
-                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
-                BIT_DStream_completed < BIT_DStream_overflow);
-
-        /* decompress without overrunning litPtr begins */
-        {   seq_t sequence = {0,0,0};  /* some static analyzers believe that @sequence is not initialized (it necessarily is, since the loop below has at least one iteration) */
-            /* Align the decompression loop to 32 + 16 bytes.
-             *
-             * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
-             * speed swings based on the alignment of the decompression loop. This
-             * performance swing is caused by parts of the decompression loop falling
-             * out of the DSB. The entire decompression loop should fit in the DSB,
-             * when it can't we get much worse performance. You can measure if you've
-             * hit the good case or the bad case with this perf command for some
-             * compressed file test.zst:
-             *
-             *     perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
-             *               -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
-             *
-             * If you see most cycles served out of the MITE you've hit the bad case.
-             * If you see most cycles served out of the DSB you've hit the good case.
-             * If it is pretty even then you may be in an okay case.
-             *
-             * This issue has been reproduced on the following CPUs:
-             * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
-             *             Use Instruments->Counters to get DSB/MITE cycles.
-             *             I never got performance swings, but I was able to
-             *             go from the good case of mostly DSB to half of the
-             *             cycles served from MITE.
- * - Coffeelake: Intel i9-9900k - * - Coffeelake: Intel i7-9700k - * - * I haven't been able to reproduce the instability or DSB misses on any - * of the following CPUS: - * - Haswell - * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH - * - Skylake - * - * Alignment is done for each of the three major decompression loops: - * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer - * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer - * - ZSTD_decompressSequences_body - * Alignment choices are made to minimize large swings on bad cases and influence on performance - * from changes external to this code, rather than to overoptimize on the current commit. - * - * If you are seeing performance stability this script can help test. - * It tests on 4 commits in zstd where I saw performance change. - * - * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 - */ -#if defined(__GNUC__) && defined(__x86_64__) - __asm__(".p2align 6"); -# if __GNUC__ >= 7 - /* good for gcc-7, gcc-9, and gcc-11 */ - __asm__("nop"); - __asm__(".p2align 5"); - __asm__("nop"); - __asm__(".p2align 4"); -# if __GNUC__ == 8 || __GNUC__ == 10 - /* good for gcc-8 and gcc-10 */ - __asm__("nop"); - __asm__(".p2align 3"); -# endif -# endif -#endif - - /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ - for ( ; nbSeq; nbSeq--) { - sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); - if (litPtr + sequence.litLength > dctx->litBufferEnd) break; - { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); -#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); -#endif - if (UNLIKELY(ZSTD_isError(oneSeqSize))) - return oneSeqSize; - DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); - op += oneSeqSize; - } } - DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)"); - - /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ - if (nbSeq > 0) { - const size_t leftoverLit = dctx->litBufferEnd - litPtr; - DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength); - if (leftoverLit) { - RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); - ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); - sequence.litLength -= leftoverLit; - op += leftoverLit; - } - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; - dctx->litBufferLocation = ZSTD_not_in_dst; - { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); -#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); -#endif - if (UNLIKELY(ZSTD_isError(oneSeqSize))) - return oneSeqSize; - DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); - op += oneSeqSize; - } - nbSeq--; - } - } - - if (nbSeq > 0) { - /* there is remaining lit from extra buffer */ - -#if 
defined(__GNUC__) && defined(__x86_64__)
-            __asm__(".p2align 6");
-            __asm__("nop");
-# if __GNUC__ != 7
-            /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */
-            __asm__(".p2align 4");
-            __asm__("nop");
-            __asm__(".p2align 3");
-# elif __GNUC__ >= 11
-            __asm__(".p2align 3");
-# else
-            __asm__(".p2align 5");
-            __asm__("nop");
-            __asm__(".p2align 3");
-# endif
-#endif
-
-            for ( ; nbSeq ; nbSeq--) {
-                seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
-                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-                assert(!ZSTD_isError(oneSeqSize));
-                ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
-#endif
-                if (UNLIKELY(ZSTD_isError(oneSeqSize)))
-                    return oneSeqSize;
-                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
-                op += oneSeqSize;
-            }
-        }
-
-        /* check if reached exact end */
-        DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
-        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
-        DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed);
-        RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    if (dctx->litBufferLocation == ZSTD_split) {
-        /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
-        size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
-        DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize);
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
-        if (op != NULL) {
-            ZSTD_memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-        litPtr = dctx->litExtraBuffer;
-        litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
-        dctx->litBufferLocation = ZSTD_not_in_dst;
-    }
-    /* copy last literals from internal buffer */
-    {   size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
-        DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize);
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
-        if (op != NULL) {
-            ZSTD_memcpy(op, litPtr, lastLLSize);
-            op += lastLLSize;
-    }   }
-
-    DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
-    return (size_t)(op - ostart);
-}
-
-FORCE_INLINE_TEMPLATE size_t
-DONT_VECTORIZE
-ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
-    void* dst, size_t maxDstSize,
-    const void* seqStart, size_t seqSize, int nbSeq,
-    const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ?
ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
-    const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
-    const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
-    DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq);
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seqState_t seqState;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        RETURN_ERROR_IF(
-            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
-            corruption_detected, "");
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-        assert(dst != NULL);
-
-#if defined(__GNUC__) && defined(__x86_64__)
-            __asm__(".p2align 6");
-            __asm__("nop");
-# if __GNUC__ >= 7
-            __asm__(".p2align 5");
-            __asm__("nop");
-            __asm__(".p2align 3");
-# else
-            __asm__(".p2align 4");
-            __asm__("nop");
-            __asm__(".p2align 3");
-# endif
-#endif
-
-        for ( ; nbSeq ; nbSeq--) {
-            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
-            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-            assert(!ZSTD_isError(oneSeqSize));
-            ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
-#endif
-            if (UNLIKELY(ZSTD_isError(oneSeqSize)))
-                return oneSeqSize;
-            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        assert(nbSeq == 0);
-        RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = (size_t)(litEnd - litPtr);
-        DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize);
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
-        if (op != NULL) {
-            ZSTD_memcpy(op, litPtr, lastLLSize);
-            op += lastLLSize;
-    }   }
-
-    DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
-    return (size_t)(op - ostart);
-}
-
-static size_t
-ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-static size_t
-ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
-                                               void* dst, size_t maxDstSize,
-                                         const void* seqStart, size_t seqSize, int nbSeq,
-                                         const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-
-FORCE_INLINE_TEMPLATE
-
-size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
-                   const BYTE* const prefixStart, const BYTE* const dictEnd)
-{
-    prefetchPos += sequence.litLength;
-    {   const BYTE* const matchBase = (sequence.offset > prefetchPos) ?
dictEnd : prefixStart;
-        /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
-         * No consequence though : memory address is only used for prefetching, not for dereferencing */
-        const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
-        PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE);   /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
-    }
-    return prefetchPos + sequence.matchLength;
-}
-
-/* This decoding function employs prefetching
- * to reduce latency impact of cache misses.
- * It's generally employed when block contains a significant portion of long-distance matches
- * or when coupled with a "cold" dictionary */
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_decompressSequencesLong_body(
-               ZSTD_DCtx* dctx,
-               void* dst, size_t maxDstSize,
-         const void* seqStart, size_t seqSize, int nbSeq,
-         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* litBufferEnd = dctx->litBufferEnd;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
-    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-
-    /* Regen sequences */
-    if (nbSeq) {
-#define STORED_SEQS 8
-#define STORED_SEQS_MASK (STORED_SEQS-1)
-#define ADVANCED_SEQS STORED_SEQS
-        seq_t sequences[STORED_SEQS];
-        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
-        seqState_t seqState;
-        int seqNb;
-        size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
-
-        dctx->fseEntropy = 1;
-        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        assert(dst != NULL);
-        assert(iend >= ip);
-        RETURN_ERROR_IF(
-            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
-            corruption_detected, "");
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
-        /* prepare in advance */
-        for (seqNb=0; seqNb<seqAdvance; seqNb++) {
-            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
-            prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
-            sequences[seqNb] = sequence;
-        }
-
-        /* decompress & accumulate sequences (in parallel) */
-        for (seqNb = seqAdvance; seqNb < nbSeq; seqNb++) {
-            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
-            if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
-                /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
-                const size_t leftoverLit = dctx->litBufferEnd - litPtr;
-                if (leftoverLit)
-                {
-                    RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
-                    ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
-                    sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
-                    op += leftoverLit;
-                }
-                litPtr = dctx->litExtraBuffer;
-                litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
-                dctx->litBufferLocation = ZSTD_not_in_dst;
-                {   size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-                    assert(!ZSTD_isError(oneSeqSize));
-                    ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart,
dictStart);
-#endif
-                    if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-
-                    prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
-                    sequences[seqNb & STORED_SEQS_MASK] = sequence;
-                    op += oneSeqSize;
-            }   }
-            else
-            {
-                /* lit buffer is either wholly contained in first or second split, or not split at all*/
-                size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
-                    ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
-                    ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-                assert(!ZSTD_isError(oneSeqSize));
-                ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
-#endif
-                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-
-                prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
-                sequences[seqNb & STORED_SEQS_MASK] = sequence;
-                op += oneSeqSize;
-            }
-        }
-        RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
-
-        /* finish queue */
-        seqNb -= seqAdvance;
-        for ( ; seqNb<nbSeq ; seqNb++) {
-            seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
-            if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
-                const size_t leftoverLit = dctx->litBufferEnd - litPtr;
-                if (leftoverLit) {
-                    RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
-                    ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
-                    sequence->litLength -= leftoverLit;
-                    op += leftoverLit;
-                }
-                litPtr = dctx->litExtraBuffer;
-                litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
-                dctx->litBufferLocation = ZSTD_not_in_dst;
-                {   size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-                    assert(!ZSTD_isError(oneSeqSize));
-                    ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
-#endif
-                    if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-                    op += oneSeqSize;
-                }
-            }
-            else
-            {
-                size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
-                    ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
-                    ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
-#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
-                assert(!ZSTD_isError(oneSeqSize));
-                ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
-#endif
-                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-                op += oneSeqSize;
-            }
-        }
-
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
-        size_t const lastLLSize = litBufferEnd - litPtr;
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
-        if (op != NULL) {
-            ZSTD_memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-        litPtr = dctx->litExtraBuffer;
-        litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
-    }
-    {   size_t const lastLLSize = litBufferEnd - litPtr;
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
-        if (op != NULL) {
-            ZSTD_memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-    }
-
-    return (size_t)(op - ostart);
-}
-
-static size_t
-ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-
-
-#if DYNAMIC_BMI2
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-static BMI2_TARGET_ATTRIBUTE size_t
-DONT_VECTORIZE
-ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-static BMI2_TARGET_ATTRIBUTE size_t
-DONT_VECTORIZE
-ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-static BMI2_TARGET_ATTRIBUTE size_t
-ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-#endif /* DYNAMIC_BMI2 */
-
-typedef size_t (*ZSTD_decompressSequences_t)(
-                            ZSTD_DCtx* dctx,
-                            void* dst, size_t maxDstSize,
-                            const void* seqStart, size_t seqSize, int nbSeq,
-                            const ZSTD_longOffset_e isLongOffset);
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-static size_t
-ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
-                   const void* seqStart, size_t seqSize, int nbSeq,
-                   const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequences");
-#if DYNAMIC_BMI2
-    if (ZSTD_DCtx_get_bmi2(dctx))
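// For orientation, a sketch (hypothetical names, not the library's API) of the
// DYNAMIC_BMI2 pattern used by the wrappers above: the same body is compiled
// twice -- once with BMI2 codegen enabled via BMI2_TARGET_ATTRIBUTE -- and the
// variant is selected per-context at runtime:
//
//     size_t decode(ZSTD_DCtx* d /* , ... */) {
//     #if DYNAMIC_BMI2
//         if (ZSTD_DCtx_get_bmi2(d)) return decode_bmi2(d);   // BMI2 build
//     #endif
//         return decode_default(d);                           // portable build
//     }
//
// This keeps a single source of truth for the algorithm while still emitting
// specialized machine code on CPUs that support BMI2.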
{
-        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-    return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-static size_t
-ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
-                                 const void* seqStart, size_t seqSize, int nbSeq,
-                                 const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
-#if DYNAMIC_BMI2
-    if (ZSTD_DCtx_get_bmi2(dctx)) {
-        return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-    return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-/* ZSTD_decompressSequencesLong() :
- * decompression function triggered when a minimum share of offsets is considered "long",
- * aka out of cache.
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
- * This function will try to mitigate main memory latency through the use of prefetching */
-static size_t
-ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
-                             void* dst, size_t maxDstSize,
-                             const void* seqStart, size_t seqSize, int nbSeq,
-                             const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
-#if DYNAMIC_BMI2
-    if (ZSTD_DCtx_get_bmi2(dctx)) {
-        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-    return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-
-/**
- * @returns The total size of the history referenceable by zstd, including
- * both the prefix and the extDict. At @p op any offset larger than this
- * is invalid.
- */
-static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart)
-{
-    return (size_t)(op - virtualStart);
-}
-
-typedef struct {
-    unsigned longOffsetShare;
-    unsigned maxNbAdditionalBits;
-} ZSTD_OffsetInfo;
-
-/* ZSTD_getOffsetInfo() :
- * condition : offTable must be valid
- * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
- *           compared to maximum possible of (1<<OffFSELog) */
-static ZSTD_OffsetInfo
-ZSTD_getOffsetInfo(const ZSTD_seqSymbol* offTable, int nbSeq)
-{
-    ZSTD_OffsetInfo info = {0, 0};
-    /* If nbSeq == 0, then the offTable is uninitialized, but we have
-     * no sequences, so both values should be 0. */
-    if (nbSeq != 0) {
-        const void* ptr = offTable;
-        U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
-        const ZSTD_seqSymbol* table = offTable + 1;
-        U32 const max = 1 << tableLog;
-        U32 u;
-        DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
-
-        assert(max <= (1 << OffFSELog));  /* max not too large */
-        for (u=0; u<max; u++) {
-            info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits);
-            if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1;
-        }
-
-        assert(tableLog <= OffFSELog);
-        info.longOffsetShare <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
-    }
-
-    return info;
-}
-
-/**
- * @returns The maximum offset we can decode in one read of our bitstream, without
- * reloading more bits in the middle of the offset bits read. Any offsets larger
- * than this must use the long offset decoder.
- */
-static size_t ZSTD_maxShortOffset(void)
-{
-    if (MEM_64bits()) {
-        /* We can decode any offset without reloading bits.
-         * This might change if the max window size grows.
-         */
-        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-        return (size_t)-1;
-    } else {
-        /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1.
-         * This offBase would require STREAM_ACCUMULATOR_MIN extra bits.
-         * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset.
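To make the arithmetic just above concrete, a worked example. The value 25 for the 32-bit bitstream accumulator guarantee and ZSTD_REP_NUM == 3 are assumptions taken from the upstream zstd headers, not established by this diff:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t const accMin     = 25;                              // assumed STREAM_ACCUMULATOR_MIN on 32-bit
        size_t const maxOffbase = ((size_t)1 << (accMin + 1)) - 1; // 0x3FFFFFF
        size_t const maxOffset  = maxOffbase - 3;                  // minus ZSTD_REP_NUM
        assert(maxOffset == 0x3FFFFFC);                            // ~64 MiB: largest "short" offset
        return 0;
    }

Any offset above that bound on a 32-bit build would need a mid-read reload, which is exactly what the long-offset decoder path exists to handle.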
- */ - size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; - size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; - assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); - return maxOffset; - } -} - -size_t -ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const streaming_operation streaming) -{ /* blockType == blockCompressed */ - const BYTE* ip = (const BYTE*)src; - DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize); - - /* Note : the wording of the specification - * allows compressed block to be sized exactly ZSTD_blockSizeMax(dctx). - * This generally does not happen, as it makes little sense, - * since an uncompressed block would feature same size and have no decompression cost. - * Also, note that decoder from reference libzstd before < v1.5.4 - * would consider this edge case as an error. - * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx) - * for broader compatibility with the deployed ecosystem of zstd decoders */ - RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, ""); - - /* Decode literals section */ - { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); - DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize); - if (ZSTD_isError(litCSize)) return litCSize; - ip += litCSize; - srcSize -= litCSize; - } - - /* Build Decoding Tables */ - { - /* Compute the maximum block size, which must also work when !frame and fParams are unset. - * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. - */ - size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); - size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart); - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than ZSTD_maxShortOffset(). - * We don't expect that to be the case in 64-bit mode. - * - * We check here to see if our history is large enough to allow long offsets. - * If it isn't, then we can't possible have (valid) long offsets. If the offset - * is invalid, then it is okay to read it incorrectly. - * - * If isLongOffsets is true, then we will later check our decoding table to see - * if it is even possible to generate long offsets. - */ - ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); - /* These macros control at build-time which decompressor implementation - * we use. If neither is defined, we do some inspection and dispatch at - * runtime. - */ -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - int usePrefetchDecoder = dctx->ddictIsCold; -#else - /* Set to 1 to avoid computing offset info if we don't need to. - * Otherwise this value is ignored. 
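The runtime choice the following code makes can be condensed into one small function. A sketch with hypothetical names; the minShare constants and the cold-dictionary rule are taken from the heuristic visible below:

    typedef enum { decoder_short = 0, decoder_long = 1 } decoder_e;

    // Condensed model of the dispatch below (not the library's API):
    static decoder_e pick_decoder(int ddictIsCold, unsigned longOffsetShare, int is64bits)
    {
        unsigned const minShare = is64bits ? 7 : 20;   // ~2.73% and ~7.81% of (1 << OffFSELog)
        if (ddictIsCold) return decoder_long;          // prefetching hides cold-dictionary misses
        return (longOffsetShare >= minShare) ? decoder_long : decoder_short;
    }

The idea is that prefetching only pays for itself when enough matches are expected to miss cache, either because the offsets are statistically long or because the dictionary has not been touched recently.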
- */ - int usePrefetchDecoder = 1; -#endif - int nbSeq; - size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); - if (ZSTD_isError(seqHSize)) return seqHSize; - ip += seqHSize; - srcSize -= seqHSize; - - RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); - RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall, - "invalid dst"); - - /* If we could potentially have long offsets, or we might want to use the prefetch decoder, - * compute information about the share of long offsets, and the maximum nbAdditionalBits. - * NOTE: could probably use a larger nbSeq limit - */ - if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { - ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); - if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { - /* If isLongOffset, but the maximum number of additional bits that we see in our table is small - * enough, then we know it is impossible to have too long an offset in this block, so we can - * use the regular offset decoder. - */ - isLongOffset = ZSTD_lo_isRegularOffset; - } - if (!usePrefetchDecoder) { - U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ - usePrefetchDecoder = (info.longOffsetShare >= minShare); - } - } - - dctx->ddictIsCold = 0; - -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if (usePrefetchDecoder) { -#else - (void)usePrefetchDecoder; - { -#endif -#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT - return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); -#endif - } - -#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG - /* else */ - if (dctx->litBufferLocation == ZSTD_split) - return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); - else - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); -#endif - } -} - - -ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) -{ - if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ - dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); - dctx->prefixStart = dst; - dctx->previousDstEnd = dst; - } -} - - -size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - size_t dSize; - dctx->isFrameDecompression = 0; - ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming); - FORWARD_IF_ERROR(dSize, ""); - dctx->previousDstEnd = (char*)dst + dSize; - return dSize; -} - - -/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ -size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) -{ - return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); -} diff --git a/zstandard_android/src/dictBuilder/divsufsort.h b/zstandard_android/src/dictBuilder/divsufsort.h deleted file mode 100644 index 5440994..0000000 --- a/zstandard_android/src/dictBuilder/divsufsort.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * divsufsort.h for libdivsufsort-lite - * Copyright (c) 2003-2008 Yuta 
Mori All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _DIVSUFSORT_H -#define _DIVSUFSORT_H 1 - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - - -/*- Prototypes -*/ - -/** - * Constructs the suffix array of a given string. - * @param T [0..n-1] The input string. - * @param SA [0..n-1] The output array of suffixes. - * @param n The length of the given string. - * @param openMP enables OpenMP optimization. - * @return 0 if no error occurred, -1 or -2 otherwise. - */ -int -divsufsort(const unsigned char *T, int *SA, int n, int openMP); - -/** - * Constructs the burrows-wheeler transformed string of a given string. - * @param T [0..n-1] The input string. - * @param U [0..n-1] The output string. (can be T) - * @param A [0..n-1] The temporary array. (can be NULL) - * @param n The length of the given string. - * @param num_indexes The length of secondary indexes array. (can be NULL) - * @param indexes The secondary indexes array. (can be NULL) - * @param openMP enables OpenMP optimization. - * @return The primary index if no error occurred, -1 or -2 otherwise. - */ -int -divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); - - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#endif /* _DIVSUFSORT_H */ diff --git a/zstandard_android/src/dictBuilder/fastcover.c b/zstandard_android/src/dictBuilder/fastcover.c deleted file mode 100644 index a958eb3..0000000 --- a/zstandard_android/src/dictBuilder/fastcover.c +++ /dev/null @@ -1,766 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
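Looking back at the divsufsort() prototype documented in the header above, it is straightforward to drive. A hedged usage sketch (error handling simplified; the wrapper name is hypothetical):

    #include <stdlib.h>

    int divsufsort(const unsigned char *T, int *SA, int n, int openMP);  // from the header above

    // Builds the suffix array of text[0..n-1]; on success SA[i] holds the
    // start index of the i-th smallest suffix. Returns 0 on success.
    static int build_suffix_array(const unsigned char* text, int* SA, int n)
    {
        if (text == NULL || SA == NULL || n < 0) return -1;
        return divsufsort(text, SA, n, 0);   // 0: no OpenMP
    }

The caller owns both buffers; SA must have room for n ints, matching the [0..n-1] contract in the documentation.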
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h>   /* clock */
-
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#  define ZDICT_STATIC_LINKING_ONLY
-#endif
-
-#include "../common/mem.h" /* read */
-#include "../common/pool.h"
-#include "../common/threading.h"
-#include "../common/zstd_internal.h" /* includes zstd.h */
-#include "../compress/zstd_compress_internal.h" /* ZSTD_hash*() */
-#include "../zdict.h"
-#include "cover.h"
-
-
-/*-*************************************
-* Constants
-***************************************/
-/**
-* There are 32bit indexes used to ref samples, so limit samples size to 4GB
-* on 64bit builds.
-* For 32bit builds we choose 1 GB.
-* Most 32bit platforms have 2GB user-mode addressable space and we allocate a large
-* contiguous buffer, so 1GB is already a high limit.
-*/
-#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
-#define FASTCOVER_MAX_F 31
-#define FASTCOVER_MAX_ACCEL 10
-#define FASTCOVER_DEFAULT_SPLITPOINT 0.75
-#define DEFAULT_F 20
-#define DEFAULT_ACCEL 1
-
-
-/*-*************************************
-* Console display
-***************************************/
-#ifndef LOCALDISPLAYLEVEL
-static int g_displayLevel = 0;
-#endif
-#undef  DISPLAY
-#define DISPLAY(...)                             \
-    {                                            \
-        fprintf(stderr, __VA_ARGS__);            \
-        fflush(stderr);                          \
-    }
-#undef  LOCALDISPLAYLEVEL
-#define LOCALDISPLAYLEVEL(displayLevel, l, ...)  \
-    if (displayLevel >= l) {                     \
-        DISPLAY(__VA_ARGS__);                    \
-    } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
-#undef  DISPLAYLEVEL
-#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
-
-#ifndef LOCALDISPLAYUPDATE
-static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
-#endif
-#undef  LOCALDISPLAYUPDATE
-#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                         \
-    if (displayLevel >= l) {                                             \
-        if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
-            g_time = clock();                                            \
-            DISPLAY(__VA_ARGS__);                                        \
-        }                                                                \
-    }
-#undef  DISPLAYUPDATE
-#define DISPLAYUPDATE(l, ...)
LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) - - -/*-************************************* -* Hash Functions -***************************************/ -/** - * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector - */ -static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 f, unsigned d) { - if (d == 6) { - return ZSTD_hash6Ptr(p, f); - } - return ZSTD_hash8Ptr(p, f); -} - - -/*-************************************* -* Acceleration -***************************************/ -typedef struct { - unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */ - unsigned skip; /* Number of dmer skipped between each dmer counted in computeFrequency */ -} FASTCOVER_accel_t; - - -static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = { - { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */ - { 100, 0 }, /* accel = 1 */ - { 50, 1 }, /* accel = 2 */ - { 34, 2 }, /* accel = 3 */ - { 25, 3 }, /* accel = 4 */ - { 20, 4 }, /* accel = 5 */ - { 17, 5 }, /* accel = 6 */ - { 14, 6 }, /* accel = 7 */ - { 13, 7 }, /* accel = 8 */ - { 11, 8 }, /* accel = 9 */ - { 10, 9 }, /* accel = 10 */ -}; - - -/*-************************************* -* Context -***************************************/ -typedef struct { - const BYTE *samples; - size_t *offsets; - const size_t *samplesSizes; - size_t nbSamples; - size_t nbTrainSamples; - size_t nbTestSamples; - size_t nbDmers; - U32 *freqs; - unsigned d; - unsigned f; - FASTCOVER_accel_t accelParams; -} FASTCOVER_ctx_t; - - -/*-************************************* -* Helper functions -***************************************/ -/** - * Selects the best segment in an epoch. - * Segments of are scored according to the function: - * - * Let F(d) be the frequency of all dmers with hash value d. - * Let S_i be hash value of the dmer at position i of segment S which has length k. - * - * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) - * - * Once the dmer with hash value d is in the dictionary we set F(d) = 0. - */ -static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx, - U32 *freqs, U32 begin, U32 end, - ZDICT_cover_params_t parameters, - U16* segmentFreqs) { - /* Constants */ - const U32 k = parameters.k; - const U32 d = parameters.d; - const U32 f = ctx->f; - const U32 dmersInK = k - d + 1; - - /* Try each segment (activeSegment) and save the best (bestSegment) */ - COVER_segment_t bestSegment = {0, 0, 0}; - COVER_segment_t activeSegment; - - /* Reset the activeDmers in the segment */ - /* The activeSegment starts at the beginning of the epoch. */ - activeSegment.begin = begin; - activeSegment.end = begin; - activeSegment.score = 0; - - /* Slide the activeSegment through the whole epoch. - * Save the best segment in bestSegment. 
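The Score(S) definition documented above, combined with the first-occurrence rule the loop below applies, reduces to a small amount of bookkeeping. A self-contained sketch (names hypothetical; seen[] plays the role of segmentFreqs):

    #include <stddef.h>
    #include <stdint.h>

    // Score of window [begin, end): the sum of freqs[h] over the distinct
    // dmer hashes h in the window; seen[] counts occurrences per hash so a
    // hash contributes only on its first appearance.
    static uint32_t window_score(const uint32_t* freqs, uint16_t* seen,
                                 const size_t* hashes, size_t begin, size_t end)
    {
        uint32_t score = 0;
        size_t i;
        for (i = begin; i < end; i++) {
            if (seen[hashes[i]]++ == 0)    // first occurrence in this window
                score += freqs[hashes[i]];
        }
        return score;
    }

The real loop below never rescans the window; it slides by one dmer at a time, adding the incoming hash and subtracting the outgoing one, which is what makes the epoch pass linear.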
- */ - while (activeSegment.end < end) { - /* Get hash value of current dmer */ - const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); - - /* Add frequency of this index to score if this is the first occurrence of index in active segment */ - if (segmentFreqs[idx] == 0) { - activeSegment.score += freqs[idx]; - } - /* Increment end of segment and segmentFreqs*/ - activeSegment.end += 1; - segmentFreqs[idx] += 1; - /* If the window is now too large, drop the first position */ - if (activeSegment.end - activeSegment.begin == dmersInK + 1) { - /* Get hash value of the dmer to be eliminated from active segment */ - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); - segmentFreqs[delIndex] -= 1; - /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ - if (segmentFreqs[delIndex] == 0) { - activeSegment.score -= freqs[delIndex]; - } - /* Increment start of segment */ - activeSegment.begin += 1; - } - - /* If this segment is the best so far save it */ - if (activeSegment.score > bestSegment.score) { - bestSegment = activeSegment; - } - } - - /* Zero out rest of segmentFreqs array */ - while (activeSegment.begin < end) { - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); - segmentFreqs[delIndex] -= 1; - activeSegment.begin += 1; - } - - { - /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ - U32 pos; - for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { - const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); - freqs[i] = 0; - } - } - - return bestSegment; -} - - -static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, - size_t maxDictSize, unsigned f, - unsigned accel) { - /* k, d, and f are required parameters */ - if (parameters.d == 0 || parameters.k == 0) { - return 0; - } - /* d has to be 6 or 8 */ - if (parameters.d != 6 && parameters.d != 8) { - return 0; - } - /* k <= maxDictSize */ - if (parameters.k > maxDictSize) { - return 0; - } - /* d <= k */ - if (parameters.d > parameters.k) { - return 0; - } - /* 0 < f <= FASTCOVER_MAX_F*/ - if (f > FASTCOVER_MAX_F || f == 0) { - return 0; - } - /* 0 < splitPoint <= 1 */ - if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { - return 0; - } - /* 0 < accel <= 10 */ - if (accel > 10 || accel == 0) { - return 0; - } - return 1; -} - - -/** - * Clean up a context initialized with `FASTCOVER_ctx_init()`. - */ -static void -FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) -{ - if (!ctx) return; - - free(ctx->freqs); - ctx->freqs = NULL; - - free(ctx->offsets); - ctx->offsets = NULL; -} - - -/** - * Calculate for frequency of hash value of each dmer in ctx->samples - */ -static void -FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) -{ - const unsigned f = ctx->f; - const unsigned d = ctx->d; - const unsigned skip = ctx->accelParams.skip; - const unsigned readLength = MAX(d, 8); - size_t i; - assert(ctx->nbTrainSamples >= 5); - assert(ctx->nbTrainSamples <= ctx->nbSamples); - for (i = 0; i < ctx->nbTrainSamples; i++) { - size_t start = ctx->offsets[i]; /* start of current dmer */ - size_t const currSampleEnd = ctx->offsets[i+1]; - while (start + readLength <= currSampleEnd) { - const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); - freqs[dmerIndex]++; - start = start + skip + 1; - } - } -} - - -/** - * Prepare a context for dictionary building. 
- * The context is only dependent on the parameter `d` and can be used multiple - * times. - * Returns 0 on success or error code on error. - * The context must be destroyed with `FASTCOVER_ctx_destroy()`. - */ -static size_t -FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, - const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples, - unsigned d, double splitPoint, unsigned f, - FASTCOVER_accel_t accelParams) -{ - const BYTE* const samples = (const BYTE*)samplesBuffer; - const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); - /* Split samples into testing and training sets */ - const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; - const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; - const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; - const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; - - /* Checks */ - if (totalSamplesSize < MAX(d, sizeof(U64)) || - totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) { - DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", - (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20)); - return ERROR(srcSize_wrong); - } - - /* Check if there are at least 5 training samples */ - if (nbTrainSamples < 5) { - DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples); - return ERROR(srcSize_wrong); - } - - /* Check if there's testing sample */ - if (nbTestSamples < 1) { - DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples); - return ERROR(srcSize_wrong); - } - - /* Zero the context */ - memset(ctx, 0, sizeof(*ctx)); - DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, - (unsigned)trainingSamplesSize); - DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, - (unsigned)testSamplesSize); - - ctx->samples = samples; - ctx->samplesSizes = samplesSizes; - ctx->nbSamples = nbSamples; - ctx->nbTrainSamples = nbTrainSamples; - ctx->nbTestSamples = nbTestSamples; - ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; - ctx->d = d; - ctx->f = f; - ctx->accelParams = accelParams; - - /* The offsets of each file */ - ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t)); - if (ctx->offsets == NULL) { - DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n"); - FASTCOVER_ctx_destroy(ctx); - return ERROR(memory_allocation); - } - - /* Fill offsets from the samplesSizes */ - { U32 i; - ctx->offsets[0] = 0; - assert(nbSamples >= 5); - for (i = 1; i <= nbSamples; ++i) { - ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; - } - } - - /* Initialize frequency array of size 2^f */ - ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32)); - if (ctx->freqs == NULL) { - DISPLAYLEVEL(1, "Failed to allocate frequency table \n"); - FASTCOVER_ctx_destroy(ctx); - return ERROR(memory_allocation); - } - - DISPLAYLEVEL(2, "Computing frequencies\n"); - FASTCOVER_computeFrequency(ctx->freqs, ctx); - - return 0; -} - - -/** - * Given the prepared context build the dictionary. 
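The splitPoint arithmetic in FASTCOVER_ctx_init above, in numbers (sample count hypothetical):

    // splitPoint = 0.75 over 100 samples: 75 train, 25 test, matching the
    // nbTrainSamples/nbTestSamples rules above.
    static void split_example(void)
    {
        unsigned const nbSamples  = 100;
        double   const splitPoint = 0.75;
        unsigned const nbTrain = (splitPoint < 1.0) ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
        unsigned const nbTest  = (splitPoint < 1.0) ? (nbSamples - nbTrain) : nbSamples;
        // nbTrain == 75, nbTest == 25; with splitPoint == 1.0 all 100 samples
        // are used for both training and testing.
        (void)nbTrain; (void)nbTest;
    }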
- */ -static size_t -FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, - U32* freqs, - void* dictBuffer, size_t dictBufferCapacity, - ZDICT_cover_params_t parameters, - U16* segmentFreqs) -{ - BYTE *const dict = (BYTE *)dictBuffer; - size_t tail = dictBufferCapacity; - /* Divide the data into epochs. We will select one segment from each epoch. */ - const COVER_epoch_info_t epochs = COVER_computeEpochs( - (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1); - const size_t maxZeroScoreRun = 10; - size_t zeroScoreRun = 0; - size_t epoch; - DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", - (U32)epochs.num, (U32)epochs.size); - /* Loop through the epochs until there are no more segments or the dictionary - * is full. - */ - for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { - const U32 epochBegin = (U32)(epoch * epochs.size); - const U32 epochEnd = epochBegin + epochs.size; - size_t segmentSize; - /* Select a segment */ - COVER_segment_t segment = FASTCOVER_selectSegment( - ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs); - - /* If the segment covers no dmers, then we are out of content. - * There may be new content in other epochs, for continue for some time. - */ - if (segment.score == 0) { - if (++zeroScoreRun >= maxZeroScoreRun) { - break; - } - continue; - } - zeroScoreRun = 0; - - /* Trim the segment if necessary and if it is too small then we are done */ - segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); - if (segmentSize < parameters.d) { - break; - } - - /* We fill the dictionary from the back to allow the best segments to be - * referenced with the smallest offsets. - */ - tail -= segmentSize; - memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); - DISPLAYUPDATE( - 2, "\r%u%% ", - (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); - } - DISPLAYLEVEL(2, "\r%79s\r", ""); - return tail; -} - -/** - * Parameters for FASTCOVER_tryParameters(). - */ -typedef struct FASTCOVER_tryParameters_data_s { - const FASTCOVER_ctx_t* ctx; - COVER_best_t* best; - size_t dictBufferCapacity; - ZDICT_cover_params_t parameters; -} FASTCOVER_tryParameters_data_t; - - -/** - * Tries a set of parameters and updates the COVER_best_t with the results. - * This function is thread safe if zstd is compiled with multithreaded support. - * It takes its parameters as an *OWNING* opaque pointer to support threading. 
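FASTCOVER_buildDictionary above fills the buffer back-to-front so the best segments land at the smallest offsets. The core move, isolated (helper name hypothetical; the body restates the tail/memcpy step above):

    #include <string.h>

    // Places one selected segment at the front of the still-unfilled region
    // [0, tail); returns the new tail. Caller guarantees segSize <= tail.
    static size_t place_segment(unsigned char* dict, size_t tail,
                                const unsigned char* seg, size_t segSize)
    {
        tail -= segSize;
        memcpy(dict + tail, seg, segSize);
        return tail;
    }

Small offsets are cheaper to encode, so segments copied last (the earliest, highest-scoring picks end up nearest the dictionary's end) get referenced most cheaply during compression.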
- */ -static void FASTCOVER_tryParameters(void* opaque) -{ - /* Save parameters as local variables */ - FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t*)opaque; - const FASTCOVER_ctx_t *const ctx = data->ctx; - const ZDICT_cover_params_t parameters = data->parameters; - size_t dictBufferCapacity = data->dictBufferCapacity; - size_t totalCompressedSize = ERROR(GENERIC); - /* Initialize array to keep track of frequency of dmer within activeSegment */ - U16* segmentFreqs = (U16*)calloc(((U64)1 << ctx->f), sizeof(U16)); - /* Allocate space for hash table, dict, and freqs */ - BYTE *const dict = (BYTE*)malloc(dictBufferCapacity); - COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); - U32* freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); - if (!segmentFreqs || !dict || !freqs) { - DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); - goto _cleanup; - } - /* Copy the frequencies because we need to modify them */ - memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32)); - /* Build the dictionary */ - { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, - parameters, segmentFreqs); - - const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); - selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, - ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, - totalCompressedSize); - - if (COVER_dictSelectionIsError(selection)) { - DISPLAYLEVEL(1, "Failed to select dictionary\n"); - goto _cleanup; - } - } -_cleanup: - free(dict); - COVER_best_finish(data->best, parameters, selection); - free(data); - free(segmentFreqs); - COVER_dictSelectionFree(selection); - free(freqs); -} - - -static void -FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, - ZDICT_cover_params_t* coverParams) -{ - coverParams->k = fastCoverParams.k; - coverParams->d = fastCoverParams.d; - coverParams->steps = fastCoverParams.steps; - coverParams->nbThreads = fastCoverParams.nbThreads; - coverParams->splitPoint = fastCoverParams.splitPoint; - coverParams->zParams = fastCoverParams.zParams; - coverParams->shrinkDict = fastCoverParams.shrinkDict; -} - - -static void -FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, - ZDICT_fastCover_params_t* fastCoverParams, - unsigned f, unsigned accel) -{ - fastCoverParams->k = coverParams.k; - fastCoverParams->d = coverParams.d; - fastCoverParams->steps = coverParams.steps; - fastCoverParams->nbThreads = coverParams.nbThreads; - fastCoverParams->splitPoint = coverParams.splitPoint; - fastCoverParams->f = f; - fastCoverParams->accel = accel; - fastCoverParams->zParams = coverParams.zParams; - fastCoverParams->shrinkDict = coverParams.shrinkDict; -} - - -ZDICTLIB_STATIC_API size_t -ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t parameters) -{ - BYTE* const dict = (BYTE*)dictBuffer; - FASTCOVER_ctx_t ctx; - ZDICT_cover_params_t coverParams; - FASTCOVER_accel_t accelParams; - /* Initialize global data */ - g_displayLevel = (int)parameters.zParams.notificationLevel; - /* Assign splitPoint and f if not provided */ - parameters.splitPoint = 1.0; - parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; - parameters.accel = parameters.accel == 0 ? 
DEFAULT_ACCEL : parameters.accel; - /* Convert to cover parameter */ - memset(&coverParams, 0 , sizeof(coverParams)); - FASTCOVER_convertToCoverParams(parameters, &coverParams); - /* Checks */ - if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, - parameters.accel)) { - DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); - return ERROR(parameter_outOfBound); - } - if (nbSamples == 0) { - DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); - return ERROR(srcSize_wrong); - } - if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", - ZDICT_DICTSIZE_MIN); - return ERROR(dstSize_tooSmall); - } - /* Assign corresponding FASTCOVER_accel_t to accelParams*/ - accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; - /* Initialize context */ - { - size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, - coverParams.d, parameters.splitPoint, parameters.f, - accelParams); - if (ZSTD_isError(initVal)) { - DISPLAYLEVEL(1, "Failed to initialize context\n"); - return initVal; - } - } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); - /* Build the dictionary */ - DISPLAYLEVEL(2, "Building dictionary\n"); - { - /* Initialize array to keep track of frequency of dmer within activeSegment */ - U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16)); - const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, - dictBufferCapacity, coverParams, segmentFreqs); - const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); - const size_t dictionarySize = ZDICT_finalizeDictionary( - dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); - if (!ZSTD_isError(dictionarySize)) { - DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", - (unsigned)dictionarySize); - } - FASTCOVER_ctx_destroy(&ctx); - free(segmentFreqs); - return dictionarySize; - } -} - - -ZDICTLIB_STATIC_API size_t -ZDICT_optimizeTrainFromBuffer_fastCover( - void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t* parameters) -{ - ZDICT_cover_params_t coverParams; - FASTCOVER_accel_t accelParams; - /* constants */ - const unsigned nbThreads = parameters->nbThreads; - const double splitPoint = - parameters->splitPoint <= 0.0 ? FASTCOVER_DEFAULT_SPLITPOINT : parameters->splitPoint; - const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; - const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; - const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; - const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; - const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; - const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); - const unsigned kIterations = - (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); - const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f; - const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel; - const unsigned shrinkDict = 0; - /* Local variables */ - const int displayLevel = (int)parameters->zParams.notificationLevel; - unsigned iteration = 1; - unsigned d; - unsigned k; - COVER_best_t best; - POOL_ctx *pool = NULL; - int warned = 0; - /* Checks */ - if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); - return ERROR(parameter_outOfBound); - } - if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); - return ERROR(parameter_outOfBound); - } - if (kMinK < kMaxD || kMaxK < kMinK) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); - return ERROR(parameter_outOfBound); - } - if (nbSamples == 0) { - LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); - return ERROR(srcSize_wrong); - } - if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { - LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", - ZDICT_DICTSIZE_MIN); - return ERROR(dstSize_tooSmall); - } - if (nbThreads > 1) { - pool = POOL_create(nbThreads, 1); - if (!pool) { - return ERROR(memory_allocation); - } - } - /* Initialization */ - COVER_best_init(&best); - memset(&coverParams, 0 , sizeof(coverParams)); - FASTCOVER_convertToCoverParams(*parameters, &coverParams); - accelParams = FASTCOVER_defaultAccelParameters[accel]; - /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; - /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", - kIterations); - for (d = kMinD; d <= kMaxD; d += 2) { - /* Initialize the context for this value of d */ - FASTCOVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); - { - size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); - if (ZSTD_isError(initVal)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); - COVER_best_destroy(&best); - POOL_free(pool); - return initVal; - } - } - if (!warned) { - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); - warned = 1; - } - /* Loop through k reusing the same context */ - for (k = kMinK; k <= kMaxK; k += kStepSize) { - /* Prepare the arguments */ - FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( - sizeof(FASTCOVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); - if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); - COVER_best_destroy(&best); - FASTCOVER_ctx_destroy(&ctx); - POOL_free(pool); - return ERROR(memory_allocation); - } - data->ctx = &ctx; - data->best = &best; - data->dictBufferCapacity = dictBufferCapacity; - data->parameters = coverParams; - data->parameters.k = k; - data->parameters.d = d; - data->parameters.splitPoint = splitPoint; - data->parameters.steps = kSteps; - data->parameters.shrinkDict = shrinkDict; - data->parameters.zParams.notificationLevel = (unsigned)g_displayLevel; - /* Check the parameters */ - if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, - data->ctx->f, accel)) { - DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); - free(data); - continue; - } - /* Call the function and pass ownership of data to it */ - COVER_best_start(&best); - if (pool) { - POOL_add(pool, &FASTCOVER_tryParameters, data); - } else { - 
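// For scale (derived from the constants above): with the defaults d in {6, 8},
// k in [50, 2000] and steps = 40, kStepSize = MAX((2000 - 50) / 40, 1) = 48, so
// kIterations = (1 + (8 - 6) / 2) * (1 + (2000 - 50) / 48) = 2 * 41 = 82
// (d, k) candidates are tried, each dispatched as one FASTCOVER_tryParameters()
// job (to the POOL when nbThreads > 1, otherwise inline as below).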
FASTCOVER_tryParameters(data); - } - /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); - ++iteration; - } - COVER_best_wait(&best); - FASTCOVER_ctx_destroy(&ctx); - } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); - /* Fill the output buffer and parameters with output of the best parameters */ - { - const size_t dictSize = best.dictSize; - if (ZSTD_isError(best.compressedSize)) { - const size_t compressedSize = best.compressedSize; - COVER_best_destroy(&best); - POOL_free(pool); - return compressedSize; - } - FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); - memcpy(dictBuffer, best.dict, dictSize); - COVER_best_destroy(&best); - POOL_free(pool); - return dictSize; - } - -} diff --git a/zstandard_android/src/dll/example/build_package.bat b/zstandard_android/src/dll/example/build_package.bat deleted file mode 100644 index 8baabc7..0000000 --- a/zstandard_android/src/dll/example/build_package.bat +++ /dev/null @@ -1,20 +0,0 @@ -@ECHO OFF -MKDIR bin\dll bin\static bin\example bin\include -COPY tests\fullbench.c bin\example\ -COPY programs\datagen.c bin\example\ -COPY programs\datagen.h bin\example\ -COPY programs\util.h bin\example\ -COPY programs\platform.h bin\example\ -COPY lib\common\mem.h bin\example\ -COPY lib\common\zstd_internal.h bin\example\ -COPY lib\common\error_private.h bin\example\ -COPY lib\common\xxhash.h bin\example\ -COPY lib\libzstd.a bin\static\libzstd_static.lib -COPY lib\dll\libzstd.* bin\dll\ -COPY lib\dll\example\Makefile bin\example\ -COPY lib\dll\example\fullbench-dll.* bin\example\ -COPY lib\dll\example\README.md bin\ -COPY lib\zstd.h bin\include\ -COPY lib\common\zstd_errors.h bin\include\ -COPY lib\dictBuilder\zdict.h bin\include\ -COPY programs\zstd.exe bin\zstd.exe diff --git a/zstandard_android/src/legacy/zstd_v01.c b/zstandard_android/src/legacy/zstd_v01.c deleted file mode 100644 index 6cf5123..0000000 --- a/zstandard_android/src/legacy/zstd_v01.c +++ /dev/null @@ -1,2127 +0,0 @@ -/* - * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - -/****************************************** -* Includes -******************************************/ -#include /* size_t, ptrdiff_t */ -#include "zstd_v01.h" -#include "../common/compiler.h" -#include "../common/error_private.h" - - -/****************************************** -* Static allocation -******************************************/ -/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) -* Increasing memory usage improves compression ratio -* Reduced memory usage can improve speed, due to cache effect -* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -#define FSE_MAX_MEMORY_USAGE 14 -#define FSE_DEFAULT_MEMORY_USAGE 13 - -/* FSE_MAX_SYMBOL_VALUE : -* Maximum symbol value authorized. 
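/* Concretely, with FSE_MAX_MEMORY_USAGE == 14 above, FSE_MAX_TABLELOG comes
 * out to 14-2 = 12 (defined further down), so a maximal decoding table
 * (DTable_max_t) occupies FSE_DTABLE_SIZE_U32(12) = 1 + (1<<12) = 4097 U32
 * entries, about 16 KB: small enough that FSE_decompress() below keeps it
 * on the stack, which is what these limits are protecting. */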
-* Required for proper stack allocation */ -#define FSE_MAX_SYMBOL_VALUE 255 - - -/**************************************************************** -* template functions type & suffix -****************************************************************/ -#define FSE_FUNCTION_TYPE BYTE -#define FSE_FUNCTION_EXTENSION - - -/**************************************************************** -* Byte symbol type -****************************************************************/ -typedef struct -{ - unsigned short newState; - unsigned char symbol; - unsigned char nbBits; -} FSE_decode_t; /* size == U32 */ - - - -/**************************************************************** -* Compiler specifics -****************************************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# define FORCE_INLINE static __forceinline -# include /* For Visual 2005 */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ -#else -# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/**************************************************************** -* Includes -****************************************************************/ -#include /* malloc, free, qsort */ -#include /* memcpy, memset */ -#include /* printf (debug) */ - - -#ifndef MEM_ACCESS_MODULE -#define MEM_ACCESS_MODULE -/**************************************************************** -* Basic Types -*****************************************************************/ -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# include -typedef uint8_t BYTE; -typedef uint16_t U16; -typedef int16_t S16; -typedef uint32_t U32; -typedef int32_t S32; -typedef uint64_t U64; -typedef int64_t S64; -#else -typedef unsigned char BYTE; -typedef unsigned short U16; -typedef signed short S16; -typedef unsigned int U32; -typedef signed int S32; -typedef unsigned long long U64; -typedef signed long long S64; -#endif - -#endif /* MEM_ACCESS_MODULE */ - -/**************************************************************** -* Memory I/O -*****************************************************************/ - -static unsigned FSE_32bits(void) -{ - return sizeof(void*)==4; -} - -static unsigned FSE_isLittleEndian(void) -{ - const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} - -static U16 FSE_read16(const void* memPtr) -{ - U16 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -static U32 FSE_read32(const void* memPtr) -{ - U32 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -static U64 FSE_read64(const void* memPtr) -{ - U64 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -static U16 FSE_readLE16(const void* memPtr) -{ - if (FSE_isLittleEndian()) - return FSE_read16(memPtr); - else - { - const BYTE* p = (const BYTE*)memPtr; - return (U16)(p[0] + (p[1]<<8)); - } -} - -static U32 FSE_readLE32(const void* memPtr) -{ - if (FSE_isLittleEndian()) - return FSE_read32(memPtr); - else - { - const BYTE* p = (const BYTE*)memPtr; - return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); - } -} - - -static U64 
FSE_readLE64(const void* memPtr) -{ - if (FSE_isLittleEndian()) - return FSE_read64(memPtr); - else - { - const BYTE* p = (const BYTE*)memPtr; - return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) - + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); - } -} - -static size_t FSE_readLEST(const void* memPtr) -{ - if (FSE_32bits()) - return (size_t)FSE_readLE32(memPtr); - else - return (size_t)FSE_readLE64(memPtr); -} - - - -/**************************************************************** -* Constants -*****************************************************************/ -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) -#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" -#endif - - -/**************************************************************** -* Error Management -****************************************************************/ -#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - - -/**************************************************************** -* Complex types -****************************************************************/ -typedef struct -{ - int deltaFindState; - U32 deltaNbBits; -} FSE_symbolCompressionTransform; /* total 8 bytes */ - -typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; - -/**************************************************************** -* Internal functions -****************************************************************/ -FORCE_INLINE unsigned FSE_highbit32 (U32 val) -{ -# if defined(_MSC_VER) /* Visual */ - unsigned long r; - return _BitScanReverse(&r, val) ? (unsigned)r : 0; -# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - unsigned r; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; - return r; -# endif -} - - -/**************************************************************** -* Templates -****************************************************************/ -/* - designed to be included - for type-specific functions (template emulation in C) - Objective is to write these functions only once, for improved maintenance -*/ - -/* safety checks */ -#ifndef FSE_FUNCTION_EXTENSION -# error "FSE_FUNCTION_EXTENSION must be defined" -#endif -#ifndef FSE_FUNCTION_TYPE -# error "FSE_FUNCTION_TYPE must be defined" -#endif - -/* Function names */ -#define FSE_CAT(X,Y) X##Y -#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) -#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - - - -static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } - -#define FSE_DECODE_TYPE FSE_decode_t - - -typedef struct { - U16 tableLog; - U16 fastMode; -} FSE_DTableHeader; /* sizeof U32 */ - -static size_t FSE_buildDTable -(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1; /* because dt is unsigned, 32-bits aligned on 32-bits */ - const U32 tableSize = 1 << tableLog; - const U32 tableMask = tableSize-1; - const U32 step = FSE_tableStep(tableSize); - 
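/* Why the symbol-spreading loop just below always ends with position == 0:
 * for every tableLog >= 4 (FSE_MIN_TABLELOG is 5), the step
 * (tableSize>>1) + (tableSize>>3) + 3 is odd while tableSize is a power of
 * two, so the two are coprime and position = (position + step) & tableMask
 * walks every cell exactly once. A standalone check of that property
 * (illustrative, not part of the original file): */
#include <assert.h>

static void check_spread_full_cycle(unsigned tableLog)
{
    unsigned const tableSize = 1u << tableLog;
    unsigned const tableMask = tableSize - 1;
    unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;
    unsigned char seen[1u << 12] = {0};
    unsigned pos = 0, n;
    assert(tableLog >= 4 && tableLog <= 12);
    for (n = 0; n < tableSize; n++) {
        assert(!seen[pos]);   /* no cell is visited twice */
        seen[pos] = 1;
        pos = (pos + step) & tableMask;
    }
    assert(pos == 0);         /* the walk closes after exactly tableSize steps */
}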
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; - U32 position = 0; - U32 highThreshold = tableSize-1; - const S16 largeLimit= (S16)(1 << (tableLog-1)); - U32 noLarge = 1; - U32 s; - - /* Sanity Checks */ - if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge; - if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge; - - /* Init, lay down lowprob symbols */ - DTableH[0].tableLog = (U16)tableLog; - for (s=0; s<=maxSymbolValue; s++) - { - if (normalizedCounter[s]==-1) - { - tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; - symbolNext[s] = 1; - } - else - { - if (normalizedCounter[s] >= largeLimit) noLarge=0; - symbolNext[s] = normalizedCounter[s]; - } - } - - /* Spread symbols */ - for (s=0; s<=maxSymbolValue; s++) - { - int i; - for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ - } - } - - if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */ - - /* Build Decoding table */ - { - U32 i; - for (i=0; ifastMode = (U16)noLarge; - return 0; -} - - -/****************************************** -* FSE byte symbol -******************************************/ -#ifndef FSE_COMMONDEFS_ONLY - -static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); } - -static short FSE_abs(short a) -{ - return a<0? -a : a; -} - - -/**************************************************************** -* Header bitstream management -****************************************************************/ -static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, - const void* headerBuffer, size_t hbSize) -{ - const BYTE* const istart = (const BYTE*) headerBuffer; - const BYTE* const iend = istart + hbSize; - const BYTE* ip = istart; - int nbBits; - int remaining; - int threshold; - U32 bitStream; - int bitCount; - unsigned charnum = 0; - int previous0 = 0; - - if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong; - bitStream = FSE_readLE32(ip); - nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ - if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge; - bitStream >>= 4; - bitCount = 4; - *tableLogPtr = nbBits; - remaining = (1<1) && (charnum<=*maxSVPtr)) - { - if (previous0) - { - unsigned n0 = charnum; - while ((bitStream & 0xFFFF) == 0xFFFF) - { - n0+=24; - if (ip < iend-5) - { - ip+=2; - bitStream = FSE_readLE32(ip) >> bitCount; - } - else - { - bitStream >>= 16; - bitCount+=16; - } - } - while ((bitStream & 3) == 3) - { - n0+=3; - bitStream>>=2; - bitCount+=2; - } - n0 += bitStream & 3; - bitCount += 2; - if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall; - while (charnum < n0) normalizedCounter[charnum++] = 0; - if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) - { - ip += bitCount>>3; - bitCount &= 7; - bitStream = FSE_readLE32(ip) >> bitCount; - } - else - bitStream >>= 2; - } - { - const short max = (short)((2*threshold-1)-remaining); - short count; - - if ((bitStream & (threshold-1)) < (U32)max) - { - count = (short)(bitStream & (threshold-1)); - bitCount += nbBits-1; - } - else - { - count = (short)(bitStream & (2*threshold-1)); - if (count >= threshold) count -= max; - bitCount += nbBits; - } - - count--; /* extra accuracy */ - remaining -= FSE_abs(count); - normalizedCounter[charnum++] = count; - previous0 = !count; - while (remaining < threshold) - { - nbBits--; - threshold >>= 1; - } - - { 
- if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) - { - ip += bitCount>>3; - bitCount &= 7; - } - else - { - bitCount -= (int)(8 * (iend - 4 - ip)); - ip = iend - 4; - } - bitStream = FSE_readLE32(ip) >> (bitCount & 31); - } - } - } - if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC; - *maxSVPtr = charnum-1; - - ip += (bitCount+7)>>3; - if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong; - return ip-istart; -} - - -/********************************************************* -* Decompression (Byte symbols) -*********************************************************/ -static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ - - DTableH->tableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; s<=maxSymbolValue; s++) - { - dinfo[s].newState = 0; - dinfo[s].symbol = (BYTE)s; - dinfo[s].nbBits = (BYTE)nbBits; - } - - return 0; -} - - -/* FSE_initDStream - * Initialize a FSE_DStream_t. - * srcBuffer must point at the beginning of an FSE block. - * The function result is the size of the FSE_block (== srcSize). 
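/* Background for FSE_initDStream below: the encoder terminates the stream
 * with a single 1-bit ("stop bit") after the payload, so the position of the
 * highest set bit in the last byte tells the decoder how much of that byte
 * is padding. A hypothetical helper mirroring that arithmetic: */
#include <assert.h>

static unsigned fse_tail_bits_consumed(unsigned char lastByte)
{
    unsigned hb = 0;
    assert(lastByte != 0);  /* all-zero last byte means no stop bit: corruption */
    while ((unsigned)(lastByte >> hb) > 1) hb++;   /* hb = index of highest set bit */
    return 8 - hb;  /* e.g. 0x80 -> 1 (7 payload bits below), 0x01 -> 8 (none) */
}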
- * If srcSize is too small, the function will return an errorCode; - */ -static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize) -{ - if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong; - - if (srcSize >= sizeof(size_t)) - { - U32 contain32; - bitD->start = (const char*)srcBuffer; - bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); - bitD->bitContainer = FSE_readLEST(bitD->ptr); - contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; - if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ - bitD->bitsConsumed = 8 - FSE_highbit32(contain32); - } - else - { - U32 contain32; - bitD->start = (const char*)srcBuffer; - bitD->ptr = bitD->start; - bitD->bitContainer = *(const BYTE*)(bitD->start); - switch(srcSize) - { - case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); - /* fallthrough */ - case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); - /* fallthrough */ - case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); - /* fallthrough */ - case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; - /* fallthrough */ - case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; - /* fallthrough */ - case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; - /* fallthrough */ - default:; - } - contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; - if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ - bitD->bitsConsumed = 8 - FSE_highbit32(contain32); - bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; - } - - return srcSize; -} - - -/*!FSE_lookBits - * Provides next n bits from the bitContainer. - * bitContainer is not modified (bits are still present for next read/look) - * On 32-bits, maxNbBits==25 - * On 64-bits, maxNbBits==57 - * return : value extracted. - */ -static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits) -{ - const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; - return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); -} - -static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ -{ - const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; - return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); -} - -static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits) -{ - bitD->bitsConsumed += nbBits; -} - - -/*!FSE_readBits - * Read next n bits from the bitContainer. - * On 32-bits, don't read more than maxNbBits==25 - * On 64-bits, don't read more than maxNbBits==57 - * Use the fast variant *only* if n >= 1. - * return : value extracted. - */ -static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits) -{ - size_t value = FSE_lookBits(bitD, nbBits); - FSE_skipBits(bitD, nbBits); - return value; -} - -static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! 
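/* Note on the Fast variants: FSE_lookBitsFast shifts right by
 * ((bitMask+1) - nbBits) & bitMask, which wraps to 0 when nbBits == 0 and
 * would return the whole remaining container instead of 0. The plain
 * FSE_lookBits avoids that corner with its extra ">> 1"; hence the
 * "only if nbBits >= 1" restriction on the Fast path. */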
*/ -{ - size_t value = FSE_lookBitsFast(bitD, nbBits); - FSE_skipBits(bitD, nbBits); - return value; -} - -static unsigned FSE_reloadDStream(FSE_DStream_t* bitD) -{ - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ - return FSE_DStream_tooFar; - - if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) - { - bitD->ptr -= bitD->bitsConsumed >> 3; - bitD->bitsConsumed &= 7; - bitD->bitContainer = FSE_readLEST(bitD->ptr); - return FSE_DStream_unfinished; - } - if (bitD->ptr == bitD->start) - { - if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer; - return FSE_DStream_completed; - } - { - U32 nbBytes = bitD->bitsConsumed >> 3; - U32 result = FSE_DStream_unfinished; - if (bitD->ptr - nbBytes < bitD->start) - { - nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ - result = FSE_DStream_endOfBuffer; - } - bitD->ptr -= nbBytes; - bitD->bitsConsumed -= nbBytes*8; - bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ - return result; - } -} - - -static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt) -{ - const void* ptr = dt; - const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; - DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog); - FSE_reloadDStream(bitD); - DStatePtr->table = dt + 1; -} - -static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) -{ - const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - const U32 nbBits = DInfo.nbBits; - BYTE symbol = DInfo.symbol; - size_t lowBits = FSE_readBits(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) -{ - const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - const U32 nbBits = DInfo.nbBits; - BYTE symbol = DInfo.symbol; - size_t lowBits = FSE_readBitsFast(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -/* FSE_endOfDStream - Tells if bitD has reached end of bitStream or not */ - -static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD) -{ - return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8)); -} - -static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) -{ - return DStatePtr->state == 0; -} - - -FORCE_INLINE size_t FSE_decompress_usingDTable_generic( - void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt, const unsigned fast) -{ - BYTE* const ostart = (BYTE*) dst; - BYTE* op = ostart; - BYTE* const omax = op + maxDstSize; - BYTE* const olimit = omax-3; - - FSE_DStream_t bitD; - FSE_DState_t state1; - FSE_DState_t state2; - size_t errorCode; - - /* Init */ - errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ - if (FSE_isError(errorCode)) return errorCode; - - FSE_initDState(&state1, &bitD, dt); - FSE_initDState(&state2, &bitD, dt); - -#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) - - /* 4 symbols per loop */ - for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ - FSE_reloadDStream(&bitD); - - op[1] = FSE_GETSYMBOL(&state2); - - if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ - { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } } - - op[2] = FSE_GETSYMBOL(&state1); - - if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ - FSE_reloadDStream(&bitD); - - op[3] = FSE_GETSYMBOL(&state2); - } - - /* tail */ - /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */ - while (1) - { - if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) - break; - - *op++ = FSE_GETSYMBOL(&state1); - - if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) - break; - - *op++ = FSE_GETSYMBOL(&state2); - } - - /* end ? */ - if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) - return op-ostart; - - if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */ - - return (size_t)-FSE_ERROR_corruptionDetected; -} - - -static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt) -{ - FSE_DTableHeader DTableH; - memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */ - - /* select fast mode (static) */ - if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) -{ - const BYTE* const istart = (const BYTE*)cSrc; - const BYTE* ip = istart; - short counting[FSE_MAX_SYMBOL_VALUE+1]; - DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ - unsigned tableLog; - unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; - size_t errorCode; - - if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ - - /* normal FSE decoding mode */ - errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); - if (FSE_isError(errorCode)) return errorCode; - if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ - ip += errorCode; - cSrcSize -= errorCode; - - errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); - if (FSE_isError(errorCode)) return errorCode; - - /* always return, even if it is an error code */ - return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); -} - - - -/* ******************************************************* -* Huff0 : Huffman block compression -*********************************************************/ -#define HUF_MAX_SYMBOL_VALUE 255 -#define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */ -#define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */ -#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. 
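/* Why fastMode may be selected above: FSE_buildDTable clears noLarge as soon
 * as any symbol has a normalized count >= 2^(tableLog-1), i.e. owns at least
 * half the table. When fastMode == 1, therefore, every decode step consumes
 * at least one bit (nbBits >= 1), which is exactly the precondition
 * FSE_lookBitsFast needs, so the fast == 1 specialization is safe. */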
Beyond that value, code does not work */ -#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) -# error "HUF_MAX_TABLELOG is too large !" -#endif - -typedef struct HUF_CElt_s { - U16 val; - BYTE nbBits; -} HUF_CElt ; - -typedef struct nodeElt_s { - U32 count; - U16 parent; - BYTE byte; - BYTE nbBits; -} nodeElt; - - -/* ******************************************************* -* Huff0 : Huffman block decompression -*********************************************************/ -typedef struct { - BYTE byte; - BYTE nbBits; -} HUF_DElt; - -static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize) -{ - BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; - U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ - U32 weightTotal; - U32 maxBits; - const BYTE* ip = (const BYTE*) src; - size_t iSize; - size_t oSize; - U32 n; - U32 nextRankStart; - void* ptr = DTable+1; - HUF_DElt* const dt = (HUF_DElt*)ptr; - - if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; - iSize = ip[0]; - - FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */ - //memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... */ - if (iSize >= 128) /* special header */ - { - if (iSize >= (242)) /* RLE */ - { - static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; - oSize = l[iSize-242]; - memset(huffWeight, 1, sizeof(huffWeight)); - iSize = 0; - } - else /* Incompressible */ - { - oSize = iSize - 127; - iSize = ((oSize+1)/2); - if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; - ip += 1; - for (n=0; n> 4; - huffWeight[n+1] = ip[n/2] & 15; - } - } - } - else /* header compressed with FSE (normal case) */ - { - if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; - oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */ - if (FSE_isError(oSize)) return oSize; - } - - /* collect weight stats */ - memset(rankVal, 0, sizeof(rankVal)); - weightTotal = 0; - for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected; - rankVal[huffWeight[n]]++; - weightTotal += (1 << huffWeight[n]) >> 1; - } - if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected; - - /* get last non-null symbol weight (implied, total must be 2^n) */ - maxBits = FSE_highbit32(weightTotal) + 1; - if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */ - DTable[0] = (U16)maxBits; - { - U32 total = 1 << maxBits; - U32 rest = total - weightTotal; - U32 verif = 1 << FSE_highbit32(rest); - U32 lastWeight = FSE_highbit32(rest) + 1; - if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */ - huffWeight[oSize] = (BYTE)lastWeight; - rankVal[lastWeight]++; - } - - /* check tree construction validity */ - if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */ - - /* Prepare ranks */ - nextRankStart = 0; - for (n=1; n<=maxBits; n++) - { - U32 current = nextRankStart; - nextRankStart += (rankVal[n] << (n-1)); - rankVal[n] = current; - } - - /* fill DTable */ - for (n=0; n<=oSize; n++) - { - const U32 w = huffWeight[n]; - const U32 length = (1 << w) >> 1; - U32 i; - HUF_DElt D; - D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w); - for (i = rankVal[w]; i < rankVal[w] + length; i++) - dt[i] = D; - rankVal[w] += length; - 
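/* Worked example of the weight scheme above (hypothetical numbers): decoded
 * weights {5, 4, 4} give weightTotal = 16 + 8 + 8 = 32, so
 * maxBits = highbit32(32) + 1 = 6 and total = 64. rest = 64 - 32 = 32 is a
 * clean power of two, so the implied last weight is highbit32(32) + 1 = 6.
 * Code lengths are maxBits + 1 - w: 2, 3, 3 bits plus 1 bit for the implied
 * symbol, and the cell counts (1<<w)>>1 = 16, 8, 8, 32 fill the 64-entry
 * table exactly. */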
} - - return iSize+1; -} - - -static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog) -{ - const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ - const BYTE c = dt[val].byte; - FSE_skipBits(Dstream, dt[val].nbBits); - return c; -} - -static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ - void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const U16* DTable) -{ - if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong; - { - BYTE* const ostart = (BYTE*) dst; - BYTE* op = ostart; - BYTE* const omax = op + maxDstSize; - BYTE* const olimit = maxDstSize < 15 ? op : omax-15; - - const void* ptr = DTable; - const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; - const U32 dtLog = DTable[0]; - size_t errorCode; - U32 reloadStatus; - - /* Init */ - - const U16* jumpTable = (const U16*)cSrc; - const size_t length1 = FSE_readLE16(jumpTable); - const size_t length2 = FSE_readLE16(jumpTable+1); - const size_t length3 = FSE_readLE16(jumpTable+2); - const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; /* check coherency !! */ - const char* const start1 = (const char*)(cSrc) + 6; - const char* const start2 = start1 + length1; - const char* const start3 = start2 + length2; - const char* const start4 = start3 + length3; - FSE_DStream_t bitD1, bitD2, bitD3, bitD4; - - if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; - - errorCode = FSE_initDStream(&bitD1, start1, length1); - if (FSE_isError(errorCode)) return errorCode; - errorCode = FSE_initDStream(&bitD2, start2, length2); - if (FSE_isError(errorCode)) return errorCode; - errorCode = FSE_initDStream(&bitD3, start3, length3); - if (FSE_isError(errorCode)) return errorCode; - errorCode = FSE_initDStream(&bitD4, start4, length4); - if (FSE_isError(errorCode)) return errorCode; - - reloadStatus=FSE_reloadDStream(&bitD2); - - /* 16 symbols per loop */ - for ( ; (reloadStatus12)) FSE_reloadDStream(&Dstream) - - #define HUF_DECODE_SYMBOL_2(n, Dstream) \ - op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ - if (FSE_32bits()) FSE_reloadDStream(&Dstream) - - HUF_DECODE_SYMBOL_1( 0, bitD1); - HUF_DECODE_SYMBOL_1( 1, bitD2); - HUF_DECODE_SYMBOL_1( 2, bitD3); - HUF_DECODE_SYMBOL_1( 3, bitD4); - HUF_DECODE_SYMBOL_2( 4, bitD1); - HUF_DECODE_SYMBOL_2( 5, bitD2); - HUF_DECODE_SYMBOL_2( 6, bitD3); - HUF_DECODE_SYMBOL_2( 7, bitD4); - HUF_DECODE_SYMBOL_1( 8, bitD1); - HUF_DECODE_SYMBOL_1( 9, bitD2); - HUF_DECODE_SYMBOL_1(10, bitD3); - HUF_DECODE_SYMBOL_1(11, bitD4); - HUF_DECODE_SYMBOL_0(12, bitD1); - HUF_DECODE_SYMBOL_0(13, bitD2); - HUF_DECODE_SYMBOL_0(14, bitD3); - HUF_DECODE_SYMBOL_0(15, bitD4); - } - - if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */ - return (size_t)-FSE_ERROR_corruptionDetected; - - /* tail */ - { - /* bitTail = bitD1; */ /* *much* slower : -20% !??! */ - FSE_DStream_t bitTail; - bitTail.ptr = bitD1.ptr; - bitTail.bitsConsumed = bitD1.bitsConsumed; - bitTail.bitContainer = bitD1.bitContainer; /* required in case of FSE_DStream_endOfBuffer */ - bitTail.start = start1; - for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; - ip += errorCode; - cSrcSize -= errorCode; - - return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable); -} - - -#endif /* FSE_COMMONDEFS_ONLY */ - -/* - zstd - standard compression library - Copyright (C) 2014-2015, Yann Collet. 
- - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - zstd source repository : https://github.com/Cyan4973/zstd - - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/**************************************************************** -* Tuning parameters -*****************************************************************/ -/* MEMORY_USAGE : -* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 
-* Increasing memory usage improves compression ratio -* Reduced memory usage can improve speed, due to cache effect */ -#define ZSTD_MEMORY_USAGE 17 - - -/************************************** - CPU Feature Detection -**************************************/ -/* - * Automated efficient unaligned memory access detection - * Based on known hardware architectures - * This list will be updated thanks to feedbacks - */ -#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \ - || defined(__ARM_FEATURE_UNALIGNED) \ - || defined(__i386__) || defined(__x86_64__) \ - || defined(_M_IX86) || defined(_M_X64) \ - || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \ - || (defined(_M_ARM) && (_M_ARM >= 7)) -# define ZSTD_UNALIGNED_ACCESS 1 -#else -# define ZSTD_UNALIGNED_ACCESS 0 -#endif - - -/******************************************************** -* Includes -*********************************************************/ -#include /* calloc */ -#include /* memcpy, memmove */ -#include /* debug : printf */ - - -/******************************************************** -* Compiler specifics -*********************************************************/ -#ifdef __AVX2__ -# include /* AVX2 intrinsics */ -#endif - -#ifdef _MSC_VER /* Visual Studio */ -# include /* For Visual 2005 */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4324) /* disable: C4324: padded structure */ -#endif - - -#ifndef MEM_ACCESS_MODULE -#define MEM_ACCESS_MODULE -/******************************************************** -* Basic Types -*********************************************************/ -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# if defined(_AIX) -# include -# else -# include /* intptr_t */ -# endif -typedef uint8_t BYTE; -typedef uint16_t U16; -typedef int16_t S16; -typedef uint32_t U32; -typedef int32_t S32; -typedef uint64_t U64; -#else -typedef unsigned char BYTE; -typedef unsigned short U16; -typedef signed short S16; -typedef unsigned int U32; -typedef signed int S32; -typedef unsigned long long U64; -#endif - -#endif /* MEM_ACCESS_MODULE */ - - -/******************************************************** -* Constants -*********************************************************/ -static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */ - -#define HASH_LOG (ZSTD_MEMORY_USAGE - 2) -#define HASH_TABLESIZE (1 << HASH_LOG) -#define HASH_MASK (HASH_TABLESIZE - 1) - -#define KNUTH 2654435761 - -#define BIT7 128 -#define BIT6 64 -#define BIT5 32 -#define BIT4 16 - -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define BLOCKSIZE (128 KB) /* define, for static allocation */ - -#define WORKPLACESIZE (BLOCKSIZE*3) -#define MINMATCH 4 -#define MLbits 7 -#define LLbits 6 -#define Offbits 5 -#define MaxML ((1<>3]; -#else - U32 hashTable[HASH_TABLESIZE]; -#endif - BYTE buffer[WORKPLACESIZE]; -} cctxi_t; - - - - -/************************************** -* Error Management -**************************************/ -/* published entry point */ -unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); } - - -/************************************** -* Tool functions -**************************************/ -#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ -#define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */ -#define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ -#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + 
ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) - -/************************************************************** -* Decompression code -**************************************************************/ - -static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) -{ - const BYTE* const in = (const BYTE* const)src; - BYTE headerFlags; - U32 cSize; - - if (srcSize < 3) return ERROR(srcSize_wrong); - - headerFlags = *in; - cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); - - bpPtr->blockType = (blockType_t)(headerFlags >> 6); - bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; - - if (bpPtr->blockType == bt_end) return 0; - if (bpPtr->blockType == bt_rle) return 1; - return cSize; -} - - -static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); - if (srcSize > 0) { - memcpy(dst, src, srcSize); - } - return srcSize; -} - - -static size_t ZSTD_decompressLiterals(void* ctx, - void* dst, size_t maxDstSize, - const void* src, size_t srcSize) -{ - BYTE* op = (BYTE*)dst; - BYTE* const oend = op + maxDstSize; - const BYTE* ip = (const BYTE*)src; - size_t errorCode; - size_t litSize; - - /* check : minimum 2, for litSize, +1, for content */ - if (srcSize <= 3) return ERROR(corruption_detected); - - litSize = ip[1] + (ip[0]<<8); - litSize += ((ip[-3] >> 3) & 7) << 16; /* mmmmh.... */ - op = oend - litSize; - - (void)ctx; - if (litSize > maxDstSize) return ERROR(dstSize_tooSmall); - errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2); - if (FSE_isError(errorCode)) return ERROR(GENERIC); - return litSize; -} - - -static size_t ZSTDv01_decodeLiteralsBlock(void* ctx, - void* dst, size_t maxDstSize, - const BYTE** litStart, size_t* litSize, - const void* src, size_t srcSize) -{ - const BYTE* const istart = (const BYTE* const)src; - const BYTE* ip = istart; - BYTE* const ostart = (BYTE* const)dst; - BYTE* const oend = ostart + maxDstSize; - blockProperties_t litbp; - - size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp); - if (ZSTDv01_isError(litcSize)) return litcSize; - if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); - ip += ZSTD_blockHeaderSize; - - switch(litbp.blockType) - { - case bt_raw: - *litStart = ip; - ip += litcSize; - *litSize = litcSize; - break; - case bt_rle: - { - size_t rleSize = litbp.origSize; - if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); - if (!srcSize) return ERROR(srcSize_wrong); - if (rleSize > 0) { - memset(oend - rleSize, *ip, rleSize); - } - *litStart = oend - rleSize; - *litSize = rleSize; - ip++; - break; - } - case bt_compressed: - { - size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize); - if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize; - *litStart = oend - decodedLitSize; - *litSize = decodedLitSize; - ip += litcSize; - break; - } - case bt_end: - default: - return ERROR(GENERIC); - } - - return ip-istart; -} - - -static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, - FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, - const void* src, size_t srcSize) -{ - const BYTE* const istart = (const BYTE* const)src; - const BYTE* ip = istart; - const BYTE* const iend = istart + srcSize; - U32 LLtype, Offtype, MLtype; - U32 LLlog, Offlog, MLlog; - size_t dumpsLength; - - /* check */ - if (srcSize < 5) return ERROR(srcSize_wrong); - - /* SeqHead */ - *nbSeq = ZSTD_readLE16(ip); 
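/* Field layout decoded below: the flags byte that follows the LE16 sequence
 * count packs LLtype (bits 7-6), Offtype (bits 5-4) and MLtype (bits 3-2);
 * bit 1 selects the size of the "dumps" length field: when set, a full
 * 16-bit length in the following two bytes; when clear, a 9-bit length whose
 * high bit is flags bit 0 and whose low byte follows. The dumps area carries
 * the escape values for literal/match lengths too large for their FSE
 * symbols. */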
ip+=2; - LLtype = *ip >> 6; - Offtype = (*ip >> 4) & 3; - MLtype = (*ip >> 2) & 3; - if (*ip & 2) - { - dumpsLength = ip[2]; - dumpsLength += ip[1] << 8; - ip += 3; - } - else - { - dumpsLength = ip[1]; - dumpsLength += (ip[0] & 1) << 8; - ip += 2; - } - *dumpsPtr = ip; - ip += dumpsLength; - *dumpsLengthPtr = dumpsLength; - - /* check */ - if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ - - /* sequences */ - { - S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ - size_t headerSize; - - /* Build DTables */ - switch(LLtype) - { - case bt_rle : - LLlog = 0; - FSE_buildDTable_rle(DTableLL, *ip++); break; - case bt_raw : - LLlog = LLbits; - FSE_buildDTable_raw(DTableLL, LLbits); break; - default : - { U32 max = MaxLL; - headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); - if (FSE_isError(headerSize)) return ERROR(GENERIC); - if (LLlog > LLFSELog) return ERROR(corruption_detected); - ip += headerSize; - FSE_buildDTable(DTableLL, norm, max, LLlog); - } } - - switch(Offtype) - { - case bt_rle : - Offlog = 0; - if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ - FSE_buildDTable_rle(DTableOffb, *ip++); break; - case bt_raw : - Offlog = Offbits; - FSE_buildDTable_raw(DTableOffb, Offbits); break; - default : - { U32 max = MaxOff; - headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); - if (FSE_isError(headerSize)) return ERROR(GENERIC); - if (Offlog > OffFSELog) return ERROR(corruption_detected); - ip += headerSize; - FSE_buildDTable(DTableOffb, norm, max, Offlog); - } } - - switch(MLtype) - { - case bt_rle : - MLlog = 0; - if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ - FSE_buildDTable_rle(DTableML, *ip++); break; - case bt_raw : - MLlog = MLbits; - FSE_buildDTable_raw(DTableML, MLbits); break; - default : - { U32 max = MaxML; - headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); - if (FSE_isError(headerSize)) return ERROR(GENERIC); - if (MLlog > MLFSELog) return ERROR(corruption_detected); - ip += headerSize; - FSE_buildDTable(DTableML, norm, max, MLlog); - } } } - - return ip-istart; -} - - -typedef struct { - size_t litLength; - size_t offset; - size_t matchLength; -} seq_t; - -typedef struct { - FSE_DStream_t DStream; - FSE_DState_t stateLL; - FSE_DState_t stateOffb; - FSE_DState_t stateML; - size_t prevOffset; - const BYTE* dumps; - const BYTE* dumpsEnd; -} seqState_t; - - -static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) -{ - size_t litLength; - size_t prevOffset; - size_t offset; - size_t matchLength; - const BYTE* dumps = seqState->dumps; - const BYTE* const de = seqState->dumpsEnd; - - /* Literal length */ - litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); - prevOffset = litLength ? 
seq->offset : seqState->prevOffset; - seqState->prevOffset = seq->offset; - if (litLength == MaxLL) - { - const U32 add = dumpsstateOffb), &(seqState->DStream)); - if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); - nbBits = offsetCode - 1; - if (offsetCode==0) nbBits = 0; /* cmove */ - offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits); - if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); - if (offsetCode==0) offset = prevOffset; - } - - /* MatchLength */ - matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); - if (matchLength == MaxML) - { - const U32 add = dumpslitLength = litLength; - seq->offset = offset; - seq->matchLength = matchLength; - seqState->dumps = dumps; -} - - -static size_t ZSTD_execSequence(BYTE* op, - seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - BYTE* const base, BYTE* const oend) -{ - static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ - static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ - const BYTE* const ostart = op; - BYTE* const oLitEnd = op + sequence.litLength; - const size_t litLength = sequence.litLength; - BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ - const BYTE* const litEnd = *litPtr + litLength; - - /* checks */ - size_t const seqLength = sequence.litLength + sequence.matchLength; - - if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); - if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); - /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ - if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); - - if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ - if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ - - /* copy Literals */ - ZSTD_memmove(op, *litPtr, sequence.litLength); /* note : v0.1 seems to allow scenarios where output or input are close to end of buffer */ - - op += litLength; - *litPtr = litEnd; /* update for next sequence */ - - /* check : last match must be at a minimum distance of 8 from end of dest buffer */ - if (oend-op < 8) return ERROR(dstSize_tooSmall); - - /* copy Match */ - { - const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12); - const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? 
*/ - size_t qutt = 12; - U64 saved[2]; - - /* check */ - if (match < base) return ERROR(corruption_detected); - if (sequence.offset > (size_t)base) return ERROR(corruption_detected); - - /* save beginning of literal sequence, in case of write overlap */ - if (overlapRisk) - { - if ((endMatch + qutt) > oend) qutt = oend-endMatch; - memcpy(saved, endMatch, qutt); - } - - if (sequence.offset < 8) - { - const int dec64 = dec64table[sequence.offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[sequence.offset]; - ZSTD_copy4(op+4, match); - match -= dec64; - } else { ZSTD_copy8(op, match); } - op += 8; match += 8; - - if (endMatch > oend-(16-MINMATCH)) - { - if (op < oend-8) - { - ZSTD_wildcopy(op, match, (oend-8) - op); - match += (oend-8) - op; - op = oend-8; - } - while (opLLTable; - U32* DTableML = dctx->MLTable; - U32* DTableOffb = dctx->OffTable; - BYTE* const base = (BYTE*) (dctx->base); - - /* Build Decoding Tables */ - errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, - DTableLL, DTableML, DTableOffb, - ip, iend-ip); - if (ZSTDv01_isError(errorCode)) return errorCode; - ip += errorCode; - - /* Regen sequences */ - { - seq_t sequence; - seqState_t seqState; - - memset(&sequence, 0, sizeof(sequence)); - seqState.dumps = dumps; - seqState.dumpsEnd = dumps + dumpsLength; - seqState.prevOffset = 1; - errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip); - if (FSE_isError(errorCode)) return ERROR(corruption_detected); - FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); - FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); - FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); - - for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; ) - { - size_t oneSeqSize; - nbSeq--; - ZSTD_decodeSequence(&sequence, &seqState); - oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); - if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize; - op += oneSeqSize; - } - - /* check if reached exact end */ - if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ - if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ - - /* last literal segment */ - { - size_t lastLLSize = litEnd - litPtr; - if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); - if (lastLLSize > 0) { - if (op != litPtr) memmove(op, litPtr, lastLLSize); - op += lastLLSize; - } - } - } - - return op-ostart; -} - - -static size_t ZSTD_decompressBlock( - void* ctx, - void* dst, size_t maxDstSize, - const void* src, size_t srcSize) -{ - /* blockType == blockCompressed, srcSize is trusted */ - const BYTE* ip = (const BYTE*)src; - const BYTE* litPtr = NULL; - size_t litSize = 0; - size_t errorCode; - - /* Decode literals sub-block */ - errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize); - if (ZSTDv01_isError(errorCode)) return errorCode; - ip += errorCode; - srcSize -= errorCode; - - return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize); -} - - -size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - const BYTE* ip = (const BYTE*)src; - const BYTE* iend = ip + srcSize; - BYTE* const ostart = (BYTE* const)dst; - BYTE* op = ostart; - BYTE* const oend = ostart + maxDstSize; - size_t remainingSize = srcSize; - U32 
magicNumber; - size_t errorCode=0; - blockProperties_t blockProperties; - - /* Frame Header */ - if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); - magicNumber = ZSTD_readBE32(src); - if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); - ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; - - /* Loop on each block */ - while (1) - { - size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties); - if (ZSTDv01_isError(blockSize)) return blockSize; - - ip += ZSTD_blockHeaderSize; - remainingSize -= ZSTD_blockHeaderSize; - if (blockSize > remainingSize) return ERROR(srcSize_wrong); - - switch(blockProperties.blockType) - { - case bt_compressed: - errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize); - break; - case bt_raw : - errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize); - break; - case bt_rle : - return ERROR(GENERIC); /* not yet supported */ - break; - case bt_end : - /* end of frame */ - if (remainingSize) return ERROR(srcSize_wrong); - break; - default: - return ERROR(GENERIC); - } - if (blockSize == 0) break; /* bt_end */ - - if (ZSTDv01_isError(errorCode)) return errorCode; - op += errorCode; - ip += blockSize; - remainingSize -= blockSize; - } - - return op-ostart; -} - -size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - dctx_t ctx; - ctx.base = dst; - return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); -} - -/* ZSTD_errorFrameSizeInfoLegacy() : - assumes `cSize` and `dBound` are _not_ NULL */ -static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) -{ - *cSize = ret; - *dBound = ZSTD_CONTENTSIZE_ERROR; -} - -void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) -{ - const BYTE* ip = (const BYTE*)src; - size_t remainingSize = srcSize; - size_t nbBlocks = 0; - U32 magicNumber; - blockProperties_t blockProperties; - - /* Frame Header */ - if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) { - ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); - return; - } - magicNumber = ZSTD_readBE32(src); - if (magicNumber != ZSTD_magicNumber) { - ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); - return; - } - ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; - - /* Loop on each block */ - while (1) - { - size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties); - if (ZSTDv01_isError(blockSize)) { - ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize); - return; - } - - ip += ZSTD_blockHeaderSize; - remainingSize -= ZSTD_blockHeaderSize; - if (blockSize > remainingSize) { - ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); - return; - } - - if (blockSize == 0) break; /* bt_end */ - - ip += blockSize; - remainingSize -= blockSize; - nbBlocks++; - } - - *cSize = ip - (const BYTE*)src; - *dBound = nbBlocks * BLOCKSIZE; -} - -/******************************* -* Streaming Decompression API -*******************************/ - -size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx) -{ - dctx->expected = ZSTD_frameHeaderSize; - dctx->phase = 0; - dctx->previousDstEnd = NULL; - dctx->base = NULL; - return 0; -} - -ZSTDv01_Dctx* ZSTDv01_createDCtx(void) -{ - ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx)); - if (dctx==NULL) return NULL; - ZSTDv01_resetDCtx(dctx); - return dctx; -} - -size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx) -{ - 
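/* A minimal driver sketch for the streaming API around it (illustrative;
 * relies on the zstd_v01.h prototypes, abbreviates error handling, and
 * assumes the whole frame is in memory): */
static size_t v01_stream_decode(void* dst, size_t dstCap,
                                const void* src, size_t srcSize)
{
    ZSTDv01_Dctx* const dctx = ZSTDv01_createDCtx();
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t remaining = srcSize;
    if (dctx == NULL) return 0;
    for (;;) {
        size_t const toRead = ZSTDv01_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;          /* bt_end reached: frame complete */
        if (toRead > remaining) break;   /* truncated input */
        produced = ZSTDv01_decompressContinue(dctx, op,
                        dstCap - (size_t)(op - (char*)dst), ip, toRead);
        if (ZSTDv01_isError(produced)) break;
        ip += toRead;  remaining -= toRead;
        op += produced;   /* headers produce 0 bytes; blocks produce their payload */
    }
    ZSTDv01_freeDCtx(dctx);
    return (size_t)(op - (char*)dst);
}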
free(dctx); - return 0; -} - -size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx) -{ - return ((dctx_t*)dctx)->expected; -} - -size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - dctx_t* ctx = (dctx_t*)dctx; - - /* Sanity check */ - if (srcSize != ctx->expected) return ERROR(srcSize_wrong); - if (dst != ctx->previousDstEnd) /* not contiguous */ - ctx->base = dst; - - /* Decompress : frame header */ - if (ctx->phase == 0) - { - /* Check frame magic header */ - U32 magicNumber = ZSTD_readBE32(src); - if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); - ctx->phase = 1; - ctx->expected = ZSTD_blockHeaderSize; - return 0; - } - - /* Decompress : block header */ - if (ctx->phase == 1) - { - blockProperties_t bp; - size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); - if (ZSTDv01_isError(blockSize)) return blockSize; - if (bp.blockType == bt_end) - { - ctx->expected = 0; - ctx->phase = 0; - } - else - { - ctx->expected = blockSize; - ctx->bType = bp.blockType; - ctx->phase = 2; - } - - return 0; - } - - /* Decompress : block content */ - { - size_t rSize; - switch(ctx->bType) - { - case bt_compressed: - rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); - break; - case bt_raw : - rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); - break; - case bt_rle : - return ERROR(GENERIC); /* not yet handled */ - break; - case bt_end : /* should never happen (filtered at phase 1) */ - rSize = 0; - break; - default: - return ERROR(GENERIC); - } - ctx->phase = 1; - ctx->expected = ZSTD_blockHeaderSize; - if (ZSTDv01_isError(rSize)) return rSize; - ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); - return rSize; - } - -} diff --git a/zstandard_android/src/legacy/zstd_v02.c b/zstandard_android/src/legacy/zstd_v02.c deleted file mode 100644 index 6d39b6e..0000000 --- a/zstandard_android/src/legacy/zstd_v02.c +++ /dev/null @@ -1,3465 +0,0 @@ -/* - * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - -#include /* size_t, ptrdiff_t */ -#include "zstd_v02.h" -#include "../common/compiler.h" -#include "../common/error_private.h" - - -/****************************************** -* Compiler-specific -******************************************/ -#if defined(_MSC_VER) /* Visual Studio */ -# include /* _byteswap_ulong */ -# include /* _byteswap_* */ -#endif - - -/* ****************************************************************** - mem.h - low-level memory access routines - Copyright (C) 2013-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ -#ifndef MEM_H_MODULE -#define MEM_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - -/****************************************** -* Includes -******************************************/ -#include <stddef.h> /* size_t, ptrdiff_t */ -#include <string.h> /* memcpy */ - - -/**************************************************************** -* Basic Types -*****************************************************************/ -#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# if defined(_AIX) -# include <inttypes.h> -# else -# include <stdint.h> /* intptr_t */ -# endif - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef int16_t S16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef int64_t S64; -#else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef signed short S16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; - typedef signed long long S64; -#endif - - -/**************************************************************** -* Memory I/O -*****************************************************************/ - -MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } -MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } - -MEM_STATIC unsigned MEM_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} - -MEM_STATIC U16 MEM_read16(const void* memPtr) -{ - U16 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC U32 MEM_read32(const void* memPtr) -{ - U32 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC U64 MEM_read64(const void* memPtr) -{ - U64 val; memcpy(&val, memPtr, sizeof(val)); return val; -} - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) -{ - memcpy(memPtr, &value, sizeof(value)); -} - -MEM_STATIC U16 MEM_readLE16(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read16(memPtr); - else - { - const BYTE* p = (const BYTE*)memPtr; - return (U16)(p[0] + (p[1]<<8)); - } -} - -MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) -{ - if (MEM_isLittleEndian()) - { - MEM_write16(memPtr, val); - } - else - { - BYTE* p = (BYTE*)memPtr; - p[0] = (BYTE)val; - p[1] = (BYTE)(val>>8); - } -} - -MEM_STATIC U32 MEM_readLE24(const void* memPtr) -{ - return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); -} - -MEM_STATIC U32 MEM_readLE32(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read32(memPtr); -
else - { - const BYTE* p = (const BYTE*)memPtr; - return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); - } -} - - -MEM_STATIC U64 MEM_readLE64(const void* memPtr) -{ - if (MEM_isLittleEndian()) - return MEM_read64(memPtr); - else - { - const BYTE* p = (const BYTE*)memPtr; - return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) - + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); - } -} - - -MEM_STATIC size_t MEM_readLEST(const void* memPtr) -{ - if (MEM_32bits()) - return (size_t)MEM_readLE32(memPtr); - else - return (size_t)MEM_readLE64(memPtr); -} - -#if defined (__cplusplus) -} -#endif - -#endif /* MEM_H_MODULE */ - - -/* ****************************************************************** - bitstream - Part of NewGen Entropy library - header file (to include) - Copyright (C) 2013-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ -#ifndef BITSTREAM_H_MODULE -#define BITSTREAM_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* -* This API consists of small unitary functions, which highly benefit from being inlined. -* Since link-time-optimization is not available for all compilers, -* these functions are defined into a .h to be included. -*/ - - -/********************************************** -* bitStream decompression API (read backward) -**********************************************/ -typedef struct -{ - size_t bitContainer; - unsigned bitsConsumed; - const char* ptr; - const char* start; -} BIT_DStream_t; - -typedef enum { BIT_DStream_unfinished = 0, - BIT_DStream_endOfBuffer = 1, - BIT_DStream_completed = 2, - BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ - /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */ - -MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); -MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); - - -/****************************************** -* unsafe API -******************************************/ -MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); -/* faster, but works only if nbBits >= 1 */ - - - -/**************************************************************** -* Helper functions -****************************************************************/ -MEM_STATIC unsigned BIT_highbit32 (U32 val) -{ -# if defined(_MSC_VER) /* Visual */ - unsigned long r; - return _BitScanReverse(&r, val) ? (unsigned)r : 0; -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - unsigned r; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; - return r; -# endif -} - - - -/********************************************************** -* bitStream decoding -**********************************************************/ - -/*!BIT_initDStream -* Initialize a BIT_DStream_t. -* @bitD : a pointer to an already allocated BIT_DStream_t structure -* @srcBuffer must point at the beginning of a bitStream -* @srcSize must be the exact size of the bitStream -* @result : size of stream (== srcSize) or an errorCode if a problem is detected -*/ -MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) -{ - if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } - - if (srcSize >= sizeof(size_t)) /* normal case */ - { - U32 contain32; - bitD->start = (const char*)srcBuffer; - bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); - bitD->bitContainer = MEM_readLEST(bitD->ptr); - contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; - if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ - bitD->bitsConsumed = 8 - BIT_highbit32(contain32); - } - else - { - U32 contain32; - bitD->start = (const char*)srcBuffer; - bitD->ptr = bitD->start; - bitD->bitContainer = *(const BYTE*)(bitD->start); - switch(srcSize) - { - case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); - /* fallthrough */ - case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); - /* fallthrough */ - case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); - /* fallthrough */ - case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; - /* fallthrough */ - case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; - /* fallthrough */ - case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; - /* fallthrough */ - default:; - } - contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; - if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ - bitD->bitsConsumed = 8 - BIT_highbit32(contain32); - bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; - } - - return srcSize; -} - -MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* 
bitD, U32 nbBits) -{ - const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; - return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); -} - -/*! BIT_lookBitsFast : -* unsafe version; only works if nbBits >= 1 */ -MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) -{ - const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; - return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); -} - -MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) -{ - bitD->bitsConsumed += nbBits; -} - -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) -{ - size_t value = BIT_lookBits(bitD, nbBits); - BIT_skipBits(bitD, nbBits); - return value; -} - -/*!BIT_readBitsFast : -* unsafe version; only works if nbBits >= 1 */ -MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) -{ - size_t value = BIT_lookBitsFast(bitD, nbBits); - BIT_skipBits(bitD, nbBits); - return value; -} - -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) -{ - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ - return BIT_DStream_overflow; - - if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) - { - bitD->ptr -= bitD->bitsConsumed >> 3; - bitD->bitsConsumed &= 7; - bitD->bitContainer = MEM_readLEST(bitD->ptr); - return BIT_DStream_unfinished; - } - if (bitD->ptr == bitD->start) - { - if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; - return BIT_DStream_completed; - } - { - U32 nbBytes = bitD->bitsConsumed >> 3; - BIT_DStream_status result = BIT_DStream_unfinished; - if (bitD->ptr - nbBytes < bitD->start) - { - nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ - result = BIT_DStream_endOfBuffer; - } - bitD->ptr -= nbBytes; - bitD->bitsConsumed -= nbBytes*8; - bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ - return result; - } -} - -/*! BIT_endOfDStream -* @return Tells if DStream has reached its exact end -*/ -MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) -{ - return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); -} - -#if defined (__cplusplus) -} -#endif - -#endif /* BITSTREAM_H_MODULE */ -/* ****************************************************************** - Error codes and messages - Copyright (C) 2013-2015, Yann Collet - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ -#ifndef ERROR_H_MODULE -#define ERROR_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - - -/****************************************** -* Compiler-specific -******************************************/ -#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define ERR_STATIC static inline -#elif defined(_MSC_VER) -# define ERR_STATIC static __inline -#elif defined(__GNUC__) -# define ERR_STATIC static __attribute__((unused)) -#else -# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - -/****************************************** -* Error Management -******************************************/ -#define PREFIX(name) ZSTD_error_##name - -#define ERROR(name) (size_t)-PREFIX(name) - -#define ERROR_LIST(ITEM) \ - ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ - ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ - ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ - ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ - ITEM(PREFIX(maxCode)) - -#define ERROR_GENERATE_ENUM(ENUM) ENUM, -typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ - -#define ERROR_CONVERTTOSTRING(STRING) #STRING, -#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) -static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; - -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } - -ERR_STATIC const char* ERR_getErrorName(size_t code) -{ - static const char* codeError = "Unspecified error code"; - if (ERR_isError(code)) return ERR_strings[-(int)(code)]; - return codeError; -} - - -#if defined (__cplusplus) -} -#endif - -#endif /* ERROR_H_MODULE */ -/* -Constructor and Destructor of type FSE_CTable - Note that its size depends on 'tableLog' and 'maxSymbolValue' */ -typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -typedef unsigned FSE_DTable; /* don't allocate that. 
It's just a way to be more restrictive than void* */ - - -/* ****************************************************************** - FSE : Finite State Entropy coder - header file for static linking (only) - Copyright (C) 2013-2015, Yann Collet - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ -#if defined (__cplusplus) -extern "C" { -#endif - - -/****************************************** -* Static allocation -******************************************/ -/* FSE buffer bounds */ -#define FSE_NCOUNTBOUND 512 -#define FSE_BLOCKBOUND(size) (size + (size>>7)) -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog)) - - -/****************************************** -* FSE symbol decompression API -******************************************/ -typedef struct -{ - size_t state; - const void* table; /* precise table may vary, depending on U16 */ -} FSE_DState_t; - -static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt); - -static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); - -static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr); - - -/****************************************** -* FSE unsafe API -******************************************/ -static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ - - -/****************************************** -* Implementation of inline functions -******************************************/ - -/* decompression */ - -typedef struct { - U16 tableLog; - U16 fastMode; -} FSE_DTableHeader; /* sizeof U32 */ - -typedef struct -{ - unsigned short newState; - unsigned char symbol; - unsigned char nbBits; -} FSE_decode_t; /* size == U32 */ - -MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) -{ - FSE_DTableHeader DTableH; - memcpy(&DTableH, dt, sizeof(DTableH)); - DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); - BIT_reloadDStream(bitD); - DStatePtr->table = dt + 1; -} - -MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) -{ - const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - const U32 nbBits = DInfo.nbBits; - BYTE symbol = DInfo.symbol; - size_t lowBits = BIT_readBits(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} -
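/* A minimal usage sketch of the decode primitives above (a hypothetical helper,
   not part of the original v0.2 sources): FSE_initDState() seeds the state with
   tableLog bits, then each FSE_decodeSymbol() call emits one byte and refills
   the state with that symbol's nbBits. Assumes `dt` was built by FSE_buildDTable()
   and that `cSrc` holds one complete FSE bitstream. */
static size_t FSE_decodeLoop_sketch(BYTE* dst, size_t dstCapacity,
                                    const void* cSrc, size_t cSrcSize,
                                    const FSE_DTable* dt)
{
    BYTE* op = dst;
    BYTE* const oend = dst + dstCapacity;
    BIT_DStream_t bitD;
    FSE_DState_t state;
    size_t const initResult = BIT_initDStream(&bitD, cSrc, cSrcSize);
    if (ERR_isError(initResult)) return initResult;
    FSE_initDState(&state, &bitD, dt);
    while (1)
    {
        /* overflow means bits were consumed past the stream start : corrupt input */
        if (BIT_reloadDStream(&bitD) > BIT_DStream_completed) return ERROR(corruption_detected);
        if (op == oend) return ERROR(dstSize_tooSmall);   /* dst full while symbols remain */
        *op++ = FSE_decodeSymbol(&state, &bitD);
        if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state)) break;   /* clean end of stream and state */
    }
    return (size_t)(op - dst);   /* number of decoded bytes */
}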
-MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) -{ - const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; - const U32 nbBits = DInfo.nbBits; - BYTE symbol = DInfo.symbol; - size_t lowBits = BIT_readBitsFast(bitD, nbBits); - - DStatePtr->state = DInfo.newState + lowBits; - return symbol; -} - -MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) -{ - return DStatePtr->state == 0; -} - - -#if defined (__cplusplus) -} -#endif -/* ****************************************************************** - Huff0 : Huffman coder, part of New Generation Entropy library - header file for static linking (only) - Copyright (C) 2013-2015, Yann Collet - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ - -#if defined (__cplusplus) -extern "C" { -#endif - -/****************************************** -* Static allocation macros -******************************************/ -/* Huff0 buffer bounds */ -#define HUF_CTABLEBOUND 129 -#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ - -/* static allocation of Huff0's DTable */ -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ - unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog } -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ - unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog } -#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \ - unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog } - - -#if defined (__cplusplus) -} -#endif -/* - zstd - standard compression library - Header File - Copyright (C) 2014-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - zstd source repository : https://github.com/Cyan4973/zstd - - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -#if defined (__cplusplus) -extern "C" { -#endif - -/* ************************************* -* Includes -***************************************/ -#include <stddef.h> /* size_t */ - - -/* ************************************* -* Version -***************************************/ -#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ -#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ -#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ -#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) - - -/* ************************************* -* Advanced functions -***************************************/ -typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ - -#if defined (__cplusplus) -} -#endif -/* - zstd - standard compression library - Header File for static linking only - Copyright (C) 2014-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - zstd source repository : https://github.com/Cyan4973/zstd - - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/* The objects defined into this file should be considered experimental. - * They are not labelled stable, as their prototype may change in the future. - * You can use them for tests, provide feedback, or if you can endure risk of future changes.
- */ - -#if defined (__cplusplus) -extern "C" { -#endif - -/* ************************************* -* Streaming functions -***************************************/ - -typedef struct ZSTDv02_Dctx_s ZSTD_DCtx; - -/* - Use above functions alternatively. - ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). - ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. - Result is the number of bytes regenerated within 'dst'. - It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. -*/ - -/* ************************************* -* Prefix - version detection -***************************************/ -#define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/ - - -#if defined (__cplusplus) -} -#endif -/* ****************************************************************** - FSE : Finite State Entropy coder - Copyright (C) 2013-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ - -#ifndef FSE_COMMONDEFS_ONLY - -/**************************************************************** -* Tuning parameters -****************************************************************/ -/* MEMORY_USAGE : -* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) -* Increasing memory usage improves compression ratio -* Reduced memory usage can improve speed, due to cache effect -* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -#define FSE_MAX_MEMORY_USAGE 14 -#define FSE_DEFAULT_MEMORY_USAGE 13 - -/* FSE_MAX_SYMBOL_VALUE : -* Maximum symbol value authorized. 
-* Required for proper stack allocation */ -#define FSE_MAX_SYMBOL_VALUE 255 - - -/**************************************************************** -* template functions type & suffix -****************************************************************/ -#define FSE_FUNCTION_TYPE BYTE -#define FSE_FUNCTION_EXTENSION - - -/**************************************************************** -* Byte symbol type -****************************************************************/ -#endif /* !FSE_COMMONDEFS_ONLY */ - - -/**************************************************************** -* Compiler specifics -****************************************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# define FORCE_INLINE static __forceinline -# include <intrin.h> /* For Visual 2005 */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ -#else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/**************************************************************** -* Includes -****************************************************************/ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memcpy, memset */ -#include <stdio.h> /* printf (debug) */ - -/**************************************************************** -* Constants -*****************************************************************/ -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) -#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) -#define FSE_MIN_TABLELOG 5 - -#define FSE_TABLELOG_ABSOLUTE_MAX 15 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" -#endif - - -/**************************************************************** -* Error Management -****************************************************************/ -#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - - -/**************************************************************** -* Complex types -****************************************************************/ -typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; - - -/**************************************************************** -* Templates -****************************************************************/ -/* - designed to be included - for type-specific functions (template emulation in C) - Objective is to write these functions only once, for improved maintenance -*/ - -/* safety checks */ -#ifndef FSE_FUNCTION_EXTENSION -# error "FSE_FUNCTION_EXTENSION must be defined" -#endif -#ifndef FSE_FUNCTION_TYPE -# error "FSE_FUNCTION_TYPE must be defined" -#endif - -/* Function names */ -#define FSE_CAT(X,Y) X##Y -#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) -#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - - -/* Function templates */ - -#define FSE_DECODE_TYPE FSE_decode_t - -static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } - -static size_t FSE_buildDTable -(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) -{ - void* ptr = dt+1; - FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; - FSE_DTableHeader DTableH; - const U32 tableSize = 1 << tableLog; - const U32 tableMask = tableSize-1; - const U32 step = FSE_tableStep(tableSize); - U16
symbolNext[FSE_MAX_SYMBOL_VALUE+1]; - U32 position = 0; - U32 highThreshold = tableSize-1; - const S16 largeLimit= (S16)(1 << (tableLog-1)); - U32 noLarge = 1; - U32 s; - - /* Sanity Checks */ - if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); - if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); - - /* Init, lay down lowprob symbols */ - DTableH.tableLog = (U16)tableLog; - for (s=0; s<=maxSymbolValue; s++) - { - if (normalizedCounter[s]==-1) - { - tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; - symbolNext[s] = 1; - } - else - { - if (normalizedCounter[s] >= largeLimit) noLarge=0; - symbolNext[s] = normalizedCounter[s]; - } - } - - /* Spread symbols */ - for (s=0; s<=maxSymbolValue; s++) - { - int i; - for (i=0; i<normalizedCounter[s]; i++) - { - tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; - position = (position + step) & tableMask; - while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ - } - } - - if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ - - /* Build Decoding table */ - { - U32 i; - for (i=0; i<tableSize; i++) - { - FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol); - U16 nextState = symbolNext[symbol]++; - tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) ); - tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize); - } - } - - DTableH.fastMode = (U16)noLarge; - memcpy(dt, &DTableH, sizeof(DTableH)); - return 0; -} - - -/**************************************************************** -* FSE helper functions -****************************************************************/ -static unsigned FSE_isError(size_t code) { return ERR_isError(code); } - - -/**************************************************************** -* FSE NCount encoding-decoding -****************************************************************/ -static short FSE_abs(short a) -{ - return a<0 ? -a : a; -} - -static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, - const void* headerBuffer, size_t hbSize) -{ - const BYTE* const istart = (const BYTE*) headerBuffer; - const BYTE* const iend = istart + hbSize; - const BYTE* ip = istart; - int nbBits; - int remaining; - int threshold; - U32 bitStream; - int bitCount; - unsigned charnum = 0; - int previous0 = 0; - - if (hbSize < 4) return ERROR(srcSize_wrong); - bitStream = MEM_readLE32(ip); - nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ - if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); - bitStream >>= 4; - bitCount = 4; - *tableLogPtr = nbBits; - remaining = (1<<nbBits)+1; - threshold = 1<<nbBits; - nbBits++; - - while ((remaining>1) && (charnum<=*maxSVPtr)) - { - if (previous0) - { - unsigned n0 = charnum; - while ((bitStream & 0xFFFF) == 0xFFFF) - { - n0+=24; - if (ip < iend-5) - { - ip+=2; - bitStream = MEM_readLE32(ip) >> bitCount; - } - else - { - bitStream >>= 16; - bitCount+=16; - } - } - while ((bitStream & 3) == 3) - { - n0+=3; - bitStream>>=2; - bitCount+=2; - } - n0 += bitStream & 3; - bitCount += 2; - if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); - while (charnum < n0) normalizedCounter[charnum++] = 0; - if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) - { - ip += bitCount>>3; - bitCount &= 7; - bitStream = MEM_readLE32(ip) >> bitCount; - } - else - bitStream >>= 2; - } - { - const short max = (short)((2*threshold-1)-remaining); - short count; - - if ((bitStream & (threshold-1)) < (U32)max) - { - count = (short)(bitStream & (threshold-1)); - bitCount += nbBits-1; - } - else - { - count = (short)(bitStream & (2*threshold-1)); - if (count >= threshold) count -= max; - bitCount += nbBits; - } - - count--; /* extra accuracy */ - remaining -= FSE_abs(count); - normalizedCounter[charnum++] = count; - previous0 = !count; - while (remaining < threshold) - { - nbBits--; - threshold >>= 1; - } - - { - if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) - { - ip += bitCount>>3; - bitCount &= 7; - } - else - { - bitCount -= (int)(8 * (iend - 4 - ip)); - ip = iend - 4; - } - bitStream = MEM_readLE32(ip) >> (bitCount & 31); - } - } - } - if (remaining != 1) return ERROR(GENERIC); - *maxSVPtr = charnum-1; - - ip += (bitCount+7)>>3; - if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); - return ip-istart; -} - - -/********************************************************* -* Decompression (Byte symbols) -*********************************************************/ -static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ - - DTableH->tableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - FSE_decode_t* const dinfo =
(FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; s<=maxSymbolValue; s++) - { - dinfo[s].newState = 0; - dinfo[s].symbol = (BYTE)s; - dinfo[s].nbBits = (BYTE)nbBits; - } - - return 0; -} - -FORCE_INLINE size_t FSE_decompress_usingDTable_generic( - void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt, const unsigned fast) -{ - BYTE* const ostart = (BYTE*) dst; - BYTE* op = ostart; - BYTE* const omax = op + maxDstSize; - BYTE* const olimit = omax-3; - - BIT_DStream_t bitD; - FSE_DState_t state1; - FSE_DState_t state2; - size_t errorCode; - - /* Init */ - errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ - if (FSE_isError(errorCode)) return errorCode; - - FSE_initDState(&state1, &bitD, dt); - FSE_initDState(&state2, &bitD, dt); - -#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) - - /* 4 symbols per loop */ - for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4) - { - op[0] = FSE_GETSYMBOL(&state1); - - if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ - BIT_reloadDStream(&bitD); - - op[1] = FSE_GETSYMBOL(&state2); - - if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ - { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } - - op[2] = FSE_GETSYMBOL(&state1); - - if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ - BIT_reloadDStream(&bitD); - - op[3] = FSE_GETSYMBOL(&state2); - } - - /* tail */ - /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ - while (1) - { - if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) - break; - - *op++ = FSE_GETSYMBOL(&state1); - - if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) - break; - - *op++ = FSE_GETSYMBOL(&state2); - } - - /* end ?
*/ - if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) - return op-ostart; - - if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ - - return ERROR(corruption_detected); -} - - -static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize, - const FSE_DTable* dt) -{ - FSE_DTableHeader DTableH; - memcpy(&DTableH, dt, sizeof(DTableH)); - - /* select fast mode (static) */ - if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) -{ - const BYTE* const istart = (const BYTE*)cSrc; - const BYTE* ip = istart; - short counting[FSE_MAX_SYMBOL_VALUE+1]; - DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ - unsigned tableLog; - unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; - size_t errorCode; - - if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ - - /* normal FSE decoding mode */ - errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); - if (FSE_isError(errorCode)) return errorCode; - if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ - ip += errorCode; - cSrcSize -= errorCode; - - errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); - if (FSE_isError(errorCode)) return errorCode; - - /* always return, even if it is an error code */ - return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); -} - - - -#endif /* FSE_COMMONDEFS_ONLY */ -/* ****************************************************************** - Huff0 : Huffman coder, part of New Generation Entropy library - Copyright (C) 2013-2015, Yann Collet. - - BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ - -/**************************************************************** -* Compiler specifics -****************************************************************/ -#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -/* inline is defined */ -#elif defined(_MSC_VER) -# define inline __inline -#else -# define inline /* disable inline */ -#endif - - -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/**************************************************************** -* Includes -****************************************************************/ -#include <stdlib.h> /* malloc, free, qsort */ -#include <string.h> /* memcpy, memset */ -#include <stdio.h> /* printf (debug) */ - -/**************************************************************** -* Error Management -****************************************************************/ -#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - - -/****************************************** -* Helper functions -******************************************/ -static unsigned HUF_isError(size_t code) { return ERR_isError(code); } - -#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ -#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ -#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ -#define HUF_MAX_SYMBOL_VALUE 255 -#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) -# error "HUF_MAX_TABLELOG is too large !" -#endif - - - /********************************************************* -* Huff0 : Huffman block decompression -*********************************************************/ -typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ - -typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ - -typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; - -/*! HUF_readStats - Read compact Huffman tree, saved by HUF_writeCTable - @huffWeight : destination buffer - @return : size read from `src` -*/ -static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, - U32* nbSymbolsPtr, U32* tableLogPtr, - const void* src, size_t srcSize) -{ - U32 weightTotal; - U32 tableLog; - const BYTE* ip = (const BYTE*) src; - size_t iSize; - size_t oSize; - U32 n; - - if (!srcSize) return ERROR(srcSize_wrong); - iSize = ip[0]; - //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ...
*/ - - if (iSize >= 128) /* special header */ - { - if (iSize >= (242)) /* RLE */ - { - static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; - oSize = l[iSize-242]; - memset(huffWeight, 1, hwSize); - iSize = 0; - } - else /* Incompressible */ - { - oSize = iSize - 127; - iSize = ((oSize+1)/2); - if (iSize+1 > srcSize) return ERROR(srcSize_wrong); - if (oSize >= hwSize) return ERROR(corruption_detected); - ip += 1; - for (n=0; n<oSize; n+=2) - { - huffWeight[n] = ip[n/2] >> 4; - huffWeight[n+1] = ip[n/2] & 15; - } - } - } - else /* header compressed with FSE (normal case) */ - { - if (iSize+1 > srcSize) return ERROR(srcSize_wrong); - oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ - if (FSE_isError(oSize)) return oSize; - } - - /* collect weight stats */ - memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); - weightTotal = 0; - for (n=0; n<oSize; n++) - { - if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); - rankStats[huffWeight[n]]++; - weightTotal += (1 << huffWeight[n]) >> 1; - } - if (weightTotal == 0) return ERROR(corruption_detected); - - /* get last non-null symbol weight (implied, total must be 2^n) */ - tableLog = BIT_highbit32(weightTotal) + 1; - if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); - { - U32 total = 1 << tableLog; - U32 rest = total - weightTotal; - U32 verif = 1 << BIT_highbit32(rest); - U32 lastWeight = BIT_highbit32(rest) + 1; - if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ - huffWeight[oSize] = (BYTE)lastWeight; - rankStats[lastWeight]++; - } - - /* check tree construction validity */ - if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ - - /* results */ - *nbSymbolsPtr = (U32)(oSize+1); - *tableLogPtr = tableLog; - return iSize+1; -} - - -/**************************/ -/* single-symbol decoding */ -/**************************/ - -static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) -{ - BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; - U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ - U32 tableLog = 0; - const BYTE* ip = (const BYTE*) src; - size_t iSize = ip[0]; - U32 nbSymbols = 0; - U32 n; - U32 nextRankStart; - void* ptr = DTable+1; - HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; - - HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ - //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ...
*/ - - iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); - if (HUF_isError(iSize)) return iSize; - - /* check result */ - if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ - DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ - - /* Prepare ranks */ - nextRankStart = 0; - for (n=1; n<=tableLog; n++) - { - U32 current = nextRankStart; - nextRankStart += (rankVal[n] << (n-1)); - rankVal[n] = current; - } - - /* fill DTable */ - for (n=0; n<nbSymbols; n++) - { - const U32 w = huffWeight[n]; - const U32 length = (1 << w) >> 1; - U32 i; - HUF_DEltX2 D; - D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); - for (i = rankVal[w]; i < rankVal[w] + length; i++) - dt[i] = D; - rankVal[w] += length; - } - - return iSize; -} - -static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) -{ - const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ - const BYTE c = dt[val].byte; - BIT_skipBits(Dstream, dt[val].nbBits); - return c; -} - -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ - *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ - HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) - -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) - -static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) -{ - BYTE* const pStart = p; - - /* up to 4 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) - { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - } - - /* closer to the end */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - /* no more data to retrieve from bitstream, hence no need to reload */ - while (p < pEnd) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - return pEnd-pStart; -} - - -static size_t HUF_decompress4X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const U16* DTable) -{ - if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - - { - const BYTE* const istart = (const BYTE*) cSrc; - BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - - const void* ptr = DTable; - const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; - const U32 dtLog = DTable[0]; - size_t errorCode; - - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - const size_t length1 = MEM_readLE16(istart); - const size_t length2 = MEM_readLE16(istart+2); - const size_t length3 = MEM_readLE16(istart+4); - size_t length4; - const BYTE* const istart1 = istart + 6; /* jumpTable */ - const BYTE* const istart2 = istart1 + length1; - const BYTE* const istart3 = istart2 + length2; - const BYTE* const istart4 = istart3 + length3; - const size_t segmentSize = (dstSize+3) / 4; - BYTE* const opStart2 = ostart + segmentSize; - BYTE* const opStart3 = opStart2 + segmentSize; - BYTE* const opStart4 = opStart3 + segmentSize; - BYTE* op1 = ostart; - BYTE* op2 = opStart2; - BYTE* op3 = opStart3; - BYTE* op4 = opStart4; - U32 endSignal; - - length4 = cSrcSize - (length1 + length2 + length3 + 6); -
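/* Descriptive note (added commentary, not in the original source): the compressed
   input begins with a 6-byte jump table, i.e. the three little-endian U16 values
   read above as length1..length3 for the first three substreams; the fourth
   substream takes whatever remains, so length4 is obtained by subtraction and,
   being a size_t, wraps around on inconsistent lengths, which the overflow check
   immediately below then catches. */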
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - errorCode = BIT_initDStream(&bitD1, istart1, length1); - if (HUF_isError(errorCode)) return errorCode; - errorCode = BIT_initDStream(&bitD2, istart2, length2); - if (HUF_isError(errorCode)) return errorCode; - errorCode = BIT_initDStream(&bitD3, istart3, length3); - if (HUF_isError(errorCode)) return errorCode; - errorCode = BIT_initDStream(&bitD4, istart4, length4); - if (HUF_isError(errorCode)) return errorCode; - - /* 16-32 symbols per loop (4-8 symbols per stream) */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) - { - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - } - - /* check corruption */ - if (op1 > opStart2) return ERROR(corruption_detected); - if (op2 > opStart3) return ERROR(corruption_detected); - if (op3 > opStart4) return ERROR(corruption_detected); - /* note : op4 supposed already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); - - /* check */ - endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endSignal) return ERROR(corruption_detected); - - /* decoded size */ - return dstSize; - } -} - - -static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); - const BYTE* ip = (const BYTE*) cSrc; - size_t errorCode; - - errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); - if (HUF_isError(errorCode)) return errorCode; - if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); - ip += errorCode; - cSrcSize -= errorCode; - - return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); -} - - -/***************************/ -/* double-symbols decoding */ -/***************************/ - -static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, - const U32* rankValOrigin, const int minWeight, - const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, - U32 nbBitsBaseline, U16 baseSeq) -{ - HUF_DEltX4 DElt; - U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; - U32 s; - - /* get pre-calculated rankVal */ - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); - - /* fill skipped values */ - if (minWeight>1) - { - U32 i, skipSize = rankVal[minWeight]; - MEM_writeLE16(&(DElt.sequence), baseSeq); - DElt.nbBits = (BYTE)(consumed); - DElt.length = 1; - for (i = 0; i < skipSize; i++) - DTable[i] = DElt; - } - - /* fill DTable */ - for (s=0; s<sortedListSize; s++) - { - const U32 symbol = sortedSymbols[s].symbol; - const U32 weight = sortedSymbols[s].weight; - const U32 nbBits = nbBitsBaseline - weight; - const U32 length = 1 << (sizeLog-nbBits); - const U32 start = rankVal[weight]; - U32 i = start; - const U32 end = start + length; - - MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); - DElt.nbBits = (BYTE)(nbBits + consumed); - DElt.length = 2; - do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */ - -
rankVal[weight] += length; - } -} - -typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; - -static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, - const sortedSymbol_t* sortedList, const U32 sortedListSize, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, - const U32 nbBitsBaseline) -{ - U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; - const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ - const U32 minBits = nbBitsBaseline - maxWeight; - U32 s; - - memcpy(rankVal, rankValOrigin, sizeof(rankVal)); - - /* fill DTable */ - for (s=0; s<sortedListSize; s++) - { - const U16 symbol = sortedList[s].symbol; - const U32 weight = sortedList[s].weight; - const U32 nbBits = nbBitsBaseline - weight; - const U32 start = rankVal[weight]; - const U32 length = 1 << (targetLog-nbBits); - - if (targetLog-nbBits >= minBits) /* enough room for a second symbol */ - { - U32 sortedRank; - int minWeight = nbBits + scaleLog; - if (minWeight < 1) minWeight = 1; - sortedRank = rankStart[minWeight]; - HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, - rankValOrigin[nbBits], minWeight, - sortedList+sortedRank, sortedListSize-sortedRank, - nbBitsBaseline, symbol); - } - else - { - U32 i; - const U32 end = start + length; - HUF_DEltX4 DElt; - - MEM_writeLE16(&(DElt.sequence), symbol); - DElt.nbBits = (BYTE)(nbBits); - DElt.length = 1; - for (i = start; i < end; i++) - DTable[i] = DElt; - } - rankVal[weight] += length; - } -} - -static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) -{ - BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; - sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; - U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; - U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; - U32* const rankStart = rankStart0+1; - rankVal_t rankVal; - U32 tableLog, maxW, sizeOfSort, nbSymbols; - const U32 memLog = DTable[0]; - const BYTE* ip = (const BYTE*) src; - size_t iSize = ip[0]; - void* ptr = DTable; - HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; - - HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ - if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); - //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ...
*/ - - iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); - if (HUF_isError(iSize)) return iSize; - - /* check result */ - if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ - - /* find maxWeight */ - for (maxW = tableLog; rankStats[maxW]==0; maxW--) - {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ - - /* Get start index of each weight */ - { - U32 w, nextRankStart = 0; - for (w=1; w<=maxW; w++) - { - U32 current = nextRankStart; - nextRankStart += rankStats[w]; - rankStart[w] = current; - } - rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - sizeOfSort = nextRankStart; - } - - /* sort symbols by weight */ - { - U32 s; - for (s=0; s<nbSymbols; s++) - { - U32 w = weightList[s]; - U32 r = rankStart[w]++; - sortedSymbol[r].symbol = (BYTE)s; - sortedSymbol[r].weight = (BYTE)w; - } - rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ - } - - /* Build rankVal */ - { - const U32 minBits = tableLog+1 - maxW; - const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */ - U32 nextRankVal = 0; - U32 w, consumed; - U32* const rankVal0 = rankVal[0]; - for (w=1; w<=maxW; w++) - { - U32 current = nextRankVal; - nextRankVal += rankStats[w] << (w+rescale); - rankVal0[w] = current; - } - for (consumed = minBits; consumed <= memLog - minBits; consumed++) - { - U32* const rankValPtr = rankVal[consumed]; - for (w = 1; w <= maxW; w++) - { - rankValPtr[w] = rankVal0[w] >> consumed; - } - } - } - - HUF_fillDTableX4(dt, memLog, - sortedSymbol, sizeOfSort, - rankStart0, rankVal, maxW, - tableLog+1); - - return iSize; -} - - -static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) -{ - const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 2); - BIT_skipBits(DStream, dt[val].nbBits); - return dt[val].length; -} - -static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) -{ - const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 1); - if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); - else - { - if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) - { - BIT_skipBits(DStream, dt[val].nbBits); - if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) - DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol.
-        }
-    }
-    return 1;
-}
-
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
-    {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-
-static size_t HUF_decompress4X4_usingDTable(
-           void* dst, size_t dstSize,
-           const void* cSrc, size_t cSrcSize,
-           const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const void* ptr = DTable;
-        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;   /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/**********************************/
-/* quad-symbol decoding           */
-/**********************************/
-typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
-typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
-
-/* recursive, up to level 3; may benefit from