Repository: cctv18/oppo_oplus_realme_sm8650
Branch: main
Commit: 576e6d98e7eb
Files: 30
Total size: 2.3 MB
Directory structure:
gitextract_pmjp290u/
├── .github/
│ └── workflows/
│ ├── build-test.yml
│ ├── clean_workflow.yml
│ ├── cleaner.yml
│ ├── fastbuild_6.1.115.yml
│ ├── fastbuild_6.1.118.yml
│ ├── fastbuild_6.1.128.yml
│ ├── fastbuild_6.1.134.yml
│ ├── fastbuild_6.1.141.yml
│ ├── fastbuild_6.1.57.yml
│ └── fastbuild_6.1.75.yml
├── .old_yml/
│ ├── build_6.1.118.yml
│ ├── build_6.1.57.yml
│ └── build_6.1.75.yml
├── README.md
├── lib/
│ ├── ccache-arm64
│ └── ccache-x86-64
├── local/
│ ├── builder_6.1.115.sh
│ ├── builder_6.1.118.sh
│ ├── builder_6.1.128.sh
│ ├── builder_6.1.134.sh
│ ├── builder_6.1.141.sh
│ ├── builder_6.1.57.sh
│ └── builder_6.1.75.sh
├── other_patch/
│ ├── 69_hide_stuff.patch
│ ├── apk_sign.patch
│ └── config.patch
└── zram_patch/
├── 001-lz4-old.patch
├── 001-lz4.patch
├── 002-zstd.patch
└── lz4armv8.S
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/build-test.yml
================================================
name: 发布测试
env:
  TZ: Asia/Shanghai
  ANDROID_VERSION: 'android14'
  KERNEL_VERSION: '6.1'
  SUB_VERSION: '118'
  KERNEL_NAME: 'android14-11-o-gca13bffobf09'
  CCACHE_KEY: ccache-neov4-6.1.118
on:
  workflow_dispatch:
    inputs:
      ksu_type:
        description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
        required: true
        type: choice
        default: 'resukisu'
        options:
          - 'resukisu'
          - 'sukisu'
          - 'ksunext'
          - 'ksu'
          - 'none'
      susfs_enable:
        description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
        required: true
        type: boolean
        # boolean inputs take YAML booleans, not quoted strings
        default: true
      kpm_enable:
        description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
        required: true
        type: choice
        default: 'false'
        options:
          - 'false'
          - 'builtin'
          - 'kpn'
      lz4_enable:
        description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁及 LZ4KD 补丁'
        required: true
        type: boolean
        default: true
      lz4kd_enable:
        description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
        required: true
        type: boolean
        default: false
      bbr_enable:
        description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
        required: true
        type: choice
        default: 'false'
        options:
          - 'false'
          - 'true'
          - 'default'
      better_net:
        description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)'
        required: true
        type: boolean
        default: false
      ssg_enable:
        description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
        required: true
        type: boolean
        default: true
      rekernel_enable:
        description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
        required: true
        type: boolean
        default: false
      baseband_guard:
        description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
        required: true
        type: boolean
        default: true
      kernel_suffix:
        description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
        required: false
        type: string
        default: ''
jobs:
  build:
    runs-on: ubuntu-latest
    outputs:
      ksuver: ${{ steps.ksu_version.outputs.ksuver }}
      ak3name: ${{ steps.create_zip.outputs.ak3name }}
    steps:
      - name: 添加KernelSU版本号
        id: ksu_version
        run: |
          KSU_VERSION="114514"
          echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
          echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
          #export KSUVER=$KSU_VERSION
      - name: 克隆 AnyKernel3 并打包
        id: create_zip
        run: |
          # Map the selected KSU branch to the display name used in the zip name
          if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
            KSU_TYPENAME="SukiSU"
          elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
            KSU_TYPENAME="ReSukiSU"
          elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
            KSU_TYPENAME="KSUNext"
          elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
            KSU_TYPENAME="KSU"
          else
            KSU_TYPENAME="none"
          fi
          #echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
          if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
            AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
          else
            AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
          fi
          truncate -s 30M testfile
          zip -r ./$AK3_NAME ./testfile
          echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
      - name: 上传 ZIP 工件
        # actions/upload-artifact@v7 does not exist (latest major is v4) and
        # v4 has no `archive` input. The artifact name must be set explicitly:
        # the default is "artifact", so the release job's download by
        # needs.build.outputs.ak3name would otherwise fail to find it.
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.create_zip.outputs.ak3name }}
          path: ${{ github.workspace }}/AnyKernel*.zip
          # the zip payload is already compressed; skip re-compression
          compression-level: 0
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加12 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} Android 15 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/clean_workflow.yml
================================================
name: 清理仓库工作流
on:
  workflow_dispatch:
    inputs:
      clean_mode:
        description: "清理模式(amount--保留数量, date--保留天数):"
        required: true
        type: choice
        default: 'amount'
        options:
          - 'amount'
          - 'date'
      clean_to_keep:
        description: "保留最近工作流的天数/数量:"
        required: true
        type: string
        default: "0"
jobs:
  clean-workflows:
    runs-on: ubuntu-latest
    permissions:
      actions: write
      contents: read
    steps:
      - name: 检出当前仓库
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: 安装GitHub CLI
        run: |
          sudo apt-mark hold firefox
          sudo apt-mark hold libc-bin
          # -y is required: CI has no TTY, so an unconfirmed `apt purge`
          # reads EOF at the prompt, aborts, and fails the step.
          sudo apt purge -y man-db
          sudo apt update
          sudo apt install -y jq
          type -p gh >/dev/null || sudo apt install -y gh
          gh auth login --with-token <<< "${{ secrets.GITHUB_TOKEN }}"
          gh --version
- name: 按保留数量清理工作流
if: ${{ github.event.inputs.clean_mode == 'amount' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
repo_full_name=$GITHUB_REPOSITORY
# 获取所有工作流(按创建时间倒序)
all_workflows=$(gh api -X GET "repos/$repo_full_name/actions/runs?per_page=100" |
jq --arg current_run "$GITHUB_RUN_ID" '.workflow_runs | map(select(.id != ($current_run | tonumber)))')
# 计算实际需要保留的工作流数量
total_workflows=$(echo "$all_workflows" | jq length)
keep_count=$(( ${{ github.event.inputs.clean_to_keep }} < total_workflows ? ${{ github.event.inputs.clean_to_keep }} : total_workflows ))
# 获取要保留的最新工作流ID
keep_ids=$(echo "$all_workflows" |
jq -r 'sort_by(.created_at) | reverse | .[0:'$keep_count'] | .[].id')
echo "保留最近 $keep_count 个工作流 (ID: $keep_ids)"
# 删除不在保留列表中的工作流
echo "$all_workflows" | jq -r '.[] | select(.status != "in_progress" and .status != "queued") | .id' | while read id; do
if ! grep -qw "$id" <<< "$keep_ids"; then
echo "删除工作流 $id"
gh api -X DELETE "repos/$repo_full_name/actions/runs/$id" --silent
fi
done
- name: 按保留天数清理工作流
if: ${{ github.event.inputs.clean_mode == 'date'}}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
repo_full_name=$GITHUB_REPOSITORY
cutoff_date=$(date -d "${{ github.event.inputs.clean_to_keep }} days ago" -u +"%Y-%m-%dT%H:%M:%SZ")
echo "删除 $cutoff_date 之前的所有工作流..."
# 获取需要删除的工作流ID
to_delete_ids=$(gh api -X GET "repos/$repo_full_name/actions/runs?created=<$cutoff_date&per_page=100" |
jq -r --arg current_run "$GITHUB_RUN_ID" '.workflow_runs[] |
select(.id != ($current_run | tonumber) and .status != "in_progress" and .status != "queued") | .id')
for id in $to_delete_ids; do
echo "删除工作流 $id (创建于 $cutoff_date 之前)"
gh api -X DELETE "repos/$repo_full_name/actions/runs/$id" --silent
done
================================================
FILE: .github/workflows/cleaner.yml
================================================
name: 清理全部ccache缓存
on:
  workflow_dispatch:
    inputs:
      confirm:
        # safety gate: the user must type DELETE literally to proceed
        description: "输入DELETE确认清理所有缓存"
        required: true
        default: ""
jobs:
  clean-caches:
    runs-on: ubuntu-latest
    permissions:
      actions: write
    steps:
      # Abort the whole job early unless the confirmation matches exactly
      - name: 验证确认输入
        if: ${{ github.event.inputs.confirm != 'DELETE' }}
        run: |
          echo "::error::必须输入DELETE确认清理操作!"
          exit 1
- name: 获取并删除所有缓存
uses: actions/github-script@v6
with:
script: |
const { owner, repo } = context.repo;
let totalDeleted = 0;
// 获取所有缓存
const caches = await github.rest.actions.getActionsCacheList({
owner,
repo,
per_page: 100
});
// 删除匹配的缓存
for (const cache of caches.data.actions_caches) {
if (cache.key.startsWith('ccache-')) {
console.log(`删除缓存: ${cache.key}`);
await github.rest.actions.deleteActionsCacheById({
owner,
repo,
cache_id: cache.id
});
totalDeleted++;
}
}
return `成功删除 ${totalDeleted} 个缓存`;
- name: 重置环境
run: |
rm -rf $HOME/.ccache/*
rm -rf $HOME/.ccache_6.1.57/*
rm -rf $HOME/.ccache_6.1.75/*
rm -rf $HOME/.ccache_6.1.118/*
sudo apt clean
sudo journalctl --vacuum-time=1s
sudo rm -rf /var/log/*
docker system prune -af || true
sudo rm -rf /tmp/*
echo "容器环境已重置!"
echo "清理后空间:"
df -h
================================================
FILE: .github/workflows/fastbuild_6.1.115.yml
================================================
name: 6.1.115 (天玑特供)欧加真OKI内核快速构建
env:
  TZ: Asia/Shanghai
  ANDROID_VERSION: 'android14'
  KERNEL_VERSION: '6.1'
  SUB_VERSION: '115'
  KERNEL_NAME: 'android14-11-o-gca13bffobf09'
  CCACHE_KEY: ccache-ecsv2-6.1.115
on:
  workflow_dispatch:
    inputs:
      ksu_type:
        description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
        required: true
        type: choice
        default: 'resukisu'
        options:
          - 'resukisu'
          - 'sukisu'
          - 'ksunext'
          - 'ksu'
          - 'none'
      susfs_enable:
        description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
        required: true
        type: boolean
        # boolean inputs take YAML booleans, not quoted strings
        default: true
      kpm_enable:
        description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
        required: true
        type: choice
        default: 'false'
        options:
          - 'false'
          - 'builtin'
          - 'kpn'
      lz4_enable:
        description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
        required: true
        type: boolean
        default: true
      lz4kd_enable:
        description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
        required: true
        type: boolean
        default: false
      bbr_enable:
        description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
        required: true
        type: choice
        default: 'false'
        options:
          - 'false'
          - 'true'
          - 'default'
      better_net:
        description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持,天玑机型可能导致bug,建议关闭)'
        required: true
        type: boolean
        default: false
      ssg_enable:
        description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
        required: true
        type: boolean
        default: true
      rekernel_enable:
        description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
        required: true
        type: boolean
        default: false
      baseband_guard:
        description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
        required: true
        type: boolean
        default: true
      ccache_update:
        description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
        required: true
        type: boolean
        default: false
      ccache_debug:
        description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
        required: true
        type: boolean
        default: false
      kernel_suffix:
        description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
        required: false
        type: string
        default: ''
jobs:
  build:
    runs-on: ubuntu-latest
    outputs:
      ksuver: ${{ steps.ksu_version.outputs.ksuver }}
      ak3name: ${{ steps.create_zip.outputs.ak3name }}
    permissions:
      actions: write
      contents: read
    steps:
      - name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
        run: |
          rm -rf kernel_workspace
          mkdir kernel_workspace
          cd kernel_workspace
          echo "当前仓库:$GITHUB_REPOSITORY"
          echo "当前分支:$GITHUB_REF_NAME"
          # The download/extract jobs below run in parallel; each job's PID is
          # recorded so failures can be detected at the end — a bare `wait`
          # always exits 0 and previously swallowed background failures.
          {
            sudo apt-mark hold firefox
            sudo apt-mark hold libc-bin
            # -y is required: CI has no TTY, an unconfirmed purge aborts
            sudo apt purge -y man-db
            sudo rm -rf /var/lib/man-db/auto-update
            sudo apt update
            sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev
          } &
          PID_APT=$!
          #旧版完整指令:(由于经过验证大部分指令已内置于GitHub Action环境中,故进行精简)
          #sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
          # latest ccache-ECS build (ccache specialized for kernel builds)
          {
            wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache
            sudo cp -f ./ccache /usr/bin/ccache
            sudo chmod +x /usr/bin/ccache
            rm -f ./ccache
          } &
          PID_CCACHE=$!
          echo "正在克隆源码仓库..."
          {
            aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_oneplus_mt6989/archive/refs/heads/oneplus/mt6989_v_15.0.2_ace5_race.zip -o common.zip
            unzip -q common.zip
            mv "android_kernel_oneplus_mt6989-oneplus-mt6989_v_15.0.2_ace5_race" common
            rm -rf common.zip
          } &
          PID_SRC=$!
          echo "正在克隆llvm-Clang20工具链..."
          {
            mkdir -p clang20
            aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip
            unzip -q clang.zip -d clang20
            rm -rf clang.zip
          } &
          PID_CLANG=$!
          echo "正在克隆构建工具..."
          {
            aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip
            unzip -q build-tools.zip
            rm -rf build-tools.zip
          } &
          PID_TOOLS=$!
          # `wait <pid>` propagates that job's exit status; with the runner's
          # default `bash -e` any failed download now fails the step.
          for pid in "$PID_APT" "$PID_CCACHE" "$PID_SRC" "$PID_CLANG" "$PID_TOOLS"; do
            wait "$pid"
          done
          echo "所有源码及llvm-Clang20工具链初始化完成!"
          echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
          rm common/android/abi_gki_protected_exports_* || true
          for f in common/scripts/setlocalversion; do
            sed -i 's/ -dirty//g' "$f"
            sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
          done
      # Compute the per-kernel-version ccache directory and publish it (plus
      # the size cap) to all later steps via GITHUB_ENV.
      - name: 配置ccache目录
        run: |
          echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
          echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
          echo "当前磁盘空间:"
          df -h
          echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
      # Restore the ccache directory from the repository's Actions cache;
      # restore-keys give prefix fallbacks when the exact key misses.
      # NOTE(review): verify that an actions/cache v5 major actually exists —
      # the widely published major version is v4; confirm before relying on it.
      - name: 载入当前版本内核的 ccache缓存
        uses: actions/cache@v5
        id: ccache-restore
        with:
          path: ${{ env.CCACHE_DIR }}
          key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
          restore-keys: |
            ${{ env.CCACHE_KEY }}-${{ runner.os }}-
            ${{ env.CCACHE_KEY }}-
      # Fallback: when the Actions cache restore produced nothing, pull a
      # prebuilt "public" ccache archive from the companion release repo.
      - name: 拉取公共预置 ccache 缓存
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "检查本地缓存状态..."
          # Non-empty CCACHE_DIR means the cache step above already restored it
          if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
            echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
            exit 0
          fi
          echo "未命中缓存,尝试拉取最新公共 ccache ..."
          mkdir -p ${{ env.CCACHE_DIR }}
          FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
          if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
            echo "成功下载 $FILE_NAME,正在解压..."
            tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
            echo "公共 ccache 恢复完成!"
          else
            # Missing archive is non-fatal: fall through to a full cold build
            echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
          fi
      # When a cache refresh was requested, delete the repo's stored cache
      # entry so the post-job cache save can write a fresh one under this key.
      - name: 清除旧 ccache 缓存
        if: inputs.ccache_update
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "正在清除仓库中的旧 ccache 缓存..."
          if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
            echo "成功删除旧的 ccache 缓存!"
          else
            echo "旧缓存不存在或已被清理!"
          fi
- name: 初始化并配置ccache
run: |
# 设置ccache环境变量
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# 确保ccache目录存在
mkdir -p "$CCACHE_DIR"
# 每次运行都重新配置缓存大小
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# 显示初始缓存状态
echo "ccache初始状态:"
ccache -s
# 如果缓存恢复命中,显示详细信息
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
- name: 添加KernelSU
id: ksu_version
run: |
# 进入内核工作目录
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# 获取当前 Git 提交的短哈希 (8位)
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# 尝试最多 3 次获取 KernelSU API 版本号
for i in {1..3}; do
# 从远程 Makefile 中提取 KSU_API_VERSION
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# 查找第一个包含版本定义的行
grep -m1 "KSU_VERSION_API :=" |
# 提取等号后的值
awk -F'= ' '{print $2}' |
# 删除所有空白字符
tr -d '[:space:]')
# 如果成功获取到版本号则跳出循环,否则等待 1 秒后重试
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# 如果获取失败,使用默认版本号 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# 将 API 版本号存储到 GitHub 环境变量
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# 创建版本定义模板&版本格式函数: 使用获取的提交哈希和固定后缀
# KSU_VERSION_API: API 版本定义
# KSU_VERSION_FULL: 完整版本定义
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# 清理内核 Makefile 中的旧版本定义
# 删除版本函数
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# 删除 API 版本定义
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# 删除完整版本定义
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# 在 REPO_OWNER 行后插入新版本定义
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# 验证修改结果
grep -A10 "REPO_OWNER" kernel/Makefile # 检查插入点后的内容
grep "KSU_VERSION_FULL" kernel/Makefile # 确认版本定义存在
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
      - name: 应用 KernelSU & SUSFS 补丁
        if: inputs.susfs_enable
        run: |
          cd kernel_workspace
          if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
            echo "正在添加susfs补丁..."
            git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
            wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
            cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
            cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
            cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
            cd ./common
            # `|| true`: best-effort patching — hunks may fuzz or already be
            # applied against this OKI tree; failures are tolerated deliberately
            patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
            patch -p1 -N -F 3 < 69_hide_stuff.patch || true
            cd ..
          else
            echo "已选择无内置KernelSU模式,跳过susfs配置..."
          fi
          if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
            echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
            # Relies on the susfs4ksu clone above ("ksu" implies != "none")
            cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
            cd ./KernelSU
            patch -p1 < 10_enable_susfs_for_ksu.patch || true
          fi
      - name: 应用lz4 1.10.0 & zstd 1.5.7补丁
        if: inputs.lz4_enable
        run: |
          echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
          cd kernel_workspace
          # Clone this very repository (into a directory named after the
          # actor) to obtain the zram patches shipped alongside the workflows
          git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
          cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
          cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
          cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
          cd ./common
          # Best-effort apply; failures are tolerated deliberately
          git apply -p1 < 001-lz4.patch || true
          patch -p1 < 002-zstd.patch || true
      - name: 应用 lz4kd 补丁
        if: inputs.lz4kd_enable
        run: |
          echo "正在添加lz4kd补丁…"
          cd kernel_workspace
          # Clone the SukiSU patch collection only if not already present
          if [ ! -d "SukiSU_patch" ]; then
            git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
          fi
          cd common
          # Copy the lz4k sources (headers, lib, crypto) into the kernel tree
          cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
          cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
          cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
          cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
          # Best-effort apply (tolerates fuzz / already-applied hunks)
          patch -p1 -F 3 < lz4kd.patch || true
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
      - name: 添加 KSU & 其他配置项
        run: |
          cd kernel_workspace
          # NOTE(review): CONFIG_KSU=y is appended even when ksu_type=none;
          # with no KernelSU source present the symbol should be ignored by
          # Kconfig, but guarding it would be cleaner — confirm intent.
          echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
          # Built-in KPM is only meaningful on the (Re)SukiSU branches
          if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
            echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
          fi
          if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
            echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
          fi
          # Support for the Mountify (backslashxx/mountify) module
          echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
          echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
          # Extra zram compression algorithms required by the lz4kd patch
          if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
            echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
            echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
            echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
            echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
            echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
          fi
          # Enable O2 optimization
          echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
          # Disable the defconfig check
          sed -i 's/check_defconfig//' ./common/build.config.gki
          # Skip installing uapi headers into usr/include to save build time
          echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
#创建 CC (编译器) 包装器
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
#创建 LD (链接器) 包装器
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# 测试时间劫持功能是否正常工作
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # 执行所有传入的参数
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
#在构建内核的同时清除不必要的.NET, Android NDK, Haskell, CodeQL运行库,清理空间且不阻塞后续步骤运行
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# 编译后时间劫持二次校验
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
#为AK3添加注释(调试信息)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
zip -z ../$AK3_NAME < ./ak3.log
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
archive: true
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加Ace5竞速版 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.118.yml
================================================
name: 6.1.118 欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '118'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.118
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
default: 'true'
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: 'true'
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: 'false'
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)'
required: true
type: boolean
default: 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: 'true'
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: 'false'
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: 'true'
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: 'false'
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
#旧版完整指令:(由于经过验证大部分指令已内置于GitHub Action环境中,故进行精简)
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
#使用最新版ccache-ECS(特化优化内核编译缓存,大幅提升二次不同配置编译速度)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..."
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_common_oneplus_sm8650/archive/refs/heads/oneplus/sm8650_b_16.0.0_oneplus12.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_common_oneplus_sm8650-oneplus-sm8650_b_16.0.0_oneplus12" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 配置ccache目录
run: |
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
- name: 初始化并配置ccache
run: |
# 设置ccache环境变量
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# 确保ccache目录存在
mkdir -p "$CCACHE_DIR"
# 每次运行都重新配置缓存大小
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# 显示初始缓存状态
echo "ccache初始状态:"
ccache -s
# 如果缓存恢复命中,显示详细信息
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
- name: 添加KernelSU
id: ksu_version
run: |
# 进入内核工作目录
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# 获取当前 Git 提交的短哈希 (8位)
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# 尝试最多 3 次获取 KernelSU API 版本号
for i in {1..3}; do
# 从远程 Makefile 中提取 KSU_API_VERSION
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# 查找第一个包含版本定义的行
grep -m1 "KSU_VERSION_API :=" |
# 提取等号后的值
awk -F'= ' '{print $2}' |
# 删除所有空白字符
tr -d '[:space:]')
# 如果成功获取到版本号则跳出循环,否则等待 1 秒后重试
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# 如果获取失败,使用默认版本号 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# 将 API 版本号存储到 GitHub 环境变量
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# 创建版本定义模板&版本格式函数: 使用获取的提交哈希和固定后缀
# KSU_VERSION_API: API 版本定义
# KSU_VERSION_FULL: 完整版本定义
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# 清理内核 Makefile 中的旧版本定义
# 删除版本函数
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# 删除 API 版本定义
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# 删除完整版本定义
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# 在 REPO_OWNER 行后插入新版本定义
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# 验证修改结果
grep -A10 "REPO_OWNER" kernel/Makefile # 检查插入点后的内容
grep "KSU_VERSION_FULL" kernel/Makefile # 确认版本定义存在
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
# Appends BBR/CUBIC/Vegas/NV/Westwood+/HTCP/Brutal congestion-control options to the
# GKI defconfig. The step always runs; the shell guard skips all work unless
# bbr_enable is 'true' or 'default'.
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
# Enable the TCP congestion-control selection framework (required by all entries below)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR: model-based congestion control that estimates link capacity directly instead of reacting to loss/latency signals.
# High throughput, low latency, loss-tolerant - but on phones the continuous bandwidth/RTT probing and high-frequency
# computation raise CPU and radio power draw (more heat/battery use). Mobile base-station buffers are shallow (~50 ms),
# so BBR's ProbeRTT window reduction can cause throughput dips, and network hand-offs (WiFi -> 5G) force re-probing,
# adding latency or rate oscillation (brief stutter). It is also RTT-unfair: competing with loss-based flows
# (e.g. BBR + CUBIC mixes) it can crowd out other apps' bandwidth (background updates hurting foreground video).
# For these reasons Android ships CUBIC - the best balance of throughput, stability, compatibility and power - as the
# default. Before enabling BBR, consider whether you actually need it.
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
# CUBIC: Android's default algorithm; best balance of throughput, stability, compatibility and power; strong fairness
# and resilience to network fluctuation with low computational overhead - the right choice for most mobile scenarios
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
# Vegas: delay-based; treats rising round-trip time (RTT) as congestion, growing/shrinking the window accordingly
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
# New Vegas: improved Vegas with better RTT measurement and fairness; detects congestion more accurately and
# coexists better with Reno/CUBIC
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
# Westwood+: sets cwnd/ssthresh from ACK-rate bandwidth estimates; fast recovery, suited to wireless links
# (distinguishes congestion loss from radio loss)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
# HTCP: loss-based AIMD with an RTT-scaled growth factor combining delay and loss signals; tuned for
# high-latency high-speed links
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Brutal: maximizes throughput via active probing and aggressive preemption - no cwnd cap, ignores light (<20%) loss,
# no ProbeRTT-like phase; can grab 90%+ of bandwidth from Reno/CUBIC flows. Useful on lossy weak networks (public
# Wi-Fi, cellular) and for upload-heavy streaming/cloud gaming, and to resist carrier QoS throttling. However, TCP
# Brutal only works when the application sets a per-connection bandwidth parameter, which almost no Android app
# does - so never set it as the default congestion-control algorithm.
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
# Enable Samsung's SSG multi-queue I/O scheduler (plus its cgroup support).
# Known to prevent boot on a few devices (e.g. OnePlus 12) - gated on ssg_enable.
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Enable Re-Kernel support (works with Freezer/NoActive-style apps to improve
# background-app freezing).
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
# Baseband-guard (BBG): kernel-level protection that blocks writes to non-user
# partitions. Enables CONFIG_BBG, runs the upstream setup script inside the kernel
# tree, and registers the LSM in the default LSM list.
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
# Fix: progress message had a duplicated word ("启用启用" -> "启用")
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
# Fetch and run the Baseband-guard setup script inside the kernel source tree
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Append baseband_guard after selinux in the default LSM list, unless already present
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
# Replace the kernel localversion suffix: use the user-provided kernel_suffix when
# set, otherwise fall back to the stock KERNEL_NAME suffix.
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
# Rewrite the final `echo "$res"` of setlocalversion to emit the fixed suffix
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Build the kernel with Clang 20 + ccache. Compiler/linker invocations are wrapped
# with LD_PRELOAD faketime/fakestat shims so timestamps embedded in the build are
# reproducible; unneeded runner toolchains are deleted in the background for space.
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
# NOTE(review): lib/libfakestat.so and lib/libfaketimeMT.so are not present in the
# repository tree shown here (lib/ only lists ccache binaries) - confirm these URLs
# resolve on this branch, otherwise wget fails the step.
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
# Create the CC (compiler) wrapper
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
# Create the LD (linker) wrapper
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# Verify that the time-hijack preload actually works
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # forward all arguments to the command under test
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
# While the kernel builds, delete unneeded .NET / Android NDK / Haskell / CodeQL
# runtimes in the background to free disk space without blocking later commands
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# Re-verify time hijacking after the build
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
# Persist the ccache directory when an update was requested or the earlier restore missed.
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
# Post-process the built Image with a KPM patcher: the (Re)SukiSU built-in patcher
# for kpm_enable=builtin (only with a sukisu/resukisu KSU branch), or KPatch-Next's
# kptools for kpm_enable=kpn. Either path writes oImage and swaps it in as Image.
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
# Package the built Image into an AnyKernel3 flashable zip, expose its file name as
# step output `ak3name`, and attach build metadata as a zip archive comment.
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
# NOTE(review): zram.zip is fetched from the repository root, but the tree shown
# here contains no zram.zip - verify it exists on this branch.
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
# Attach build metadata to the zip as an archive comment (debug info)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
zip -z ../$AK3_NAME < ./ak3.log
# Upload the ccache debug log when requested, even if earlier steps failed.
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
# NOTE(review): `archive` is not an input of the official actions/upload-artifact
# (and v7 is not an official tag) - presumably this resolves to a fork; confirm.
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
archive: true
# Upload the packaged AnyKernel3 zip for the release job.
- name: 上传 ZIP 工件
# NOTE(review): no `name:` is set here, yet the release job downloads by
# needs.build.outputs.ak3name - with the official upload-artifact the default
# artifact name is "artifact"; confirm this fork derives the name from the file.
uses: actions/upload-artifact@v7
with:
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
# Publish job: downloads the AnyKernel3 zip produced by the build job and creates a
# GitHub release tagged with a timestamp, with generated release notes.
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
# Compute FULL_VERSION, timestamps, the release tag prefix and a display name
# for the selected KSU branch; all are exported via GITHUB_ENV for later steps.
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# The heredoc delimiter is quoted so the shell expands nothing inside it; every
# ${{ }} below is substituted by the Actions runner before this script executes.
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加12 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.128.yml
================================================
# Workflow header and manual-dispatch inputs for the 6.1.128 (Dimensity) fast build.
# Fix: inputs declared `type: boolean` had quoted string defaults ('true'/'false'),
# which mismatch the declared type (flagged by actionlint); they are now real YAML
# booleans. Defaults of `type: choice` inputs stay strings, matching their options.
name: 6.1.128 (天玑特供)欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '128'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.128
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
default: true
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: true
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: false
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持,天玑机型可能导致bug,建议关闭)'
required: true
type: boolean
default: false
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: true
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: false
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: true
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: false
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: false
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
# Exposed to downstream jobs: the KernelSU version and the AnyKernel3 zip name.
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
# Install build dependencies and fetch the kernel source, Clang 20 toolchain and
# build tools as parallel background jobs; `wait` joins them before post-processing
# (strip ABI-protected exports, remove the "-dirty" localversion suffix).
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
# Original full package list (trimmed: most of these tools are preinstalled on the GitHub Actions runner image):
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
# Use the bundled latest ccache-ECS build (specialized for kernel-compile caching; much faster rebuilds across differing configs)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..."
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_oneplus_mt6897/archive/refs/heads/oneplus/mt6897_v_15.0.0_oneplus_pad.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_oneplus_mt6897-oneplus-mt6897_v_15.0.0_oneplus_pad" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# Derive a per-kernel-version ccache directory and size cap; exported via GITHUB_ENV.
- name: 配置ccache目录
run: |
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
# Restore the ccache cache for this kernel version/branch (exact key first,
# then progressively looser prefix matches).
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
# If the actions/cache restore missed, seed the ccache dir from a prebuilt public
# ccache release (cctv18/public_ccache); otherwise fall back to a clean full build.
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
# When a cache refresh was requested, delete the previous cache entry so the
# save step can publish a fresh one under the same key.
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
# Configure ccache for this run: size cap, compression, and a status printout.
- name: 初始化并配置ccache
run: |
# Set ccache environment variables
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# Make sure the cache directory exists
mkdir -p "$CCACHE_DIR"
# Re-apply the size cap on every run
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# Show the initial cache state
echo "ccache初始状态:"
ccache -s
# On a cache-restore hit, print verbose statistics
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
# Vendor the selected KernelSU flavor into the kernel tree and derive a version
# number; exports KSUVER (env) and ksuver (step output) for later steps/jobs.
- name: 添加KernelSU
id: ksu_version
run: |
# Enter the kernel workspace
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# Short (8-char) hash of the checked-out KernelSU commit
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# Try up to 3 times to fetch the KernelSU API version number
for i in {1..3}; do
# Extract KSU_API_VERSION from the remote Makefile
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# take the first line containing the version definition
grep -m1 "KSU_VERSION_API :=" |
# keep the value after the equals sign
awk -F'= ' '{print $2}' |
# strip all whitespace
tr -d '[:space:]')
# Break on success, otherwise wait 1 s and retry
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# Fall back to 3.1.7 when every fetch attempt failed
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# Publish the API version as a job-wide env var
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# Version-definition template built from the commit hash and a fixed suffix:
# KSU_VERSION_API: API version definition
# KSU_VERSION_FULL: full version definition
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# Remove stale version definitions from the kernel Makefile:
# the version function
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# the API version definition
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# the full version definition
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# Insert the new definitions right after the REPO_OWNER line
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# Custom version number derived from the commit count; 114514 on failure
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# Export to env and step output
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# Sanity-check the Makefile edits
grep -A10 "REPO_OWNER" kernel/Makefile # content after the insertion point
grep "KSU_VERSION_FULL" kernel/Makefile # version definition present
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# Custom version number derived from the commit count; 114514 on failure
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# Export to env and step output
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
# Add WildKSU manager support to KernelSU Next
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
# Apply the susfs kernel patches (plus the hide-stuff patch) to the kernel tree;
# for stock KernelSU, additionally patch susfs support into the KernelSU sources.
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
# Backport lz4 1.10.0 and zstd 1.5.7 into the kernel tree from this repo's
# zram_patch directory (clones this repository into a dir named after the actor).
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
# Import the LZ4K/LZ4KD sources from SukiSU_patch and apply the matching zram patch
# for this kernel version.
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
# Append the CONFIG_KSU_SUSFS_* feature switches to the GKI defconfig.
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Append KSU/KPM/zram/optimization switches to the GKI defconfig and relax the
# defconfig check so the appended options survive the build.
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Support for the Mountify (backslashxx/mountify) module
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Enable the O2 optimization option
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
# Disable the defconfig consistency check
sed -i 's/check_defconfig//' ./common/build.config.gki
# Skip installing uapi headers into usr/include to save build time
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
# Enable extended networking features (BPF stream parser, netfilter extensions,
# ipset variants, IPv6 NAT) and apply a config patch to hide CONFIG_IP6_NF_NAT
# from vintf compatibility checks without disabling the feature.
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
# Enable the BPF stream parser for high-performance traffic processing and better monitoring/analysis
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
# Netfilter extensions: address-type matching and IP-set matching for more flexible firewall rules
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
# IP-set framework and its data-structure variants: efficient large-scale IP management with low memory use
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
# Enable IPv6 network address translation
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
# Some devices' vintf compatibility checks flag CONFIG_IP6_NF_NAT=y at boot ("There's an internal
# problem with your device..."), so apply a config patch that hides the option from the check
# while still compiling the feature
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
# NOTE(review): this step has no step-level `if:`; the bbr_enable gate is the
# shell `if` below, so the step always runs but is a no-op when set to 'false'.
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
# NOTE(review): CONFIG_TCP_CONG_BRUTAL is not an upstream kconfig symbol —
# presumably provided by this kernel tree's patches; confirm it exists there.
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
# Append the Samsung SSG multi-queue I/O scheduler options (plus its cgroup
# support) to the GKI defconfig; output lines are identical to writing each
# echo by hand.
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
for ssg_opt in CONFIG_MQ_IOSCHED_SSG CONFIG_MQ_IOSCHED_SSG_CGROUP; do
echo "${ssg_opt}=y" >> ./common/arch/arm64/configs/gki_defconfig
done
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
# Enable the Re-Kernel driver in the GKI defconfig (same appended line as
# the original echo; path written out instead of cd'ing first).
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
printf '%s\n' "CONFIG_REKERNEL=y" >> kernel_workspace/common/arch/arm64/configs/gki_defconfig
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
# Enable Baseband-guard (BBG): sets CONFIG_BBG, runs the upstream setup
# script inside ./common, then registers baseband_guard in the LSM list.
# Fix: log line previously read "正在启用启用…" (duplicated word).
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Within the `config LSM` block, append ",baseband_guard" after "selinux" on
# the `default` line unless it is already present (idempotent re-run safe).
# NOTE(review): the /^help$/ range end assumes an unindented "help" line in
# security/Kconfig — confirm against the actual kernel tree.
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
# Replace the final `echo "$res"` in scripts/setlocalversion with a fixed
# "-<suffix>" string, so the kernel's localversion is the user-supplied suffix
# (or the default KERNEL_NAME) instead of the computed one.
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
# `\$s` = sed address "last line"; `\\\$res` matches the literal text $res.
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Two-stage kernel build: (1) gki_defconfig, (2) Image, both via ccache+clang.
# Stage 2 wraps CC/LD in scripts that LD_PRELOAD libfakestat/libfaketimeMT so
# file timestamps and build time are pinned — presumably to keep ccache hits
# and the build deterministic across runs (TODO confirm intent).
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
#创建 CC (编译器) 包装器
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
#创建 LD (链接器) 包装器
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# 测试时间劫持测试是否正常工作
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # 执行所有传入的参数
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
#在构建内核的同时清除不必要的.NET, Android NDK, Haskell, CodeQL运行库,清理空间且不阻塞后续步骤运行
# NOTE(review): these rm jobs are backgrounded and never `wait`ed — they run
# concurrently with make by design.
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
# defconfig pass uses plain `ccache clang`; the Image pass uses the wrappers
# so every compile/link runs under the faketime preloads.
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# 编译后时间劫持二次校验
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
# Persist the ccache directory when the user asked for a refresh, or when the
# earlier restore step did not get an exact key hit.
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
# Post-process the built Image for KPM support. Two mutually exclusive paths:
# 'builtin' (only with sukisu/resukisu) uses SukiSU's patch_linux; 'kpn' uses
# KPatch-Next's kptools. Both replace Image with the patched oImage.
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
# Package the built Image into an AnyKernel3 flashable zip, name it from the
# KSU branch + versions, export the name as step output `ak3name`, and attach
# a build-info log as the zip's archive comment.
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
# NOTE(review): zram.zip does not appear in the repository listing —
# confirm it exists at this branch root, otherwise this wget fails.
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
#为AK3添加注释(调试信息)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
# `zip -z` reads the archive comment from stdin (here: the whole ak3.log).
zip -z ../$AK3_NAME < ./ak3.log
# Upload the ccache log even if earlier steps failed, but only when the user
# opted into debug output.
# NOTE(review): `archive:` is not an input of the official
# actions/upload-artifact, and @v7 is ahead of the official releases —
# presumably a fork is pinned here; confirm which action this resolves to.
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
archive: true
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
# Fix: name the artifact explicitly. The release job downloads it via
# `name: ${{ needs.build.outputs.ak3name }}`, but without a `name:` here
# the action falls back to its default artifact name and the
# download-by-name in the release job cannot find it.
name: ${{ steps.create_zip.outputs.ak3name }}
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
# Release job: fetch the AnyKernel3 zip built above, derive display strings,
# then create a GitHub Release (tag = TAG_HEAD + timestamp) with gh CLI.
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
# NOTE(review): this downloads by the build job's ak3name output; the build
# job's upload step must publish the artifact under that same name.
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# The 'EOF' quoting stops the *shell* from expanding anything, but the
# ${{ }} expressions are substituted by GitHub before the script runs.
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} MT6897 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加平板 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.134.yml
================================================
name: 6.1.134 (天玑特供)欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '134'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.134
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
# Fix: `type: boolean` inputs take a YAML boolean default, not the string
# 'true'/'false'; defaults below are now unquoted (choice/string defaults
# stay quoted, since those are strings).
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
default: true
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: true
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: false
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持,天玑机型可能导致bug,建议关闭)'
required: true
type: boolean
default: false
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: true
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: false
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: true
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: false
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: false
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
# Bootstrap step: apt deps, custom ccache binary, kernel source, clang20 and
# build-tools are all fetched as concurrent `&&…&` background groups, then
# joined with `wait`. Finally strips ABI export lists and the "-dirty" suffix.
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
#旧版完整指令:(由于经过验证大部分指令已内置于GitHub Action环境中,故进行精简)
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
#使用最新版ccache-ECS(特化优化内核编译缓存,大幅提升二次不同配置编译速度)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..."
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_oneplus_mt6989/archive/refs/heads/oneplus/mt6989_b_16.0.0_ace5_race.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_oneplus_mt6989-oneplus-mt6989_b_16.0.0_ace5_race" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
# Block until every background download/extract group above has finished.
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 配置ccache目录
run: |
# Publish the per-kernel-version ccache location and size limit to later
# steps via GITHUB_ENV, then report disk usage.
{
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
echo "CCACHE_MAXSIZE=3G"
} >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
# Restore the ccache directory; exact key first, then progressively broader
# prefixes so any older cache for this kernel version can still be reused.
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
# Fallback cache source: if the actions/cache restore produced nothing, pull a
# pre-seeded ccache tarball from the cctv18/public_ccache release assets.
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
# A non-empty CCACHE_DIR means the cache step already restored something.
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
# When a cache refresh is requested, delete the previously saved Actions cache
# entry so the save step at the end of the job can write a fresh one.
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
# Configure ccache limits/compression for this run. The exports only live for
# this step's shell; the build step re-exports the same variables itself.
- name: 初始化并配置ccache
run: |
# 设置ccache环境变量
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# 确保ccache目录存在
mkdir -p "$CCACHE_DIR"
# 每次运行都重新配置缓存大小
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# 显示初始缓存状态
echo "ccache初始状态:"
ccache -s
# 如果缓存恢复命中,显示详细信息
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
# Install the selected KernelSU flavor into the source tree and compute a
# version number, exported both to GITHUB_ENV (KSUVER) and as step output
# `ksuver` for the release job.
- name: 添加KernelSU
id: ksu_version
run: |
# 进入内核工作目录
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# 获取当前 Git 提交的短哈希 (8位)
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
# NOTE(review): KSU_VERSION is not set yet at this point, so this export
# is a no-op (exports an empty value); the real value is computed below.
export KSU_VERSION=$KSU_VERSION
# 尝试最多 3 次获取 KernelSU API 版本号
for i in {1..3}; do
# 从远程 Makefile 中提取 KSU_API_VERSION
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# 查找第一个包含版本定义的行
grep -m1 "KSU_VERSION_API :=" |
# 提取等号后的值
awk -F'= ' '{print $2}' |
# 删除所有空白字符
tr -d '[:space:]')
# 如果成功获取到版本号则跳出循环,否则等待 1 秒后重试
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# 如果获取失败,使用默认版本号 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# 将 API 版本号存储到 GitHub 环境变量
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# 创建版本定义模板&版本格式函数: 使用获取的提交哈希和固定后缀
# KSU_VERSION_API: API 版本定义
# KSU_VERSION_FULL: 完整版本定义
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# 清理内核 Makefile 中的旧版本定义
# 删除版本函数
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# 删除 API 版本定义
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# 删除完整版本定义
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# 在 REPO_OWNER 行后插入新版本定义
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# 验证修改结果
grep -A10 "REPO_OWNER" kernel/Makefile # 检查插入点后的内容
grep "KSU_VERSION_FULL" kernel/Makefile # 确认版本定义存在
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
# Derives a commit count from the API's paginated Link header.
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
# Apply the SUSFS kernel patches (and the hide-stuff patch) to ./common; for
# the official KSU flavor, also patch the KernelSU tree itself.
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
# `|| true`: partial hunk failures are tolerated by design here.
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
# Only reached with a susfs4ksu clone available, since "ksu" != "none".
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
# Apply the lz4 1.10.0 and zstd 1.5.7 upgrade patches from this repo's
# zram_patch directory (repo is cloned into a dir named after the actor).
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
# lz4 via git apply, zstd via patch; both tolerate failures (|| true).
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
# Copy the SukiSU lz4k sources into the kernel tree and apply the
# version-matched lz4kd patch.
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
# Append every SUSFS feature gate to the GKI defconfig; the loop emits the
# same CONFIG_* lines, in the same order, as the original echo list.
cd kernel_workspace
for susfs_opt in \
KSU_SUSFS \
KSU_SUSFS_HAS_MAGIC_MOUNT \
KSU_SUSFS_SUS_PATH \
KSU_SUSFS_SUS_MOUNT \
KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT \
KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT \
KSU_SUSFS_SUS_KSTAT \
KSU_SUSFS_TRY_UMOUNT \
KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT \
KSU_SUSFS_SPOOF_UNAME \
KSU_SUSFS_ENABLE_LOG \
KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS \
KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG \
KSU_SUSFS_OPEN_REDIRECT \
KSU_SUSFS_SUS_MAP; do
echo "CONFIG_${susfs_opt}=y" >> ./common/arch/arm64/configs/gki_defconfig
done
# Remaining defconfig tweaks: KSU toggle, optional KPM/LZ4KD options, tmpfs
# xattr/ACL for mountify, O2, and disabling the defconfig check.
# NOTE(review): CONFIG_KSU=y is written even for ksu_type 'none' — presumably
# harmless once check_defconfig is disabled below, but confirm.
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
# Extended networking support for ipset/iptables-based tools: BPF stream
# parser, Netfilter addrtype/set matches, the full IP set family, and IPv6
# NAT. Appended lines and their order match the original echo list exactly.
cd kernel_workspace
GKI_DEFCONFIG=./common/arch/arm64/configs/gki_defconfig
for net_opt in BPF_STREAM_PARSER NETFILTER_XT_MATCH_ADDRTYPE NETFILTER_XT_SET IP_SET; do
echo "CONFIG_${net_opt}=y" >> "$GKI_DEFCONFIG"
done
echo "CONFIG_IP_SET_MAX=65534" >> "$GKI_DEFCONFIG"
for net_opt in \
IP_SET_BITMAP_IP \
IP_SET_BITMAP_IPMAC \
IP_SET_BITMAP_PORT \
IP_SET_HASH_IP \
IP_SET_HASH_IPMARK \
IP_SET_HASH_IPPORT \
IP_SET_HASH_IPPORTIP \
IP_SET_HASH_IPPORTNET \
IP_SET_HASH_IPMAC \
IP_SET_HASH_MAC \
IP_SET_HASH_NETPORTNET \
IP_SET_HASH_NET \
IP_SET_HASH_NETNET \
IP_SET_HASH_NETPORT \
IP_SET_HASH_NETIFACE \
IP_SET_LIST_SET \
IP6_NF_NAT \
IP6_NF_TARGET_MASQUERADE; do
echo "CONFIG_${net_opt}=y" >> "$GKI_DEFCONFIG"
done
# Hide CONFIG_IP6_NF_NAT=y from vintf compatibility checks (some devices
# otherwise boot with a "device internal problem" warning) without
# affecting the actual build.
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
# Step: optionally build extra TCP congestion-control algorithms (BBR, Vegas, Westwood+, HTCP, Brutal)
# and pick the system default according to the bbr_enable input ('false'/'true'/'default').
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
# Enable the pluggable TCP congestion-control chooser (required for the options below).
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR: models link capacity directly instead of using loss/delay as congestion signals. High throughput, low latency and
# good loss tolerance, but on phones its constant bandwidth/RTT probing increases CPU and radio power draw; shallow mobile
# base-station buffers (~50ms) and network hand-offs (WiFi -> 5G) can cause throughput drops or rate oscillation, and its
# RTT unfairness lets it starve loss-based flows (with BBR + CUBIC coexisting, BBR grabs bandwidth from other apps).
# Android therefore defaults to CUBIC, which balances throughput, stability, compatibility and efficiency — consider
# carefully whether you really need BBR before enabling it as the default.
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
# CUBIC: Android's default TCP algorithm; best balance of throughput, stability, compatibility and power for mobile use.
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
# VEGAS: delay-based; treats a rising round-trip time (RTT) as congestion and scales the window accordingly.
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
# New Vegas: improved Vegas with better RTT measurement and fairness when sharing a link with Reno/CUBIC.
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
# Westwood+: sets cwnd/ssthresh from bandwidth estimation (ACK arrival rate); fast recovery, good on wireless links
# (distinguishes congestion loss from radio loss).
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
# HTCP: loss-based AIMD with an RTT-scaled growth factor, combining delay and loss signals; tuned for fast, high-latency networks.
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
# brutal: aggressively probes and holds a target send rate with no cwnd cap; light loss (<20%) does not shrink the window
# and there is no BBR-style ProbeRTT phase. Useful on lossy links (public Wi-Fi, cellular) and for live-streaming /
# cloud-gaming uplinks, and can out-compete carrier QoS shaping — but it only works when the application sets a
# per-connection bandwidth, which almost no Android app does, so NEVER make TCP Brutal the default algorithm.
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
# Step: optional Samsung SSG multi-queue I/O scheduler (may fail to boot on some models, e.g. OnePlus 12).
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: optional Re-Kernel support (pairs with Freezer/NoActive-style apps to improve background freezing).
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: optional kernel-level baseband protection (blocks writes to non-user partitions).
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
# Fixed log message: the original duplicated the word "启用" ("正在启用启用…").
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
# Pull the Baseband-guard sources into the tree via the upstream setup script.
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Register baseband_guard in the default LSM list (idempotent: skips if already present).
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
# Step: replace the kernel localversion suffix emitted by scripts/setlocalversion.
# The sed targets the script's final line and swaps `echo "$res"` for a fixed suffix;
# the escaping (\$s, \\\$res) survives both YAML and shell interpolation — do not "simplify" it.
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Step: the main kernel build. Sets up ccache, LD_PRELOAD-based time spoofing for
# reproducible timestamps, compiler/linker wrapper scripts, then runs the two-stage
# make (defconfig, then Image). Statement order matters throughout; do not reorder.
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
# ccache tuning: ignore compiler mtimes/paths so the cache survives toolchain re-downloads.
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
# NOTE(review): these .so files are fetched from this repo's lib/ directory — confirm they exist on this branch.
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
# Create the CC (compiler) wrapper script.
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
# Create the LD (linker) wrapper script.
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# Verify that the time-hijack preload actually works before building.
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # exec all pass-through arguments
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
# Delete unneeded .NET, Android NDK, Haskell and CodeQL runtimes in the background
# while the kernel builds, to free disk space without blocking later steps.
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# Second time-hijack check after the build.
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
# Step: persist the ccache directory when an update was requested or no cache was restored this run.
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
# Step: post-process the built Image with a KPM patcher.
# 'builtin' uses the SukiSU KernelPatch tool (SukiSU/ReSukiSU only);
# 'kpn' uses the standalone KPatch-Next tool. Both replace Image with the patched oImage.
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
# Step: package the built Image into an AnyKernel3 flashable zip, name it from the
# KSU branch / version / suffix, and embed a build-provenance log as the zip comment.
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
# Fail fast if the kernel image is missing (build likely failed earlier).
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
# Map the ksu_type input to the display name used in the zip filename.
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
# Optional payloads bundled into the zip depending on inputs.
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
# Attach a zip archive comment to the AK3 package (debug/provenance info).
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
zip -z ../$AK3_NAME < ./ak3.log
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
archive: true
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} MT6989 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加Ace5竞速板 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.141.yml
================================================
name: 6.1.141 欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '141'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.141
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
# boolean-typed inputs take real YAML booleans, not the strings 'true'/'false'
default: true
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
# choice options are strings, so this default stays quoted
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: true
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: false
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)'
required: true
type: boolean
default: false
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: true
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: false
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: true
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: false
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: false
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
#旧版完整指令:(由于经过验证大部分指令已内置于GitHub Action环境中,故进行精简)
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
#使用最新版ccache-ECS(特化优化内核编译缓存,大幅提升二次不同配置编译速度)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..."
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_common_oneplus_sm8650/archive/refs/heads/oneplus/sm8650_b_16.0.0_oneplus12_6.1.141.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_common_oneplus_sm8650-oneplus-sm8650_b_16.0.0_oneplus12_6.1.141" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 配置ccache目录
run: |
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
- name: 初始化并配置ccache
run: |
# 设置ccache环境变量
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# 确保ccache目录存在
mkdir -p "$CCACHE_DIR"
# 每次运行都重新配置缓存大小
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# 显示初始缓存状态
echo "ccache初始状态:"
ccache -s
# 如果缓存恢复命中,显示详细信息
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
- name: 添加KernelSU
id: ksu_version
run: |
# 进入内核工作目录
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# 获取当前 Git 提交的短哈希 (8位)
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# 尝试最多 3 次获取 KernelSU API 版本号
for i in {1..3}; do
# 从远程 Makefile 中提取 KSU_API_VERSION
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# 查找第一个包含版本定义的行
grep -m1 "KSU_VERSION_API :=" |
# 提取等号后的值
awk -F'= ' '{print $2}' |
# 删除所有空白字符
tr -d '[:space:]')
# 如果成功获取到版本号则跳出循环,否则等待 1 秒后重试
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# 如果获取失败,使用默认版本号 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# 将 API 版本号存储到 GitHub 环境变量
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# 创建版本定义模板&版本格式函数: 使用获取的提交哈希和固定后缀
# KSU_VERSION_API: API 版本定义
# KSU_VERSION_FULL: 完整版本定义
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# 清理内核 Makefile 中的旧版本定义
# 删除版本函数
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# 删除 API 版本定义
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# 删除完整版本定义
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# 在 REPO_OWNER 行后插入新版本定义
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# 验证修改结果
grep -A10 "REPO_OWNER" kernel/Makefile # 检查插入点后的内容
grep "KSU_VERSION_FULL" kernel/Makefile # 确认版本定义存在
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
# Step: apply the lz4 1.10.0 + zstd 1.5.7 patches, fetched from this workflow's own repo.
# Both apply commands are best-effort (`|| true`); failures show in the log but do not stop the job.
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
# Step: apply the LZ4KD zram patch from ShirkNeko/SukiSU_patch (sources copied in, then patched best-effort).
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
# Clone only if a previous step has not already fetched SukiSU_patch.
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: append KernelSU and miscellaneous build options to the GKI defconfig.
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
# Built-in KPM is only valid for the sukisu/resukisu branches.
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Explicitly force SUSFS off when the input disabled it.
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Add support for the Mountify (backslashxx/mountify) module
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
# zram compressors needed by the LZ4KD patch set.
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Enable the O2 compile-optimization option
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
# Disable the defconfig consistency check
sed -i 's/check_defconfig//' ./common/build.config.gki
# Skip installing uapi headers into usr/include (unneeded here; saves build time)
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
# Step: opt-in network feature set (ipset/iptables extensions, IPv6 NAT).
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
# Enable the BPF stream parser for high-performance traffic processing and monitoring
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
# Netfilter extensions: address-type matching and IP-set target support for richer firewall rules
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
# IP set framework plus its bitmap/hash/list implementations for efficient large-scale address matching
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
# Enable IPv6 network address translation
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
# Some devices' vintf compatibility checks show a "device problem" warning on boot when
# CONFIG_IP6_NF_NAT=y is visible; config.patch hides the symbol from the check at build
# time without removing the compiled functionality.
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
# Step: optionally compile a set of TCP congestion-control algorithms.
# bbr_enable: "false" = skip entirely; "true" = build the algorithms but keep
# cubic as default; "default" = also make BBR the default algorithm.
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
# Enable the TCP congestion-control chooser (required for the options below)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR: model-based congestion control that does not rely on loss/latency signals; high throughput,
# low latency and loss-tolerant, but on phones its continuous bandwidth/RTT probing raises CPU and
# radio power draw (heat/battery). Mobile base-station buffers are shallow (~50ms), so the ProbeRTT
# phase can crater throughput, and network hand-offs (WiFi→5G) force re-probing, causing jitter or
# rate oscillation. It is also RTT-unfair and can starve loss-based flows (e.g. BBR + CUBIC mixes),
# hurting fairness for other apps. This is why Android defaults to CUBIC, which balances throughput,
# stability, compatibility and energy; consider whether you really need BBR before enabling it.
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
# CUBIC: Android's default algorithm — best overall balance of throughput, stability, compatibility and power; preferred for most mobile use
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
# VEGAS: delay-based control; treats rising round-trip time (RTT) as congestion and shrinks/grows the window accordingly
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
# New Vegas: improved Vegas with better RTT measurement and fairness; coexists better with Reno/CUBIC
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
# Westwood+: sets window/ssthresh from ACK-rate bandwidth estimates; fast recovery, good on wireless (separates congestion loss from radio loss)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
# HTCP: loss-based AIMD with an RTT-adaptive growth factor; combines delay and loss signals, tuned for high-latency high-speed links
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
# brutal: aggressively probes and holds a high send rate (no cwnd cap, ignores <20% loss, no ProbeRTT-like
# phase); can grab 90%+ of bandwidth from Reno/CUBIC flows. Useful for lossy weak networks (public Wi-Fi,
# cellular) and throughput-first uplinks (live streaming, cloud gaming). TCP Brutal only works after the
# application sets a per-connection bandwidth parameter — almost no Android app does — so never make it
# the default congestion-control algorithm.
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
# Step: enable Samsung's SSG block I/O scheduler (plus its cgroup support).
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: enable Re-Kernel support (used by freezer apps such as Freezer/NoActive).
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: enable kernel-level baseband protection (Baseband-guard LSM) and
# register it in the kernel's LSM list.
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
# Fixed message: original echoed "正在启用启用…" with a duplicated "启用".
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
# NOTE(review): piping a remote script straight into bash executes unreviewed
# code; consider pinning setup.sh to a commit hash for reproducibility/safety.
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# In the "config LSM" block of security/Kconfig, append baseband_guard after
# selinux in the default LSM list — but only if it is not already present.
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
# Step: rewrite the final `echo "$res"` in scripts/setlocalversion so the kernel's
# localversion suffix becomes either the user-supplied kernel_suffix input or the
# default KERNEL_NAME from env.
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
# "$s" targets the last line of the script; the echoed value is replaced wholesale.
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Step: the main kernel build. Sets up clang20/build-tools on PATH, configures
# ccache, installs LD_PRELOAD time-hijack wrappers (for reproducible timestamps),
# then runs gki_defconfig followed by the Image build.
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
# ccache tuning: ignore compiler mtime, relative paths via BASEDIR, and relaxed
# sloppiness so cache hits survive fresh checkouts.
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
# Fetch the LD_PRELOAD libraries that fake stat() times and clock time during compilation.
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
# Create the CC (compiler) wrapper
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
# Create the LD (linker) wrapper
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# Verify that the time-hijack preload actually works before building
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # exec all passed-in arguments
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
# While the kernel builds, remove unneeded .NET/Android NDK/Haskell/CodeQL
# runtimes in the background to free disk space without blocking later steps.
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
# First make configures (gki_defconfig, plain ccache), second builds Image via
# the time-hijacking cc/ld wrappers so object timestamps stay deterministic.
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# Post-build re-check that the time hijack is still in effect
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
# Step: persist the ccache directory back to the Actions cache, either when the
# user asked for an update or when no cache was hit on restore.
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
# Step: post-process the built Image with a KPM patcher.
# 'builtin' uses SukiSU's patch_linux (sukisu/resukisu only); 'kpn' uses the
# standalone KPatch-Next kptools. Both replace Image with the patched oImage.
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
# Step: package the built Image into an AnyKernel3 flashable zip, name it from
# the KSU branch/version/suffix, and attach a debug log as the zip comment.
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
# Map the ksu_type input to a human-readable branch name for the zip filename.
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
# NOTE(review): zram.zip is fetched from this repo's root, but it does not
# appear in the repository file listing — confirm it exists on this branch.
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
# Attach a zip archive comment with build provenance (debug info)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
# zip -z reads the archive comment from stdin
zip -z ../$AK3_NAME < ./ak3.log
# Step: upload the ccache debug log, even on failure, when debugging was requested.
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
# NOTE(review): 'archive' is not a standard actions/upload-artifact input —
# confirm the pinned action version actually supports it.
archive: true
# Step: upload the AnyKernel3 zip(s) produced by the packaging step.
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
# Job: publish a GitHub release from the artifacts produced by the build job.
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
# Download the AnyKernel3 zip named via the build job's create_zip output.
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
# NOTE(review): 'skip-decompress' — confirm this input exists for the pinned
# download-artifact version.
skip-decompress: true
# Step: derive release metadata (full version string, timestamps, tag prefix,
# human-readable KSU branch name) and export it via GITHUB_ENV.
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
# Map ksu_type to the display name used in the release title/notes.
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
# Step: write the release notes (the quoted 'EOF' heredoc keeps ${{ }} expansion
# to GitHub expressions only; shell does not expand inside it) and publish the
# release with gh. The notes body is user-facing data and is emitted verbatim.
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加12 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
# Tag uses the timestamp (unique); title uses the full version string.
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.57.yml
================================================
name: 6.1.57 欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '57'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.57
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
default: 'true'
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: 'true'
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: 'false'
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)'
required: true
type: boolean
default: 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: 'true'
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: 'false'
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: 'true'
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: 'false'
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
# Step: install build dependencies and fetch kernel sources, the clang20
# toolchain and build-tools in parallel (background '&' jobs joined by 'wait').
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
# NOTE(review): 'apt purge man-db' has no -y flag — confirm it does not prompt
# (and abort the && chain) on the runner image.
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
# Legacy full command (trimmed because most tools are preinstalled in the GitHub Actions image):
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
# Use the latest ccache-ECS build (tuned for kernel compile caching; much faster rebuilds across configs)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_common_oneplus_sm8650/archive/refs/heads/oneplus/sm8650_u_14.0.0_oneplus12.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_common_oneplus_sm8650-oneplus-sm8650_u_14.0.0_oneplus12" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
# Join all background download/install jobs before continuing.
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
# Strip the "-dirty" suffix logic from setlocalversion so local patches do not
# taint the reported kernel version.
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# Step: compute the per-kernel-version ccache directory and export it.
- name: 配置ccache目录
run: |
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
# Step: restore the ccache from the Actions cache, falling back through
# progressively broader key prefixes.
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
# Step: if nothing was restored, seed the cache from a pre-built public ccache
# release (cctv18/public_ccache); otherwise skip.
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
# Step: delete the stale Actions cache entry so the save step can overwrite it.
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
# Step: configure ccache (size, compression) and print its starting stats.
- name: 初始化并配置ccache
run: |
# Set ccache environment variables
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# Ensure the ccache directory exists
mkdir -p "$CCACHE_DIR"
# Re-apply the cache size limit on every run
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# Show the initial cache state
echo "ccache初始状态:"
ccache -s
# If the cache restore hit, print verbose details
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
# Step: install the selected KernelSU flavor into the kernel tree and compute a
# version number, exported both to GITHUB_ENV (KSUVER) and as a step output
# (ksuver). Each branch uses its own upstream setup.sh and versioning scheme.
- name: 添加KernelSU
id: ksu_version
run: |
# Enter the kernel workspace
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# Get the short (8-char) hash of the current git commit
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# Try up to 3 times to fetch the KernelSU API version
for i in {1..3}; do
# Extract KSU_API_VERSION from the remote Makefile
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# Find the first line containing the version definition
grep -m1 "KSU_VERSION_API :=" |
# Take the value after the equals sign
awk -F'= ' '{print $2}' |
# Strip all whitespace
tr -d '[:space:]')
# Break on success, otherwise wait 1s and retry
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# If fetching failed, fall back to default version 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# Store the API version in the GitHub environment
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# Build the version-definition template & formatter function from the commit
# hash and a fixed suffix:
#   KSU_VERSION_API: the API version definition
#   KSU_VERSION_FULL: the full version definition
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# Clean old version definitions out of the kernel Makefile
# Remove the version function
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# Remove the API version definition
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# Remove the full version definition
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# Insert the new definitions after the REPO_OWNER line
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# Generate a custom version number (commit count based); use 114514 on failure
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# Store the version number in the GitHub environment
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# Verify the edits
grep -A10 "REPO_OWNER" kernel/Makefile # check the content after the insertion point
grep "KSU_VERSION_FULL" kernel/Makefile # confirm the version definitions exist
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# Generate a custom version number (commit count based); use 114514 on failure
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# Store the version number in the GitHub environment
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
# Derive the version from the upstream commit count (via the Link pagination header) + 30000.
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
# Add WildKSU manager support for KernelSU Next
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
# Step: copy the susfs4oki patch set into the kernel tree and apply it; for the
# original KernelSU branch an extra enablement patch is applied to KernelSU itself.
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
# "|| true" tolerates already-applied/fuzzy hunks so the step never hard-fails here.
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
# Step: apply the lz4 1.10.0 + zstd 1.5.7 upgrade patches from this repo's
# zram_patch directory (cloned into a dir named after the actor).
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
# Step: copy the LZ4K sources from ShirkNeko/SukiSU_patch into the tree and
# apply the kernel-version-specific lz4kd patch.
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
# Step: append the SUSFS feature switches to the GKI defconfig (only when the
# susfs_enable workflow input is true).
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
# Step: append KernelSU and miscellaneous build options to the GKI defconfig.
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
# Built-in KPM is only valid for the sukisu/resukisu branches.
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Explicitly force SUSFS off when the input disabled it.
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Add support for the Mountify (backslashxx/mountify) module
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
# zram compressors needed by the LZ4KD patch set.
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# Enable the O2 compile-optimization option
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
# Disable the defconfig consistency check
sed -i 's/check_defconfig//' ./common/build.config.gki
# Skip installing uapi headers into usr/include (unneeded here; saves build time)
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的 RTT 探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
#创建 CC (编译器) 包装器
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
#创建 LD (链接器) 包装器
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# 测试时间劫持测试是否正常工作
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # 执行所有传入的参数
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
#在构建内核的同时清除不必要的.NET, Android NDK, Haskell, CodeQL运行库,清理空间且不阻塞后续步骤运行
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# 编译后时间劫持二次校验
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
- name: 应用KPM并修补内核
run: |
if [[ ${{ github.event.inputs.kpm_enable }} == 'builtin' && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
if [[ ${{ github.event.inputs.lz4kd_enable }} == 'true' ]]; then
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ ${{ github.event.inputs.kpm_enable }} == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
#为AK3添加注释(调试信息)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
zip -z ../$AK3_NAME < ./ak3.log
- name: 上传 Ccache 调试日志
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
archive: true
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加12 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持:${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .github/workflows/fastbuild_6.1.75.yml
================================================
name: 6.1.75 欧加真OKI内核快速构建
env:
TZ: Asia/Shanghai
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
SUB_VERSION: '75'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
CCACHE_KEY: ccache-ecsv2-6.1.75
on:
workflow_dispatch:
inputs:
ksu_type:
description: 'KernelSU分支(ReSukiSU/SukiSU Ultra/KernelSU Next/KSU(原版)/无内置KSU,默认ReSukiSU)'
required: true
type: choice
default: 'resukisu'
options:
- 'resukisu'
- 'sukisu'
- 'ksunext'
- 'ksu'
- 'none'
susfs_enable:
description: '是否开启susfs(用于增强隐藏环境挂载功能; 可能轻微增加耗电,上游更新导致不稳定时或不需要可关闭)'
required: true
type: boolean
default: 'true'
kpm_enable:
description: '是否开启kpm(builtin-使用(re)sukisu内置kpm, kpn-使用独立kpm实现(支持任意KSU/面具环境); 不需要可保持默认关闭)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'builtin'
- 'kpn'
lz4_enable:
description: '是否安装 lz4 1.10.0+zstd 1.5.7 补丁'
required: true
type: boolean
default: 'true'
lz4kd_enable:
description: '是否安装 LZ4KD 补丁(若已开启lz4+zstd补丁则可不开启)'
required: true
type: boolean
default: 'false'
bbr_enable:
description: '是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)'
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能拓展配置(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)'
required: true
type: boolean
default: 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器支持(提升IO读写性能; 在一加12上可能导致bug)'
required: true
type: boolean
default: 'true'
rekernel_enable:
description: '是否启用Re-Kernel支持(与Freezer/NoActive等软件配合, 提升应用冻结能力)'
required: true
type: boolean
default: 'false'
baseband_guard:
description: '是否开启内核级基带保护(阻止一切对非用户分区的写入,有效防止格机)'
required: true
type: boolean
default: 'true'
ccache_update:
description: '更新ccache缓存(将本次编译生成的ccache缓存覆盖至仓库缓存,在更改编译配置、源码或需要刷新缓存时开启)'
required: true
type: boolean
default: 'false'
ccache_debug:
description: '是否上传 Ccache调试日志(用于调试, 无需要不必开启)'
required: true
type: boolean
default: 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
ak3name: ${{ steps.create_zip.outputs.ak3name }}
permissions:
actions: write
contents: read
steps:
- name: 安装环境依赖+初始化源码仓库及llvm-Clang20工具链
run: |
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
echo "当前仓库:$GITHUB_REPOSITORY"
echo "当前分支:$GITHUB_REF_NAME"
sudo apt-mark hold firefox &&
sudo apt-mark hold libc-bin &&
sudo apt purge man-db &&
sudo rm -rf /var/lib/man-db/auto-update &&
sudo apt update &&
sudo apt-get install -y --no-install-recommends binutils python-is-python3 libssl-dev libelf-dev &
#旧版完整指令:(由于经过验证大部分指令已内置于GitHub Action环境中,故进行精简)
#sudo apt-get install -y --no-install-recommends curl bison flex make binutils git perl gcc python3 python-is-python3 bc libssl-dev libelf-dev zip unzip ccache
#使用最新版ccache-ECS(特化优化内核编译缓存,大幅提升二次不同配置编译速度)
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/ccache-x86-64 -O ccache &&
sudo cp -f ./ccache /usr/bin/ccache &&
sudo chmod +x /usr/bin/ccache &&
rm -f ./ccache &
echo "正在克隆源码仓库..."
aria2c -s16 -x16 -k1M https://github.com/cctv18/android_kernel_common_oneplus_sm8650/archive/refs/heads/oneplus/sm8650_v_15.0.0_oneplus12.zip -o common.zip &&
unzip -q common.zip &&
mv "android_kernel_common_oneplus_sm8650-oneplus-sm8650_v_15.0.0_oneplus12" common &&
rm -rf common.zip &
echo "正在克隆llvm-Clang20工具链..." &&
mkdir -p clang20 &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/clang-r547379.zip -o clang.zip &&
unzip -q clang.zip -d clang20 &&
rm -rf clang.zip &
echo "正在克隆构建工具..." &&
aria2c -s16 -x16 -k1M https://github.com/cctv18/oneplus_sm8650_toolchain/releases/download/LLVM-Clang20-r547379/build-tools.zip -o build-tools.zip &&
unzip -q build-tools.zip &&
rm -rf build-tools.zip &
wait
echo "所有源码及llvm-Clang20工具链初始化完成!"
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 配置ccache目录
run: |
echo "CCACHE_DIR=$HOME/.ccache_${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}" >> $GITHUB_ENV
echo "CCACHE_MAXSIZE=3G" >> $GITHUB_ENV
echo "当前磁盘空间:"
df -h
echo "当前构建内核版本:${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}"
- name: 载入当前版本内核的 ccache缓存
uses: actions/cache@v5
id: ccache-restore
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
restore-keys: |
${{ env.CCACHE_KEY }}-${{ runner.os }}-
${{ env.CCACHE_KEY }}-
- name: 拉取公共预置 ccache 缓存
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "检查本地缓存状态..."
if [ -d "${{ env.CCACHE_DIR }}" ] && [ "$(ls -A ${{ env.CCACHE_DIR }} 2>/dev/null)" ]; then
echo "检测到本地已成功载入 ccache 缓存,跳过公共 ccache 拉取!"
exit 0
fi
echo "未命中缓存,尝试拉取最新公共 ccache ..."
mkdir -p ${{ env.CCACHE_DIR }}
FILE_NAME="ccache-${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}.tar.zst"
if gh release download -p "$FILE_NAME" -R cctv18/public_ccache; then
echo "成功下载 $FILE_NAME,正在解压..."
tar -I zstd -xf "$FILE_NAME" -C ${{ env.CCACHE_DIR }}
echo "公共 ccache 恢复完成!"
else
echo "公共 ccache 中未找到对应的 ccache 文件,将进行全量全新编译..."
fi
- name: 清除旧 ccache 缓存
if: inputs.ccache_update
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "正在清除仓库中的旧 ccache 缓存..."
if gh cache delete ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }} -R ${{ github.repository }}; then
echo "成功删除旧的 ccache 缓存!"
else
echo "旧缓存不存在或已被清理!"
fi
- name: 初始化并配置ccache
run: |
# 设置ccache环境变量
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="${{ env.CCACHE_MAXSIZE }}"
# 确保ccache目录存在
mkdir -p "$CCACHE_DIR"
# 每次运行都重新配置缓存大小
echo "配置ccache缓存大小为: $CCACHE_MAXSIZE"
ccache -M "$CCACHE_MAXSIZE"
ccache -o compression=true
# 显示初始缓存状态
echo "ccache初始状态:"
ccache -s
# 如果缓存恢复命中,显示详细信息
if [ "${{ steps.ccache-restore.outputs.cache-hit }}" == 'true' ]; then
echo "ccache缓存命中详情:"
ccache -sv
fi
- name: 添加KernelSU
id: ksu_version
run: |
# 进入内核工作目录
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} == "sukisu" ]]; then
echo "正在配置SukiSU Ultra..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s builtin
cd ./KernelSU
# 获取当前 Git 提交的短哈希 (8位)
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
export KSU_VERSION=$KSU_VERSION
# 尝试最多 3 次获取 KernelSU API 版本号
for i in {1..3}; do
# 从远程 Makefile 中提取 KSU_API_VERSION
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Makefile" |
# 查找第一个包含版本定义的行
grep -m1 "KSU_VERSION_API :=" |
# 提取等号后的值
awk -F'= ' '{print $2}' |
# 删除所有空白字符
tr -d '[:space:]')
# 如果成功获取到版本号则跳出循环,否则等待 1 秒后重试
[ -n "$KSU_API_VERSION" ] && break || sleep 1
done
# 如果获取失败,使用默认版本号 3.1.7
[ -z "$KSU_API_VERSION" ] && KSU_API_VERSION="3.1.7"
# 将 API 版本号存储到 GitHub 环境变量
echo "KSU_API_VERSION=$KSU_API_VERSION" >> $GITHUB_ENV
# 创建版本定义模板&版本格式函数: 使用获取的提交哈希和固定后缀
# KSU_VERSION_API: API 版本定义
# KSU_VERSION_FULL: 完整版本定义
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
# 清理内核 Makefile 中的旧版本定义
# 删除版本函数
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Makefile
# 删除 API 版本定义
sed -i '/KSU_VERSION_API :=/d' kernel/Makefile
# 删除完整版本定义
sed -i '/KSU_VERSION_FULL :=/d' kernel/Makefile
# 在 REPO_OWNER 行后插入新版本定义
awk -v def="$VERSION_DEFINITIONS" '
# 当找到 REPO_OWNER 行时,插入版本定义并设置标记
/REPO_OWNER :=/ {print; print def; inserted=1; next}
# 打印所有行
1
# 如果未找到插入点,在文件末尾追加
END {if (!inserted) print def}
' kernel/Makefile > kernel/Makefile.tmp && mv kernel/Makefile.tmp kernel/Makefile
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 37185 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
# 验证修改结果
grep -A10 "REPO_OWNER" kernel/Makefile # 检查插入点后的内容
grep "KSU_VERSION_FULL" kernel/Makefile # 确认版本定义存在
echo "SukiSU版本号: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
elif [[ ${{ github.event.inputs.ksu_type }} == "resukisu" ]]; then
echo "正在配置ReSukiSU..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/refs/heads/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
cd ./KernelSU
# 生成自定义版本号(基于提交计数), 失败时使用 114514
KSU_VERSION=$(expr $(git rev-list --count main) + 30700 2>/dev/null || echo 114514)
# 存储版本号到 GitHub 环境变量
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
elif [[ ${{ github.event.inputs.ksu_type }} == "ksunext" ]]; then
echo "正在配置KernelSU Next..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在配置原版 KernelSU (tiann/KernelSU)..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过KernelSU配置..."
fi
- name: 应用 KernelSU & SUSFS 补丁
if: inputs.susfs_enable
run: |
cd kernel_workspace
if [[ ${{ github.event.inputs.ksu_type }} != "none" ]]; then
echo "正在添加susfs补丁..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 -N -F 3 < 69_hide_stuff.patch || true
cd ..
else
echo "已选择无内置KernelSU模式,跳过susfs配置..."
fi
if [[ ${{ github.event.inputs.ksu_type }} == "ksu" ]]; then
echo "正在为原版 KernelSU (tiann/KernelSU)添加补丁..."
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
if: inputs.lz4_enable
run: |
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone --depth=1 https://github.com/$GITHUB_REPOSITORY.git -b $GITHUB_REF_NAME $GITHUB_ACTOR
cp ./$GITHUB_ACTOR/zram_patch/001-lz4.patch ./common/
cp ./$GITHUB_ACTOR/zram_patch/lz4armv8.S ./common/lib
cp ./$GITHUB_ACTOR/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
- name: 应用 lz4kd 补丁
if: inputs.lz4kd_enable
run: |
echo "正在添加lz4kd补丁…"
cd kernel_workspace
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cd common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
- name: 添加SUSFS 配置项
if: inputs.susfs_enable
run: |
cd kernel_workspace
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 添加 KSU & 其他配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
# 用引号包裹 ${{ … }} 展开值:若输入为空,未加引号的 [[ == … ]] 会直接产生 bash 语法错误
if [[ "${{ github.event.inputs.kpm_enable }}" == "builtin" && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [[ "${{ github.event.inputs.susfs_enable }}" == "false" ]]; then
echo "CONFIG_KSU_SUSFS=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_TMPFS_POSIX_ACL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.lz4kd_enable }}" == 'true' ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用网络功能增强优化配置
if: inputs.better_net
run: |
cd kernel_workspace
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" != "false" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
if: inputs.ssg_enable
run: |
echo "正在启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用Re-Kernel支持
if: inputs.rekernel_enable
run: |
echo "正在启用Re-Kernel支持(用于与Freezer,NoActive等软件配合使用,提升冻结后台能力)…"
cd kernel_workspace
echo "CONFIG_REKERNEL=y" >> ./common/arch/arm64/configs/gki_defconfig
- name: 启用内核级基带保护
if: inputs.baseband_guard
run: |
# 修正日志消息重复用词("启用启用" -> "启用")
echo "正在启用内核级基带保护支持…"
cd kernel_workspace
echo "CONFIG_BBG=y" >> ./common/arch/arm64/configs/gki_defconfig
cd ./common
# NOTE(review): 直接 curl | bash 执行远程 master 分支脚本存在供应链风险,建议锁定到具体 commit 或校验哈希后再执行
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# 若 LSM 默认列表中尚未包含 baseband_guard,则将其追加到 selinux 之后
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
- name: 构建内核
run: |
WORKDIR="$(pwd)"
export PATH="/usr/lib/ccache:$PATH"
export PATH="$WORKDIR/kernel_workspace/clang20/bin:$PATH"
export PATH="$WORKDIR/kernel_workspace/build-tools/bin:$PATH"
CLANG_DIR="$WORKDIR/kernel_workspace/clang20/bin"
CLANG_VERSION="$($CLANG_DIR/clang --version | head -n 1)"
LLD_VERSION="$($CLANG_DIR/ld.lld --version | head -n 1)"
echo "编译器信息:"
echo "Clang版本: $CLANG_VERSION"
echo "LLD版本: $LLD_VERSION"
pahole_version=$(pahole --version 2>/dev/null | head -n1); [ -z "$pahole_version" ] && echo "pahole版本:未安装" || echo "pahole版本:$pahole_version"
export CCACHE_LOGFILE="${{ github.workspace }}/kernel_workspace/ccache.log"
export CCACHE_COMPILERCHECK="none"
export CCACHE_BASEDIR="${{ github.workspace }}"
export CCACHE_NOHASHDIR="true"
export CCACHE_NOHARDLINK="true"
export CCACHE_DIR="${{ env.CCACHE_DIR }}"
export CCACHE_MAXSIZE="3G"
export CCACHE_IS_KERNEL_COMPILING="true"
echo "sloppiness = file_stat_matches,include_file_ctime,include_file_mtime,pch_defines,file_macro,time_macros" >> "$CCACHE_DIR/ccache.conf"
cd kernel_workspace/common
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfakestat.so
wget https://github.com/$GITHUB_REPOSITORY/raw/refs/heads/$GITHUB_REF_NAME/lib/libfaketimeMT.so
chmod 777 ./*.so
export FAKESTAT="2025-05-25 12:00:00"
export FAKETIME="@2025-05-25 13:00:00"
echo "FAKESTAT=$FAKESTAT" >> $GITHUB_ENV
echo "FAKETIME=$FAKETIME" >> $GITHUB_ENV
SO_DIR=$(pwd)
export PRELOAD_LIBS="$SO_DIR/libfakestat.so $SO_DIR/libfaketimeMT.so"
#创建 CC (编译器) 包装器
echo '#!/bin/bash' > cc-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> cc-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> cc-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> cc-wrapper
echo 'ccache clang "$@"' >> cc-wrapper
#创建 LD (链接器) 包装器
echo '#!/bin/bash' > ld-wrapper
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> ld-wrapper
echo 'export FAKESTAT="'$FAKESTAT'"' >> ld-wrapper
echo 'export FAKETIME="'$FAKETIME'"' >> ld-wrapper
echo 'ld.lld "$@"' >> ld-wrapper
# 测试时间劫持测试是否正常工作
echo "--- [Wrapper Test] 正在创建通用的时间劫持测试脚本 ---"
echo '#!/bin/bash' > test-wrapper.sh
echo 'export LD_PRELOAD="'$PRELOAD_LIBS'"' >> test-wrapper.sh
echo 'export FAKESTAT="'$FAKESTAT'"' >> test-wrapper.sh
echo 'export FAKETIME="'$FAKETIME'"' >> test-wrapper.sh
echo 'echo ">>> Wrapper 内部环境检查完毕."' >> test-wrapper.sh
echo 'exec "$@"' >> test-wrapper.sh # 执行所有传入的参数
chmod +x test-wrapper.sh
echo "--- [Wrapper Test] 正在测试 (date) 命令 ---"
./test-wrapper.sh date
echo "--- [Wrapper Test] 正在测试 (stat) 命令 ---"
./test-wrapper.sh stat ./Makefile
echo "--- [Wrapper Test] 测试完毕 ---"
chmod +x cc-wrapper ld-wrapper
echo "--- 编译前环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译前环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
#在构建内核的同时清除不必要的.NET, Android NDK, Haskell, CodeQL运行库,清理空间且不阻塞后续步骤运行
sudo rm -rf /usr/share/dotnet &
sudo rm -rf /usr/local/lib/android &
sudo rm -rf /opt/ghc &
sudo rm -rf /opt/hostedtoolcache/CodeQL &
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="ccache clang" LD="ld.lld" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig &&
make -j$(nproc --all) LLVM=1 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC="$(pwd)/cc-wrapper" LD="$(pwd)/ld-wrapper" HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error Image
# 编译后时间劫持二次校验
echo "--- 编译后环境时间: $(LD_PRELOAD=$PRELOAD_LIBS date) ---"
echo "--- 编译后环境文件时间戳: ---"
LD_PRELOAD=$PRELOAD_LIBS stat ./Makefile
echo "内核编译完成!"
echo "ccache状态:"
ccache -s
echo "编译后空间:"
df -h
- name: 保存新的 ccache 缓存
if: inputs.ccache_update || steps.ccache-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ env.CCACHE_KEY }}-${{ runner.os }}-${{ github.ref_name }}
- name: 应用KPM并修补内核
run: |
# KPM 注入:builtin 使用 (Re)SukiSU 的 patch_linux;kpn 使用 KernelSU-Next 的 kptools
# 用引号包裹 ${{ … }} 展开值,防止输入为空时 [[ ]] 出现语法错误
if [[ "${{ github.event.inputs.kpm_enable }}" == "builtin" && ( "${{ github.event.inputs.ksu_type }}" == "sukisu" || "${{ github.event.inputs.ksu_type }}" == "resukisu" ) ]]; then
echo "正在应用KPM并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
fi
if [[ "${{ github.event.inputs.kpm_enable }}" == "kpn" ]]; then
echo "正在应用KP-N并修补内核..."
cd kernel_workspace/common/out/arch/arm64/boot
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
fi
- name: 克隆 AnyKernel3 并打包
id: create_zip
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [[ ! -f ./Image ]]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
# 用引号包裹 ${{ … }} 展开值:若输入为空,未加引号的 [[ == … ]] 会直接产生 bash 语法错误
if [[ "${{ github.event.inputs.ksu_type }}" == "sukisu" ]]; then
KSU_TYPENAME="SukiSU"
elif [[ "${{ github.event.inputs.ksu_type }}" == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ "${{ github.event.inputs.ksu_type }}" == "ksunext" ]]; then
KSU_TYPENAME="KSUNext"
elif [[ "${{ github.event.inputs.ksu_type }}" == "ksu" ]]; then
KSU_TYPENAME="KSU"
else
KSU_TYPENAME="none"
fi
if [[ "${{ github.event.inputs.lz4kd_enable }}" == 'true' ]]; then
wget https://raw.githubusercontent.com/$GITHUB_REPOSITORY/refs/heads/$GITHUB_REF_NAME/zram.zip
fi
if [[ "${{ github.event.inputs.kpm_enable }}" == 'kpn' ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ github.event.inputs.kernel_suffix }}.zip
else
AK3_NAME=AnyKernel3_${KSU_TYPENAME}_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_${{ env.KERNEL_NAME }}.zip
fi
zip -r ../$AK3_NAME ./*
echo "ak3name=$AK3_NAME" >> $GITHUB_OUTPUT
#为AK3添加注释(调试信息)
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
fi
TIME_NOW="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "Author: $GITHUB_ACTOR" > ./ak3.log
echo "Repo: $GITHUB_REPOSITORY" >> ./ak3.log
echo "Branch: $GITHUB_REF_NAME" >> ./ak3.log
echo "Run ID: $GITHUB_RUN_ID" >> ./ak3.log
echo "Commit: $GITHUB_SHA" >> ./ak3.log
echo "Time: $TIME_NOW" >> ./ak3.log
echo "Kernel Ver: $FULL_VERSION" >> ./ak3.log
echo "KSU Branch: ${KSU_TYPENAME}" >> ./ak3.log
echo "KSU Ver: ${KSUVER}" >> ./ak3.log
echo "susfs: ${{ github.event.inputs.susfs_enable }}" >> ./ak3.log
echo "KPM: ${{ github.event.inputs.kpm_enable }}" >> ./ak3.log
echo "LZ4: ${{ github.event.inputs.lz4_enable }}" >> ./ak3.log
echo "LZ4KD: ${{ github.event.inputs.lz4kd_enable }}" >> ./ak3.log
echo "IPset: ${{ github.event.inputs.better_net }}" >> ./ak3.log
echo "BBR&Brutal: ${{ github.event.inputs.bbr_enable }}" >> ./ak3.log
echo "SSG: ${{ github.event.inputs.ssg_enable }}" >> ./ak3.log
echo "Re-Kernel: ${{ github.event.inputs.rekernel_enable }}" >> ./ak3.log
echo "BBG: ${{ github.event.inputs.baseband_guard }}" >> ./ak3.log
# 将 ak3.log 内容写入 zip 的归档注释(zip -z 从标准输入读取注释)
zip -z ../$AK3_NAME < ./ak3.log
- name: 上传 Ccache 调试日志
# always() 保证即使前序编译步骤失败也会上传日志;仅在显式开启 ccache_debug 时执行
if: always() && inputs.ccache_debug
uses: actions/upload-artifact@v7
with:
name: ccache-debug-log
path: ${{ github.workspace }}/kernel_workspace/ccache.log
# NOTE(review): `archive` 不是 actions/upload-artifact 官方文档记载的输入项,疑似自定义 fork 的参数 —— 待确认
archive: true
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v7
with:
# 显式指定工件名,与 release 任务中 download-artifact 使用的 needs.build.outputs.ak3name 保持一致;
# 否则工件将使用默认名 "artifact",release 任务会因找不到同名工件而下载失败
name: ${{ steps.create_zip.outputs.ak3name }}
path: ${{ github.workspace }}/kernel_workspace/AnyKernel*.zip
archive: false
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v8
with:
name: ${{ needs.build.outputs.ak3name }}
path: ./release_zips
skip-decompress: true
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ github.event.inputs.kernel_suffix }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${{ env.KERNEL_NAME }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO-OPlus-Realme-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
# 用引号包裹 ${{ … }} 展开值,防止输入为空时 [[ ]] 出现语法错误
if [[ "${{ github.event.inputs.ksu_type }}" == "sukisu" ]]; then
KSU_TYPENAME="SukiSU Ultra"
elif [[ "${{ github.event.inputs.ksu_type }}" == "resukisu" ]]; then
KSU_TYPENAME="ReSukiSU"
elif [[ "${{ github.event.inputs.ksu_type }}" == "ksunext" ]]; then
KSU_TYPENAME="KernelSU Next"
elif [[ "${{ github.event.inputs.ksu_type }}" == "ksu" ]]; then
KSU_TYPENAME="KernelSU (Official)"
else
KSU_TYPENAME="无内置KSU"
fi
echo "KSU_TYPENAME=$KSU_TYPENAME" >> $GITHUB_ENV
- name: 创建发布
id: create_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cat << 'EOF' > release_notes.md
### 📱 欧加真 ${{ env.KSU_TYPENAME }} SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3通用 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 内核(基于一加12 ${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }} 版官方OKI源码)
- KSU分支:${{ env.KSU_TYPENAME }}
- susfs支持:${{ github.event.inputs.susfs_enable }}
- KPM支持 :${{ github.event.inputs.kpm_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- LZ4KD支持:${{ github.event.inputs.lz4kd_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- Re-Kernel支持:${{ github.event.inputs.rekernel_enable }}
- 内核级基带保护支持:${{ github.event.inputs.baseband_guard }}
- ReSukiSU管理器下载:[ReSukiSU_CI](https://github.com/cctv18/ReSukiSU_CI/releases)
- SukiSU Ultra管理器下载:[SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra/releases)
- KernelSU Next管理器下载:[KernelSU-Next](https://github.com/KernelSU-Next/KernelSU-Next/releases)
- KSU原版管理器下载:[KernelSU](https://github.com/tiann/KernelSU/releases)
### ⏫️ 更新内容:
- 更新${{ env.KSU_TYPENAME }}至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备;
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启;
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启;
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块;
5. 由于KernelSU上游更新了元模块功能,最新版KSU管理器(包括除KernelSU Next以外的各分支)需要配合元模块(metamodule)才能正常挂载模块。目前的元模块包括[meta overlayfs](https://github.com/KernelSU-Modules-Repo/meta-overlayfs), [mountify](https://github.com/backslashxx/mountify), [meta magicmount](https://github.com/7a72/meta-magic_mount/), [meta magicmount rs](https://github.com/Tools-cx-app/meta-magic_mount/), [hybrid mount](https://github.com/YuzakiKokuban/meta-hybrid_mount)等。若你是第一次使用KSU或刚从旧版KSU管理器升级至新版,请先安装一个元模块,这样其他涉及系统挂载的模块才能正常运行;
6. KernelPatch Next(即KPN)是一个独立于KSU的KPM实现,可以运行在任意KSU/面具环境中(不适用于Apatch),且不能与(Re)SukiSU内置的kpm功能共同使用,使用前请保证你的内核没有内置的kpm实现/修补。
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
EOF
gh release create "${{ env.TAG_HEAD }}-${{ env.TIME }}" \
--repo "${{ github.repository }}" \
--title "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}" \
--notes-file release_notes.md \
release_zips/AnyKernel3_*.zip
================================================
FILE: .old_yml/build_6.1.118.yml
================================================
name: 欧加真 6.1.118 Android 15 通用OKI内核
env:
TZ: Asia/Shanghai
CPU: 'sm8650'
FEIL: 'oppo+oplus+realme'
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
KERNELSU_VARIANT: 'SukiSU-Ultra'
KERNELSU_BRANCH: 'susfs-main'
on:
workflow_dispatch:
# 取消此处注释代码可开启每日定时编译
# schedule:
# - cron: '0 23 * * *' # UTC时间23点(北京时间次日7点)
inputs:
hook_method:
description: hook模式(大部分情况manual即可,少数需切换sus su模式的场景才需要kprobes钩子)
required: true
type: choice
default: 'manual'
options:
- 'manual'
- 'kprobes'
kpm_enable:
description: '是否开启kpm(可能轻微增加耗电,不需要可关闭)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
lz4k_enable:
description: '是否安装 LZ4KD 补丁'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
lz4_enable:
description: '是否安装 lz4 1.10.0 + zstd 1.5.7 补丁'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
bbr_enable:
description: "是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)"
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能增强优化配置(优化代理连接,IPV6等功能)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
scx_enable:
description: '是否安装风驰内核驱动(未完成)'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
steps:
- name: 最大化建筑空间
uses: easimon/maximize-build-space@master
with:
root-reserve-mb: 8192
temp-reserve-mb: 2048
swap-size-mb: 8192
remove-dotnet: 'true'
remove-android: 'true'
remove-haskell: 'true'
remove-codeql: 'true'
- name: 安装配置环境依赖
run: |
sudo apt update && sudo apt upgrade -y
sudo apt-get install curl bison flex make binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev -y
sudo rm -rf ./llvm.sh
sudo wget https://apt.llvm.org/llvm.sh
sudo chmod +x llvm.sh
sudo ./llvm.sh 20 all
- name: 初始化源码仓库
run: |
rm -rf kernel_workspace && mkdir kernel_workspace && cd kernel_workspace
echo "正在克隆源码仓库..."
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_v_15.0.0_oneplus12_6.1.118 common
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 添加 SukiSU-Ultra
id: ksu_version
run: |
cd kernel_workspace
#if [[ "${{ env.KERNELSU_VARIANT }}" == "SukiSU-Ultra" ]]; then
# echo "BRANCH='-s susfs-main'" >> $GITHUB_ENV
#fi
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s susfs-main
cd ./KernelSU
KSU_VERSION=$(expr $(/usr/bin/git rev-list --count main) "+" 10700)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
export KSU_VERSION=$KSU_VERSION
sed -i "s/DKSU_VERSION=12800/DKSU_VERSION=$KSU_VERSION/" kernel/Makefile
- name: 应用 SukiSU-Ultra & SUSFS 补丁
run: |
cd kernel_workspace
git clone https://github.com/ShirkNeko/susfs4ksu.git -b gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
git clone https://github.com/ShirkNeko/SukiSU_patch.git
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cp ./SukiSU_patch/hooks/syscall_hooks.patch ./common/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 < syscall_hooks.patch || true
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
run: |
if [ "${{ github.event.inputs.lz4_enable }}" == "true" ]; then
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
fi
- name: 应用 Hide Stuff 补丁
run: |
cd kernel_workspace/common
cp ../SukiSU_patch/69_hide_stuff.patch ./
patch -p1 < 69_hide_stuff.patch
- name: 应用 lz4kd 补丁
run: |
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "正在添加lz4kd补丁…"
cd kernel_workspace/common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
fi
- name: 添加 SukiSU-Ultra & SUSFS 配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.kpm_enable }}" == "true" ]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [ "${{ github.event.inputs.hook_method }}" == "kprobes" ]; then
echo "正在开启kprobes钩子..."
echo "CONFIG_KSU_SUSFS_SUS_SU=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_MANUAL_HOOK=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_KPROBES_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "正在开启manual钩子..."
echo "CONFIG_KSU_MANUAL_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_SU=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
#echo "CONFIG_KSU_SUSFS_SUS_OVERLAYFS=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
# 以下配置未核实必要性,待测试
#echo "CONFIG_ZRAM_WRITEBACK=y" >> ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_ZRAM=m/CONFIG_ZRAM=y/g' ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_MODULE_SIG=y/CONFIG_MODULE_SIG=n/g' ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
- name: 启用网络功能增强优化配置
run: |
cd kernel_workspace
if [ "${{ github.event.inputs.better_net }}" == "true" ]; then
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
fi
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" == "true" || "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.bbr_enable }}" == "default" ]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
run: |
if [[ "${{ github.event.inputs.ssg_enable }}" == "true" ]]; then
echo "正在启用三星SSG IO调度器…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Build the GKI kernel in the checked-out "common" tree with clang/LLVM 20.
- name: 构建内核
  run: |
    cd kernel_workspace
    # Optionally drop in the sched_ext (风驰) scheduler sources before building.
    if [[ ${{ github.event.inputs.scx_enable }} == "true" ]]; then
      git clone https://github.com/cctv18/sched_ext.git
      rm -rf ./sched_ext/.git
      rm -rf ./sched_ext/README.md
      cp -r ./sched_ext/* ./common/kernel/sched
    fi
    cd common
    # FIX: the 32-bit cross prefix was misspelled "arm-linux-gnuabeihf-";
    # the correct GNU target triplet is "arm-linux-gnueabihf-".
    make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
    echo "内核编译完成!"
# Patch the built Image with SukiSU's KernelPatch tool (built-in KPM support).
# FIX: in this workflow the kpm_enable input is a choice of
# 'false' / 'builtin' / 'kpn' — it is never the string 'true', so the old
# condition `kpm_enable == 'true'` could never fire and this step was dead.
# patch_linux belongs to the built-in SukiSU KPM path, hence 'builtin'.
- name: 应用patch_linux并修补内核
  if: ${{ github.event.inputs.kpm_enable == 'builtin' }}
  run: |
    cd kernel_workspace/common/out/arch/arm64/boot
    curl -LO https://github.com/ShirkNeko/SukiSU_KernelPatch_patch/releases/download/0.12.0/patch_linux
    chmod +x patch_linux
    ./patch_linux
    # patch_linux writes the patched kernel as "oImage"; swap it in.
    rm -f Image
    mv oImage Image
- name: 克隆 AnyKernel3 并打包
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [ ! -f ./Image ]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.lz4k_enable }} == "true" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A15_${{ github.event.inputs.kernel_suffix }}.zip ./*
else
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A15_${{ env.KERNEL_NAME }}.zip ./*
fi
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ${{ github.workspace }}/kernel_workspace/*.zip
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ./release_zips
# Derive release metadata (version string, timestamps, tag prefix) and
# publish it to later steps via $GITHUB_ENV.
- name: 设置环境变量
  run: |
    # FIX: the sub patch level was hard-coded as ".118" in two places even
    # though the workflow defines SUB_VERSION at the top; reference the env
    # var so a version bump only touches the env block.
    # Also dropped the no-op `export FULL_VERSION` — each step runs in its
    # own shell, so only $GITHUB_ENV carries values to later steps.
    if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
      SUFFIX="${{ github.event.inputs.kernel_suffix }}"
    else
      SUFFIX="${{ env.KERNEL_NAME }}"
    fi
    FULL_VERSION="${{ env.KERNEL_VERSION }}.${{ env.SUB_VERSION }}-${SUFFIX}"
    echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
    TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
    TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
    echo "TIME=$TIME" >> $GITHUB_ENV
    echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
    TAG_HEAD="OPPO+OPlus+Realme-A15-build"
    echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
- name: 创建发布
id: create_release
uses: softprops/action-gh-release@v1
with:
tag_name: "${{ env.TAG_HEAD }}-${{ env.TIME }}"
name: "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}"
body: |
### 📱 欧加真 Android 15 SukiSU-Ultra SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3 6.1 Android 15内核通用(基于一加12 Android 15 版官方OKI源码)
- 特性:SukiSU Ultra + SUSFS + VFS + KPM
- hook模式:${{ github.event.inputs.hook_method }}
- LZ4KD支持:${{ github.event.inputs.lz4k_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- 风驰内核支持:${{ github.event.inputs.scx_enable }}
- 推荐系统:ColorOS 15 / RealmeUI 6.0
- SukiSU管理器下载:[SukiSU-Ultra](https://github.com/ShirkNeko/SukiSU-Ultra/releases)
### ⏫️ 更新内容:
- 更新SukiSU Ultra至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
draft: false
prerelease: false
files: |
release_zips/AnyKernel3_*.zip
================================================
FILE: .old_yml/build_6.1.57.yml
================================================
name: 欧加真 6.1.57 Android 14 通用OKI内核
env:
TZ: Asia/Shanghai
CPU: 'sm8650'
FEIL: 'oppo+oplus+realme'
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
KERNELSU_VARIANT: 'SukiSU-Ultra'
KERNELSU_BRANCH: 'susfs-main'
on:
workflow_dispatch:
# 取消此处注释代码可开启每日定时编译
# schedule:
# - cron: '0 23 * * *' # UTC时间23点(北京时间次日7点)
inputs:
hook_method:
description: hook模式(大部分情况manual即可,少数需切换sus su模式的场景才需要kprobes钩子)
required: true
type: choice
default: 'manual'
options:
- 'manual'
- 'kprobes'
kpm_enable:
description: '是否开启kpm(可能轻微增加耗电,不需要可关闭)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
lz4k_enable:
description: '是否安装 LZ4KD 补丁'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
lz4_enable:
description: '是否安装 lz4 1.10.0 + zstd 1.5.7 补丁'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
bbr_enable:
description: "是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)"
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能增强优化配置(优化代理连接,IPV6等功能)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
scx_enable:
description: '是否安装风驰内核驱动(未完成)'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
steps:
- name: 最大化建筑空间
uses: easimon/maximize-build-space@master
with:
root-reserve-mb: 8192
temp-reserve-mb: 2048
swap-size-mb: 8192
remove-dotnet: 'true'
remove-android: 'true'
remove-haskell: 'true'
remove-codeql: 'true'
# Install build dependencies and the LLVM 20 toolchain from apt.llvm.org.
- name: 安装配置环境依赖
  run: |
    sudo apt update && sudo apt upgrade -y
    # FIX: "make" appeared twice in the package list; deduplicated.
    sudo apt-get install -y curl bison flex make binutils dwarves git lld pahole zip perl gcc python3 python-is-python3 bc libssl-dev libelf-dev
    sudo rm -rf ./llvm.sh
    sudo wget https://apt.llvm.org/llvm.sh
    sudo chmod +x llvm.sh
    sudo ./llvm.sh 20 all
- name: 初始化源码仓库
run: |
rm -rf kernel_workspace && mkdir kernel_workspace && cd kernel_workspace
echo "正在克隆源码仓库..."
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_u_14.0.0_oneplus12 common
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 添加 SukiSU-Ultra
id: ksu_version
run: |
cd kernel_workspace
#if [[ "${{ env.KERNELSU_VARIANT }}" == "SukiSU-Ultra" ]]; then
# echo "BRANCH='-s susfs-main'" >> $GITHUB_ENV
#fi
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s susfs-main
cd ./KernelSU
KSU_VERSION=$(expr $(/usr/bin/git rev-list --count main) "+" 10700)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
export KSU_VERSION=$KSU_VERSION
sed -i "s/DKSU_VERSION=12800/DKSU_VERSION=$KSU_VERSION/" kernel/Makefile
- name: 应用 SukiSU-Ultra & SUSFS 补丁
run: |
cd kernel_workspace
git clone https://github.com/ShirkNeko/susfs4ksu.git -b gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
git clone https://github.com/ShirkNeko/SukiSU_patch.git
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cp ./SukiSU_patch/hooks/syscall_hooks.patch ./common/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 < syscall_hooks.patch || true
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
run: |
if [ "${{ github.event.inputs.lz4_enable }}" == "true" ]; then
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
fi
- name: 应用 Hide Stuff 补丁
run: |
cd kernel_workspace/common
cp ../SukiSU_patch/69_hide_stuff.patch ./
patch -p1 < 69_hide_stuff.patch
- name: 应用 lz4kd 补丁
run: |
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "正在添加lz4kd补丁…"
cd kernel_workspace/common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
fi
- name: 添加 SukiSU-Ultra & SUSFS 配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.kpm_enable }}" == "true" ]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [ "${{ github.event.inputs.hook_method }}" == "kprobes" ]; then
echo "正在开启kprobes钩子..."
echo "CONFIG_KSU_SUSFS_SUS_SU=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_MANUAL_HOOK=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_KPROBES_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "正在开启manual钩子..."
echo "CONFIG_KSU_MANUAL_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_SU=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
#echo "CONFIG_KSU_SUSFS_SUS_OVERLAYFS=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
# 以下配置未核实必要性,待测试
#echo "CONFIG_ZRAM_WRITEBACK=y" >> ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_ZRAM=m/CONFIG_ZRAM=y/g' ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_MODULE_SIG=y/CONFIG_MODULE_SIG=n/g' ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
- name: 启用网络功能增强优化配置
run: |
cd kernel_workspace
if [ "${{ github.event.inputs.better_net }}" == "true" ]; then
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
fi
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" == "true" || "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.bbr_enable }}" == "default" ]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
run: |
if [[ "${{ github.event.inputs.ssg_enable }}" == "true" ]]; then
echo "正在启用三星SSG IO调度器…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
# Build the GKI kernel in the checked-out "common" tree with clang/LLVM 20.
- name: 构建内核
  run: |
    cd kernel_workspace
    # Optionally drop in the sched_ext (风驰) scheduler sources before building.
    if [[ ${{ github.event.inputs.scx_enable }} == "true" ]]; then
      git clone https://github.com/cctv18/sched_ext.git
      rm -rf ./sched_ext/.git
      rm -rf ./sched_ext/README.md
      cp -r ./sched_ext/* ./common/kernel/sched
    fi
    cd common
    # FIX: the 32-bit cross prefix was misspelled "arm-linux-gnuabeihf-";
    # the correct GNU target triplet is "arm-linux-gnueabihf-".
    make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
    echo "内核编译完成!"
- name: 应用patch_linux并修补内核
if: ${{ github.event.inputs.kpm_enable == 'true' }}
run: |
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/ShirkNeko/SukiSU_KernelPatch_patch/releases/download/0.12.0/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
- name: 克隆 AnyKernel3 并打包
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [ ! -f ./Image ]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
if [[ ${{ github.event.inputs.lz4k_enable }} == "true" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A14_${{ github.event.inputs.kernel_suffix }}.zip ./*
else
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A14_${{ env.KERNEL_NAME }}.zip ./*
fi
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ${{ github.workspace }}/kernel_workspace/*.zip
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ./release_zips
- name: 设置环境变量
run: |
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ format('{0}.57-{1}', env.KERNEL_VERSION, github.event.inputs.kernel_suffix) }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ format('{0}.57-{1}', env.KERNEL_VERSION, env.KERNEL_NAME) }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO+OPlus+Realme-A14-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
- name: 创建发布
id: create_release
uses: softprops/action-gh-release@v1
with:
tag_name: "${{ env.TAG_HEAD }}-${{ env.TIME }}"
name: "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}"
body: |
### 📱 欧加真 Android 14 SukiSU-Ultra SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3 6.1 Android 14内核通用(基于一加12 Android 14 版官方OKI源码)
- 特性:SukiSU Ultra + SUSFS + VFS + KPM
- hook模式:${{ github.event.inputs.hook_method }}
- LZ4KD支持:${{ github.event.inputs.lz4k_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- 风驰内核支持:${{ github.event.inputs.scx_enable }}
- 推荐系统:ColorOS 14 / RealmeUI 5.0
- SukiSU管理器下载:[SukiSU-Ultra](https://github.com/ShirkNeko/SukiSU-Ultra/releases)
### ⏫️ 更新内容:
- 更新SukiSU Ultra至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
draft: false
prerelease: false
files: |
release_zips/AnyKernel3_*.zip
================================================
FILE: .old_yml/build_6.1.75.yml
================================================
name: 欧加真 6.1.75 Android 15 通用OKI内核
env:
TZ: Asia/Shanghai
CPU: 'sm8650'
FEIL: 'oppo+oplus+realme'
ANDROID_VERSION: 'android14'
KERNEL_VERSION: '6.1'
KERNEL_NAME: 'android14-11-o-gca13bffobf09'
KERNELSU_VARIANT: 'SukiSU-Ultra'
KERNELSU_BRANCH: 'susfs-main'
on:
workflow_dispatch:
# 取消此处注释代码可开启每日定时编译
# schedule:
# - cron: '0 23 * * *' # UTC时间23点(北京时间次日7点)
inputs:
hook_method:
description: hook模式(大部分情况manual即可,少数需切换sus su模式的场景才需要kprobes钩子)
required: true
type: choice
default: 'manual'
options:
- 'manual'
- 'kprobes'
kpm_enable:
description: '是否开启kpm(可能轻微增加耗电,不需要可关闭)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
lz4k_enable:
description: '是否安装 LZ4K 补丁'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
lz4_enable:
description: '是否安装 lz4 1.10.0 + zstd 1.5.7 补丁'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
bbr_enable:
description: "是否启用bbr算法(优化上行数据,对手机日用无太大意义甚至可能负优化;false关闭,true仅加入算法,default设为默认)"
required: true
type: choice
default: 'false'
options:
- 'false'
- 'true'
- 'default'
better_net:
description: '是否开启网络功能增强优化配置(优化代理连接,IPV6等功能)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
ssg_enable:
description: '是否启用三星SSG IO调度器(一加12等极少数机型开启后可能不开机,若出现bug请关闭此项)'
required: true
type: choice
default: 'true'
options:
- 'true'
- 'false'
scx_enable:
description: '是否安装风驰内核驱动(未完成)'
required: true
type: choice
default: 'false'
options:
- 'true'
- 'false'
kernel_suffix:
description: '内核后缀(留空默认,开头别加连字符,勿加空格等影响指令运行的保留字符)'
required: false
type: string
default: ''
jobs:
build:
runs-on: ubuntu-latest
outputs:
ksuver: ${{ steps.ksu_version.outputs.ksuver }}
steps:
- name: 最大化建筑空间
uses: easimon/maximize-build-space@master
with:
root-reserve-mb: 8192
temp-reserve-mb: 2048
swap-size-mb: 8192
remove-dotnet: 'true'
remove-android: 'true'
remove-haskell: 'true'
remove-codeql: 'true'
# Install build dependencies and the LLVM 20 toolchain from apt.llvm.org.
- name: 安装配置环境依赖
  run: |
    sudo apt update && sudo apt upgrade -y
    # FIX: "make" appeared twice in the package list; deduplicated.
    sudo apt-get install -y curl bison flex make binutils dwarves git lld pahole zip perl gcc python3 python-is-python3 bc libssl-dev libelf-dev
    sudo rm -rf ./llvm.sh
    sudo wget https://apt.llvm.org/llvm.sh
    sudo chmod +x llvm.sh
    sudo ./llvm.sh 20 all
- name: 初始化源码仓库
run: |
rm -rf kernel_workspace && mkdir kernel_workspace && cd kernel_workspace
echo "正在克隆源码仓库..."
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_v_15.0.0_oneplus12 common
echo "正在去除 ABI 保护 & 去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
- name: 添加 SukiSU-Ultra
id: ksu_version
run: |
cd kernel_workspace
#if [[ "${{ env.KERNELSU_VARIANT }}" == "SukiSU-Ultra" ]]; then
# echo "BRANCH='-s susfs-main'" >> $GITHUB_ENV
#fi
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/refs/heads/main/kernel/setup.sh" | bash -s susfs-main
cd ./KernelSU
KSU_VERSION=$(expr $(/usr/bin/git rev-list --count main) "+" 10700)
echo "KSUVER=$KSU_VERSION" >> $GITHUB_ENV
echo "ksuver=$KSU_VERSION" >> $GITHUB_OUTPUT
export KSU_VERSION=$KSU_VERSION
sed -i "s/DKSU_VERSION=12800/DKSU_VERSION=$KSU_VERSION/" kernel/Makefile
- name: 应用 SukiSU-Ultra & SUSFS 补丁
run: |
cd kernel_workspace
git clone https://github.com/ShirkNeko/susfs4ksu.git -b gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}
git clone https://github.com/ShirkNeko/SukiSU_patch.git
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cp ./SukiSU_patch/hooks/syscall_hooks.patch ./common/
cd ./common
patch -p1 < 50_add_susfs_in_gki-${{ env.ANDROID_VERSION }}-${{ env.KERNEL_VERSION }}.patch || true
patch -p1 < syscall_hooks.patch || true
- name: 应用lz4 1.10.0 & zstd 1.5.7补丁
run: |
if [ "${{ github.event.inputs.lz4_enable }}" == "true" ]; then
echo "正在添加lz4 1.10.0 & zstd 1.5.7补丁…"
cd kernel_workspace
git clone https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd ./common
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
fi
- name: 应用 Hide Stuff 补丁
run: |
cd kernel_workspace/common
cp ../SukiSU_patch/69_hide_stuff.patch ./
patch -p1 < 69_hide_stuff.patch
- name: 应用 lz4kd 补丁
run: |
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "正在添加lz4kd补丁…"
cd kernel_workspace/common
cp -r ../SukiSU_patch/other/zram/lz4k/include/linux/* ./include/linux/
cp -r ../SukiSU_patch/other/zram/lz4k/lib/* ./lib
cp -r ../SukiSU_patch/other/zram/lz4k/crypto/* ./crypto
cp ../SukiSU_patch/other/zram/zram_patch/${{ env.KERNEL_VERSION }}/lz4kd.patch ./
patch -p1 -F 3 < lz4kd.patch || true
fi
- name: 添加 SukiSU-Ultra & SUSFS 配置项
run: |
cd kernel_workspace
echo "CONFIG_KSU=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.kpm_enable }}" == "true" ]; then
echo "CONFIG_KPM=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
if [ "${{ github.event.inputs.hook_method }}" == "kprobes" ]; then
echo "正在开启kprobes钩子..."
echo "CONFIG_KSU_SUSFS_SUS_SU=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_MANUAL_HOOK=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_KPROBES_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "正在开启manual钩子..."
echo "CONFIG_KSU_MANUAL_HOOK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_SU=n" >> ./common/arch/arm64/configs/gki_defconfig
fi
echo "CONFIG_KSU_SUSFS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> ./common/arch/arm64/configs/gki_defconfig
#echo "CONFIG_KSU_SUSFS_SUS_OVERLAYFS=n" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> ./common/arch/arm64/configs/gki_defconfig
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
echo "CONFIG_ZSMALLOC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4HC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4K=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_LZ4KD=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_CRYPTO_842=y" >> ./common/arch/arm64/configs/gki_defconfig
# 以下配置未核实必要性,待测试
#echo "CONFIG_ZRAM_WRITEBACK=y" >> ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_ZRAM=m/CONFIG_ZRAM=y/g' ./common/arch/arm64/configs/gki_defconfig
#sed -i 's/CONFIG_MODULE_SIG=y/CONFIG_MODULE_SIG=n/g' ./common/arch/arm64/configs/gki_defconfig
fi
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> ./common/arch/arm64/configs/gki_defconfig
#禁用 defconfig 检查
sed -i 's/check_defconfig//' ./common/build.config.gki
- name: 启用网络功能增强优化配置
run: |
cd kernel_workspace
if [ "${{ github.event.inputs.better_net }}" == "true" ]; then
#启用 BPF 流解析器,实现高性能网络流量处理,增强网络监控和分析能力
echo "CONFIG_BPF_STREAM_PARSER=y" >> ./common/arch/arm64/configs/gki_defconfig
#开启增强 Netfilter 防火墙扩展模块,支持基于地址类型的匹配规则,启用 IP 集合支持,提高防火墙规则灵活性,支持更复杂的流量过滤策略
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_NETFILTER_XT_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IP 集框架及其多种数据结构实现,提供高效的大规模 IP 地址管理,提高防火墙规则处理效率,减少内存占用
echo "CONFIG_IP_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_MAX=65534" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_MAC=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETNET=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP_SET_LIST_SET=y" >> ./common/arch/arm64/configs/gki_defconfig
#启用 IPv6 网络地址转换
echo "CONFIG_IP6_NF_NAT=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> ./common/arch/arm64/configs/gki_defconfig
#由于部分机型的vintf兼容性检测规则,在开启CONFIG_IP6_NF_NAT后开机会出现"您的设备内部出现了问题。请联系您的设备制造商了解详情。"的提示,故添加一个配置修复补丁,在编译内核时隐藏CONFIG_IP6_NF_NAT=y但不影响对应功能编译
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
fi
- name: 添加 BBR 等一系列拥塞控制算法
run: |
if [[ "${{ github.event.inputs.bbr_enable }}" == "true" || "${{ github.event.inputs.bbr_enable }}" == "default" ]]; then
echo "正在添加BBR等拥塞控制算法..."
cd kernel_workspace
#开启TCP拥塞控制算法控制器(必需)
echo "CONFIG_TCP_CONG_ADVANCED=y" >> ./common/arch/arm64/configs/gki_defconfig
################################################################################################################################
# BBR:基于链路容量的代表拥塞控制算法,不再使用丢包、延时等信号去衡量拥塞是否发生,而是直接对网络建模来应对、避免真实的网络拥塞;
# 具有高吞吐、低延迟、抗丢包的特点,但在手机上使用时由于持续探测带宽/RTT、高频计算与发包增加 CPU 和射频模块功耗,会增加耗电及发热;
# 且移动基站缓冲区深度通常较小(~50ms),BBR 的带宽探测阶段(ProbeRTT)过度降窗可能导致吞吐量骤降,且网络切换(WiFi→5G)时需重新
# 探测参数,反而增加延迟或导致速率振荡(短暂卡顿),且存在 RTT 不公平性,与基于丢包的流竞争时可能过于强势,在混合网络环境中
#(如 BBR + CUBIC 共存),BBR 会抢占更多其他软件的带宽,降低其他应用的公平性(如后台软件更新影响前台视频播放)。
# 因此,虽然BBR可以显著减少排队延迟,抗丢包能力强,带宽利用率高,但由于其会增加耗电,且易导致网络速率波动,故安卓系统默认不使用
# BBR拥塞算法,而是使用在吞吐量、稳定性、兼容性、能效之间取得最佳平衡的CUBIC算法。在开启BBR前,请考虑自己是否真的有使用BBR的必要。
################################################################################################################################
echo "CONFIG_TCP_CONG_BBR=y" >> ./common/arch/arm64/configs/gki_defconfig
#CUBIC:安卓的默认TCP拥塞控制算法,在吞吐量、稳定性、兼容性、能效之间取得最佳平衡,具有高兼容性与公平性、抗网络波动性强、低计算开销的特点,是绝大部分移动场景的优先选择
echo "CONFIG_TCP_CONG_CUBIC=y" >> ./common/arch/arm64/configs/gki_defconfig
#VEGAS:基于时延的拥塞控制算法之一,将回路响应时间(Round Trip Time,RTT)增加视为出现拥塞,增加时增大拥塞窗口,减小时减小拥塞窗口
echo "CONFIG_TCP_CONG_VEGAS=y" >> ./common/arch/arm64/configs/gki_defconfig
#New Vegas:Vegas 算法的改进版,优化了 RTT 测量和竞争公平性,可以更准确地检测拥塞,与 Reno/CUBIC 共存能力提升
echo "CONFIG_TCP_CONG_NV=y" >> ./common/arch/arm64/configs/gki_defconfig
#Westwood+:基于带宽估计(ACK 到达率)动态设置拥塞窗口和慢启动阈值;快速恢复,适合无线网络(区分拥塞丢包与无线丢包)
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> ./common/arch/arm64/configs/gki_defconfig
#HTCP:一种基于损失的算法,使用 AIMD 来控制拥塞窗口,根据 RTT 动态调整增长因子,结合延迟和丢包信号,针对高延迟的高速网络进行优化
echo "CONFIG_TCP_CONG_HTCP=y" >> ./common/arch/arm64/configs/gki_defconfig
#brutal:一种通过主动探测 + 激进抢占最大化吞吐量的拥塞算法,无拥塞窗口上限,轻度丢包(<20%)不降窗,避免类似 BBR 的 ProbeRTT 阶段,
#持续维持高发送速率,与 Reno/CUBIC 共存时,Brutal 可通过高频发包抢占 90%+ 带宽,适用于高丢包弱网环境(如公共 Wi-Fi、蜂窝网络)及
#直播推流、云游戏上行链路等需优先保证吞吐量而非延迟敏感的场景,提升弱网吞吐性能,对抗运营商 QoS 限速。但由于TCP Brutal 仅在应用程序
#对每个 TCP 连接设置带宽参数之后才能正常工作,绝大部分安卓应用都不支持该操作,故请勿将 TCP Brutal 设置成默认拥塞控制算法。
echo "CONFIG_TCP_CONG_BRUTAL=y" >> ./common/arch/arm64/configs/gki_defconfig
if [ "${{ github.event.inputs.bbr_enable }}" == "default" ]; then
echo "正在将BBR设为默认拥塞控制算法..."
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> ./common/arch/arm64/configs/gki_defconfig
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> ./common/arch/arm64/configs/gki_defconfig
fi
fi
- name: 启用三星SSG IO调度器
run: |
if [[ "${{ github.event.inputs.ssg_enable }}" == "true" ]]; then
echo "正在启用三星SSG IO调度器…"
cd kernel_workspace
echo "CONFIG_MQ_IOSCHED_SSG=y" >> ./common/arch/arm64/configs/gki_defconfig
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> ./common/arch/arm64/configs/gki_defconfig
fi
- name: 添加制作名称
run: |
cd kernel_workspace
echo "替换内核版本后缀..."
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
echo "当前内核版本后缀:${{ github.event.inputs.kernel_suffix }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ github.event.inputs.kernel_suffix }}\"|" "$f"
done
else
echo "当前内核版本后缀:${{ env.KERNEL_NAME }}"
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${{ env.KERNEL_NAME }}\"|" "$f"
done
fi
- name: 构建内核
run: |
cd kernel_workspace
# sched_ext (风驰) support: overlay external scheduler sources into the kernel tree.
# Quote the expanded input so [[ ]] stays valid when the input is empty.
if [[ "${{ github.event.inputs.scx_enable }}" == "true" ]]; then
git clone https://github.com/cctv18/sched_ext.git
rm -rf ./sched_ext/.git
rm -rf ./sched_ext/README.md
cp -r ./sched_ext/* ./common/kernel/sched
fi
cd common
# Fixed 32-bit cross-compiler triplet: "gnuabeihf" was a misspelling of the
# standard ARM hard-float triplet "gnueabihf".
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo "内核编译完成!"
- name: 应用patch_linux并修补内核
# kpm_enable is a choice input (false/builtin/kpn) — see the workflow inputs —
# so the original condition `== 'true'` could never match and this step never ran.
# patch_linux is the (Re)SukiSU builtin-KPM patch tool, so gate on 'builtin'.
if: ${{ github.event.inputs.kpm_enable == 'builtin' }}
run: |
cd kernel_workspace/common/out/arch/arm64/boot
curl -LO https://github.com/ShirkNeko/SukiSU_KernelPatch_patch/releases/download/0.12.0/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
- name: 克隆 AnyKernel3 并打包
run: |
cd kernel_workspace
git clone https://github.com/cctv18/AnyKernel3 --depth=1
rm -rf ./AnyKernel3/.git
cd AnyKernel3
cp ../common/out/arch/arm64/boot/Image ./Image
if [ ! -f ./Image ]; then
echo "未找到内核镜像文件,构建可能出错"
exit 1
fi
# Quote the expanded input: an empty lz4k_enable would otherwise produce
# `[[ == "true" ]]`, which is a bash syntax error.
if [[ "${{ github.event.inputs.lz4k_enable }}" == "true" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A15_${{ github.event.inputs.kernel_suffix }}.zip ./*
else
zip -r ../AnyKernel3_SukiSU_${{ env.KSUVER }}_${{ env.KERNEL_VERSION }}_A15_${{ env.KERNEL_NAME }}.zip ./*
fi
- name: 上传 ZIP 工件
uses: actions/upload-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ${{ github.workspace }}/kernel_workspace/*.zip
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
actions: read
steps:
- name: 下载 ZIP 工件
uses: actions/download-artifact@v4
with:
name: Kernel_ZIP_Artifacts
path: ./release_zips
- name: 设置环境变量
run: |
# Build FULL_VERSION from workflow-level env. The previous hard-coded ".75"
# contradicted SUB_VERSION ('118') declared at the top of this workflow —
# a leftover from copying the 6.1.75 workflow.
if [[ -n "${{ github.event.inputs.kernel_suffix }}" ]]; then
FULL_VERSION=${{ format('{0}.{1}-{2}', env.KERNEL_VERSION, env.SUB_VERSION, github.event.inputs.kernel_suffix) }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
else
FULL_VERSION=${{ format('{0}.{1}-{2}', env.KERNEL_VERSION, env.SUB_VERSION, env.KERNEL_NAME) }}
echo "FULL_VERSION=$FULL_VERSION" >> $GITHUB_ENV
export FULL_VERSION=$FULL_VERSION
fi
TIME="$(TZ='Asia/Shanghai' date +'%y%m%d%H%M%S')"
TIME_FORM="$(TZ='Asia/Shanghai' date +'%Y-%m-%d %H:%M:%S')"
echo "TIME=$TIME" >> $GITHUB_ENV
echo "TIME_FORM=$TIME_FORM" >> $GITHUB_ENV
TAG_HEAD="OPPO+OPlus+Realme-A15-build"
echo "TAG_HEAD=$TAG_HEAD" >> $GITHUB_ENV
- name: 创建发布
id: create_release
uses: softprops/action-gh-release@v1
with:
tag_name: "${{ env.TAG_HEAD }}-${{ env.TIME }}"
name: "${{ env.TAG_HEAD }}-${{ env.FULL_VERSION }}"
body: |
### 📱 欧加真 Android 15 SukiSU-Ultra SM8650 通用内核 | 构建信息
- 内核版本号: ${{ env.FULL_VERSION }}
- 编译时间: ${{ env.TIME_FORM }}
- 机型:欧加真骁龙8Gen3 6.1 Android 15内核通用(基于一加12 Android 15 版官方OKI源码)
- 特性:SukiSU Ultra + SUSFS + VFS + KPM
- hook模式:${{ github.event.inputs.hook_method }}
- LZ4KD支持:${{ github.event.inputs.lz4k_enable }}
- LZ4支持:${{ github.event.inputs.lz4_enable }}
- 网络功能增强:${{ github.event.inputs.better_net }}
- BBR/Brutal 等拥塞控制算法支持:${{ github.event.inputs.bbr_enable }}
- 三星SSG IO调度器支持:${{ github.event.inputs.ssg_enable }}
- 风驰内核支持:${{ github.event.inputs.scx_enable }}
- 推荐系统:ColorOS 15 / RealmeUI 6.0
- SukiSU管理器下载:[SukiSU-Ultra](https://github.com/ShirkNeko/SukiSU-Ultra/releases)
### ⏫️ 更新内容:
- 更新SukiSU Ultra至最新版本(${{ needs.build.outputs.ksuver }})
- (预留)
### 📋 安装方法 | Installation Guide
1. 若你的手机已经安装了第三方Recovery(如TWRP),可下载对应机型的AnyKernel刷机包后进入Recovery模式,通过Recovery刷入刷机包后重启设备
2. 若你的手机之前已有 root 权限,可在手机上安装[HorizonKernelFlasher](https://github.com/libxzr/HorizonKernelFlasher/releases),在HorizonKernelFlasher中刷入AnyKernel刷机包并重启
3. 若你之前已刷入SukiSU Ultra内核,且SukiSU Ultra管理器已更新至最新版本,可在SukiSU Ultra管理器中直接刷入AnyKernel刷机包并重启
4. 刷入无lz4kd补丁版的内核前若刷入过lz4kd补丁版的内核,为避免出错,请先关闭zram模块
#### ※※※刷写内核有风险,为防止出现意外导致手机变砖,在刷入内核前请务必用[KernelFlasher](https://github.com/capntrips/KernelFlasher)等软件备份boot等关键启动分区!※※※
draft: false
prerelease: false
files: |
release_zips/AnyKernel3_*.zip
================================================
FILE: README.md
================================================
# 欧加真 SM8650/MT6989/MT6897 系列通用6.1内核自动化编译脚本
[Stars](https://github.com/cctv18/oppo_oplus_realme_sm8650/stargazers)
[Forks](https://github.com/cctv18/oppo_oplus_realme_sm8650/forks)
[酷安主页](http://www.coolapk.com/u/22650293)
[Discussions](https://github.com/cctv18/oppo_oplus_realme_sm8650/discussions)
#####
一个更方便、快捷的自动化OPPO/一加/真我系列骁龙8Gen3(SM8650)/天玑9400e(MT6989)/天玑8350(MT6897)机型的通用内核编译脚本。
#####
这个项目的初衷是解决以下问题:
- 绿厂官方摆烂,代码开源开一半,导致部分内核代码无法通过已有的配置xml正常编译,甚至没有编译配置xml;
- 官方使用的 Bazel 编译器过于不稳定且低效,容易出现各种各样莫名其妙的错误,且全网几乎找不到任何有效解决方法,对于新手极不友好;
- 由于绿厂魔改内核f2fs代码,导致欧加真机型刷入GKI内核后不清空data分区就无法正常开机。
## 本项目的主要内容(及计划)
- 提供 OKI(官方源码)/ GKI(谷歌通用内核源码)双编译模式,OKI保留官方驱动/调度,GKI兼容性更强(无需相同内核小版本即可刷入);
- 为 GKI 移植官方内核的f2fs源码,使 GKI 内核可以和官方 OKI 内核一样,刷入后可保留数据正常开机,不需要清空data ~~(新建文件夹)~~;
- 改用 LLVM/Clang 20 进行编译,并排除了官方源码中不必要的 vendor 源码参与,大幅优化编译流程,对比原 bazel 编译器缩短了近2/3的编译时间(原版官方编译器每次约需要超过1h才能完成编译),提高了编译过程的稳定性,输出日志更便于维护调试;
- 修复官方代码部分bug/未及时更新的补丁,并计划引入风驰内核支持;
- 提供 Github Action 在线编译/shell本地编译双版本脚本。
## 已实现:
- [x] 欧加真 SM8650 通用OKI内核(基于一加12 6.1.57/6.1.75/6.1.118 官方内核源码,其他同内核版本非SM8650机型可自行测试,部分机型可完全兼容)
- [x] 欧加真 MT6989 通用OKI内核(基于一加Ace5竞速版 6.1.115 官方内核源码,其他同内核版本非MT6989机型可自行测试,部分机型可完全兼容)
- [x] 欧加真 MT6897 通用OKI内核(基于一加平板 6.1.128 官方内核源码,其他同内核版本非MT6897机型可自行测试,部分机型可完全兼容)
- [x] ReSukiSU/SukiSU Ultra/KernelSU Next/原版KernelSU多版本KSU可选
- [x] 引入ccache缓存及大量独家编译流程优化,首次编译时间约11min,二次编译时间可稳定在约6min *(首次编译时会拉取公共预置ccache,从第二次开始配置不变的情况下,单次编译时间约6min(由于ccache缓存机制,更改任意内核编译选项会使二次编译速度下降至约11分钟,若使用创建缓存时相同的配置可恢复至约6分钟,如需要长期修改配置选项建议开启“更新ccache缓存”选项);距离上一次调用两周未调用后缓存会被自动清除,此时编译会自动重建缓存)*
- [x] 引入O2编译优化,改善内核运行性能
- [x] ~~可选manual/kprobes/syscall钩子模式(kprobes钩子模式下支持切换至sus su模式)~~ 由于最新版KSU已更新inline hook,故旧版manual/syscall钩子已作废
- [x] lz4 1.10.0 & zstd 1.5.7 算法更新&优化补丁(来自[@ferstar](https://github.com/ferstar), 移植by [@Xiaomichael](https://github.com/Xiaomichael))
- [x] 可选加入 BBR/Brutal 及一系列 tcp 拥塞控制算法
- [x] 三星SSG IO调度器移植(目前已知仅在一加12上会导致无法正常启动,原因尚不明确,待进一步研究修复)
- [x] 加入一些网络功能拓展配置选项(用于为ipset及需要iptables等高级网络功能内核支持的程序提供支持)
- [x] 添加了对[Mountify](https://github.com/backslashxx/mountify)模块的支持
- [x] 加入Re:Kernel支持,与Freezer,NoActive等软件配合降低功耗
- [x] 加入内核防格基带保护(By [@showdo](https://github.com/showdo)),有效防止恶意格机脚本/程序对系统分区数据的破坏
## 待实现:
- [ ] 为非官方支持机型移植完整风驰内核支持(正在补全中)
- [ ] zram内置化,无需外置zram.ko挂载 ~~(有了新版 lz4&zstd 补丁真的还有必要吗)~~
- [ ] LXC/Docker 功能支持
- [ ] Nethunter 驱动移植
- [ ] 欧加真 6.1 通用 GKI内核(移植一加f2fs源码,实现免清data刷入)
- ~~整合多版本内核编译脚本(出于操作便捷性及GitHub Action的选项数量限制,暂不进行多脚本整合)~~
- 更多优化与特性移植……
#####
#####
#####
## 鸣谢
- ReSukiSU:[ReSukiSU/ReSukiSU](https://github.com/ReSukiSU/ReSukiSU)
- Sukisu Ultra:[SukiSU-Ultra/SukiSU-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra)
- susfs4ksu:[ShirkNeko/susfs4ksu](https://github.com/ShirkNeko/susfs4ksu)
- SukiSU内核补丁:[SukiSU-Ultra/SukiSU_patch](https://github.com/SukiSU-Ultra/SukiSU_patch)
- pershoot维护的KernelSU Next分支:[pershoot/KernelSU-Next](https://github.com/pershoot/KernelSU-Next)
- 手动钩子等补丁:[WildKernels/kernel_patches](https://github.com/WildKernels/kernel_patches)
- 原版KernelSU: [tiann/KernelSU](https://github.com/tiann/KernelSU)
- 内核防格基带保护模块:[vc-teahouse/Baseband-guard](https://github.com/vc-teahouse/Baseband-guard)
- GKI 内核构建脚本:(待定)
- ~~本地化内核构建脚本(已失效):[Suxiaoqinx/kernel_manifest_OnePlus_Sukisu_Ultra](https://github.com/Suxiaoqinx/kernel_manifest_OnePlus_Sukisu_Ultra)~~
================================================
FILE: local/builder_6.1.115.sh
================================================
#!/bin/bash
set -e
# ===== 获取脚本目录 =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== 设置自定义参数 =====
echo "===== 欧加真MT6989通用6.1.115 A15 (天玑特供)OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,在天玑机型上可能导致bug,建议关闭;默认:n): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-n}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
# Map the user's KSU branch selection to a human-readable name.
case "$KSU_BRANCH" in
y|Y) KSU_TYPE="SukiSU Ultra" ;;
r|R) KSU_TYPE="ReSukiSU" ;;
n|N) KSU_TYPE="KernelSU Next" ;;
k|K) KSU_TYPE="KernelSU" ;;
*) KSU_TYPE="no KSU" ;;
esac
# Map the KPM selection to a human-readable name.
case "$USE_PATCH_LINUX" in
b|B) KPM_TYPE="builtin" ;;
k|K) KPM_TYPE="KernelPatch Next" ;;
*) KPM_TYPE="no kpm" ;;
esac
echo
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== 创建工作目录 =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== 安装构建依赖 =====
echo ">>> 安装构建依赖..."
# Function to run a command with sudo if not already root
SU() {
# Run the given command via sudo unless we are already root.
if [ "$(id -u)" -ne 0 ]; then
sudo "$@"
else
"$@"
fi
}
SU apt-mark hold firefox && apt-mark hold libc-bin && apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== 初始化仓库 =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_oneplus_mt6989 -b oneplus/mt6989_v_15.0.2_ace5_race common
echo ">>> 初始化仓库完成"
# ===== 清除 abi 文件、去除 -dirty 后缀 =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== 替换版本后缀 =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== 拉取 KSU 并设置版本号 =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== 克隆补丁仓库&应用 SUSFS 补丁 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== 应用 LZ4 & ZSTD 补丁 =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 应用 LZ4KD 补丁 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 添加 defconfig 配置项 =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# 写入通用 SUSFS/KSU 配置
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# 仅在启用了 KPM 时添加 KPM 支持
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# Add LZ4K/LZ4KD compression algorithm configs only when the LZ4KD patch is applied.
# NOTE(review): the original heredoc line here was garbled/truncated
# (`cat >> "$DEFCONFIG_FILE" <>> ...`), which also swallowed the APPLY_BETTERNET
# guard and wrongly nested all network configs under the LZ4KD branch.
# Reconstructed as per-line echoes, matching the workflow YAML and sibling scripts.
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo "CONFIG_ZSMALLOC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_CRYPTO_LZ4HC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_CRYPTO_LZ4K=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_CRYPTO_LZ4KD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_CRYPTO_842=y" >> "$DEFCONFIG_FILE"
fi
# ===== Network feature enhancement configs (BPF stream parser, ipset, IPv6 NAT) =====
if [[ "$APPLY_BETTERNET" == "y" || "$APPLY_BETTERNET" == "Y" ]]; then
echo ">>> 正在启用网络功能增强优化配置..."
echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
# Some devices' vintf compatibility rules flag CONFIG_IP6_NF_NAT=y at boot
# ("您的设备内部出现了问题..."); config.patch hides the symbol from the check
# without affecting what gets built.
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
cd ..
fi
# ===== 添加 BBR 等一系列拥塞控制算法 =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== 启用三星SSG IO调度器 =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用内核级基带保护 =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== 禁用 defconfig 检查 =====
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== Build the kernel =====
echo ">>> 开始编译内核..."
cd common
# Fixed 32-bit cross-compiler triplet: "gnuabeihf" was a misspelling of the
# standard ARM hard-float triplet "gnueabihf" (same fix as the CI workflow).
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== 选择使用 patch_linux (KPM补丁)=====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== 克隆并打包 AnyKernel3 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== 如果启用 lz4kd,则下载 zram.zip 并放入当前目录 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== Compose the output ZIP file name from the selected options =====
# Appends "-$2" to ZIP_NAME when flag $1 is y/Y; no-op otherwise.
append_tag() {
case "$1" in
y|Y) ZIP_NAME="${ZIP_NAME}-$2" ;;
esac
}
ZIP_NAME="Anykernel3-${MANIFEST}"
append_tag "$APPLY_SUSFS" "susfs"
append_tag "$APPLY_LZ4KD" "lz4kd"
append_tag "$APPLY_LZ4" "lz4-zstd"
if [[ "$USE_PATCH_LINUX" == [bBkK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
append_tag "$APPLY_BBR" "bbr"
append_tag "$APPLY_SSG" "ssg"
append_tag "$APPLY_REKERNEL" "rek"
append_tag "$APPLY_BBG" "bbg"
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== 打包 ZIP 文件,包括 zram.zip(如果存在) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.118.sh
================================================
#!/bin/bash
set -e
# ===== 获取脚本目录 =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== 设置自定义参数 =====
echo "===== 欧加真SM8650通用6.1.118 A15 OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,默认:y): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-y}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
KSU_TYPE="SukiSU Ultra"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
KSU_TYPE="ReSukiSU"
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
KSU_TYPE="KernelSU Next"
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
KSU_TYPE="KernelSU"
else
KSU_TYPE="no KSU"
fi
if [[ "$USE_PATCH_LINUX" == "b" || "$USE_PATCH_LINUX" == "B" ]]; then
KPM_TYPE="builtin"
elif [[ "$USE_PATCH_LINUX" == "k" || "$USE_PATCH_LINUX" == "K" ]]; then
KPM_TYPE="KernelPatch Next"
else
KPM_TYPE="no kpm"
fi
echo
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== 创建工作目录 =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== 安装构建依赖 =====
echo ">>> 安装构建依赖..."
# Function to run a command with sudo if not already root
SU() {
if [ "$(id -u)" -eq 0 ]; then
"$@"
else
sudo "$@"
fi
}
SU apt-mark hold firefox && apt-mark hold libc-bin && apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== 初始化仓库 =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_b_16.0.0_oneplus12 common
echo ">>> 初始化仓库完成"
# ===== 清除 abi 文件、去除 -dirty 后缀 =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== 替换版本后缀 =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== Pull the selected KSU flavor and pin its version strings =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
# Retry up to 3 times: read KSU_VERSION_API from the upstream Kbuild.
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
# Makefile snippet that pins the full version to v<api>-<commit>@cctv18.
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
# Delete the old version definitions, then re-insert ours after REPO_OWNER.
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
# Version code = commit count of main + 37185; falls back to 114514 on failure.
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
# Version = upstream commit count (via the paginated-API "last page" trick) + 30000.
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
# Add WildKSU manager support for KernelSU Next
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== Clone the patch repo & apply the SUSFS / hide patches =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
# "|| true": hunks already present in the tree are allowed to fail.
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
# Vanilla KernelSU additionally needs its own enable-susfs patch.
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== Apply the LZ4 1.10.0 & ZSTD 1.5.7 update patches =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== Apply the LZ4KD patch (sources from SukiSU_patch) =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== Append defconfig options =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# Common KSU switch. NOTE(review): this is written even when the "no KSU"
# branch was selected — confirm that is intended.
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
# Support for the Mountify (backslashxx/mountify) module
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# Enable the O2 compiler-optimization config
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
# Skip installing uapi headers into usr/include to save build time
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# Add KPM support only when builtin KPM is requested on a (Re)SukiSU branch
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# Add the related algorithm support only when the LZ4KD patch was applied.
# NOTE(review): the heredoc below was corrupted in this copy of the script
# ("cat >> ... <>> ..." with its body and a fi/if pair eaten); the CONFIG list
# is reconstructed — TODO confirm against the sibling builder_6.1.*.sh scripts.
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
cat >> "$DEFCONFIG_FILE" <<EOF
CONFIG_CRYPTO_LZ4KD=y
CONFIG_CRYPTO_842=y
EOF
fi
# ===== Network feature enhancement =====
# Guarded by APPLY_BETTERNET, which is read at startup but was otherwise
# unused — its lost guard was part of the same corruption (see note above).
if [[ "$APPLY_BETTERNET" == "y" || "$APPLY_BETTERNET" == "Y" ]]; then
echo ">>> 正在启用网络功能增强优化配置..."
echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
# Some models' vintf compatibility checks raise a boot-time warning after
# enabling CONFIG_IP6_NF_NAT, so apply a config patch that hides
# CONFIG_IP6_NF_NAT=y at configure time without affecting the built feature.
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
cd ..
fi
# ===== Add BBR and a set of other TCP congestion-control algorithms =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
# "d" makes BBR the default congestion control; otherwise cubic stays default.
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== Enable the Samsung SSG IO scheduler =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable kernel-level baseband protection (Baseband-guard LSM) =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Append baseband_guard right after selinux in the default LSM list (idempotent).
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== Disable the defconfig consistency check =====
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== Build the kernel =====
echo ">>> 开始编译内核..."
cd common
# LLVM=-20 selects the "-20"-suffixed LLVM tools (clang-20, ld.lld-20, ...).
# Fix: the 32-bit cross prefix was misspelled "arm-linux-gnuabeihf-"; the
# correct Debian/Ubuntu triplet is "arm-linux-gnueabihf-".
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== Optionally apply a KPM patch to the built Image =====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
# Builtin KPM (patch_linux) only applies on SukiSU/ReSukiSU branches.
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== Clone AnyKernel3 and stage the kernel image =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== When lz4kd is enabled, ship zram.zip inside the flashable zip =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
# KernelPatch Next users additionally get the kpn.zip module.
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== Compose the output ZIP file name from the chosen options =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
# Fix: tag "-kpm" only when a KPM patch actually ran — builtin (b/B) applies
# only on SukiSU/ReSukiSU branches (mirrors the patch_linux step above).
if [[ ("$USE_PATCH_LINUX" == [bB] && "$KSU_BRANCH" == [yYrR]) || "$USE_PATCH_LINUX" == [kK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
# Fix: BBR is also enabled when APPLY_BBR is d/D (BBR-as-default mode), so
# those values must get the "-bbr" tag too; previously only y/Y did.
if [[ "$APPLY_BBR" == [yYdD] ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
# Date-stamp the final name, e.g. "...-v20250101.zip".
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== Pack the ZIP (includes zram.zip / kpn.zip when they were downloaded) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.128.sh
================================================
#!/bin/bash
set -e
# ===== Resolve the directory this script lives in =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== Read user-configurable build options (each with a default) =====
echo "===== 欧加真MT6897通用6.1.128 A15 (天玑特供)OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,在天玑机型上可能导致bug,建议关闭;默认:n): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-n}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
# Map the KSU branch selection to a human-readable label for the summary.
case "$KSU_BRANCH" in
[yY]) KSU_TYPE="SukiSU Ultra" ;;
[rR]) KSU_TYPE="ReSukiSU" ;;
[nN]) KSU_TYPE="KernelSU Next" ;;
[kK]) KSU_TYPE="KernelSU" ;;
*) KSU_TYPE="no KSU" ;;
esac
# Map the KPM selection to a human-readable label for the summary.
case "$USE_PATCH_LINUX" in
[bB]) KPM_TYPE="builtin" ;;
[kK]) KPM_TYPE="KernelPatch Next" ;;
*) KPM_TYPE="no kpm" ;;
esac
echo
# ===== Print the configuration summary =====
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== Work directory (same as the script directory) =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== Install build dependencies =====
echo ">>> 安装构建依赖..."
# Run the given command directly when already root, otherwise escalate via sudo.
SU() {
if [ "$(id -u)" -ne 0 ]; then
set -- sudo "$@"
fi
"$@"
}
# Hold packages whose background triggers (man-db rebuilds etc.) slow down installs.
# Fix: run every apt-mark through SU — previously only the first command in the
# && chain was privilege-wrapped, so the libc-bin/man-db holds failed when the
# script ran as a non-root user.
SU apt-mark hold firefox && SU apt-mark hold libc-bin && SU apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
# llvm.sh (apt.llvm.org) installs the full LLVM 20 toolchain used by the build.
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== Initialize the kernel source workspace =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_oneplus_mt6897 -b oneplus/mt6897_v_15.0.0_oneplus_pad common
echo ">>> 初始化仓库完成"
# ===== Drop protected ABI export lists and strip the "-dirty" suffix =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
# Remove " -dirty" anywhere setlocalversion would normally append it...
sed -i 's/ -dirty//g' "$f"
# ...and also scrub it from $res just before the script's final line.
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== Replace the kernel localversion suffix with the user-supplied one =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
# Rewrite the final `echo "$res"` so the build reports "-$CUSTOM_SUFFIX" instead.
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== Pull the selected KSU flavor and pin its version strings =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
# Retry up to 3 times: read KSU_VERSION_API from the upstream Kbuild.
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
# Makefile snippet that pins the full version to v<api>-<commit>@cctv18.
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
# Delete the old version definitions, then re-insert ours after REPO_OWNER.
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
# Version code = commit count of main + 37185; falls back to 114514 on failure.
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
# Version = upstream commit count (via the paginated-API "last page" trick) + 30000.
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
# Add WildKSU manager support for KernelSU Next
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== Clone the patch repo & apply the SUSFS / hide patches =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
# "|| true": hunks already present in the tree are allowed to fail.
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
# Vanilla KernelSU additionally needs its own enable-susfs patch.
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== Apply the LZ4 1.10.0 & ZSTD 1.5.7 update patches =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== Apply the LZ4KD patch (sources from SukiSU_patch) =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== Append defconfig options =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# Common KSU switch. NOTE(review): this is written even when the "no KSU"
# branch was selected — confirm that is intended.
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
# Support for the Mountify (backslashxx/mountify) module
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# Enable the O2 compiler-optimization config
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
# Skip installing uapi headers into usr/include to save build time
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# Add KPM support only when builtin KPM is requested on a (Re)SukiSU branch
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# Add the related algorithm support only when the LZ4KD patch was applied.
# NOTE(review): the heredoc below was corrupted in this copy of the script
# ("cat >> ... <>> ..." with its body and a fi/if pair eaten); the CONFIG list
# is reconstructed — TODO confirm against the sibling builder_6.1.*.sh scripts.
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
cat >> "$DEFCONFIG_FILE" <<EOF
CONFIG_CRYPTO_LZ4KD=y
CONFIG_CRYPTO_842=y
EOF
fi
# ===== Network feature enhancement =====
# Guarded by APPLY_BETTERNET, which is read at startup but was otherwise
# unused — its lost guard was part of the same corruption (see note above).
if [[ "$APPLY_BETTERNET" == "y" || "$APPLY_BETTERNET" == "Y" ]]; then
echo ">>> 正在启用网络功能增强优化配置..."
echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
# Some models' vintf compatibility checks raise a boot-time warning after
# enabling CONFIG_IP6_NF_NAT, so apply a config patch that hides
# CONFIG_IP6_NF_NAT=y at configure time without affecting the built feature.
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
cd ..
fi
# ===== Add BBR and a set of other TCP congestion-control algorithms =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
# "d" makes BBR the default congestion control; otherwise cubic stays default.
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== Enable the Samsung SSG IO scheduler =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable kernel-level baseband protection (Baseband-guard LSM) =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Append baseband_guard right after selinux in the default LSM list (idempotent).
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== Disable the defconfig consistency check =====
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== Build the kernel =====
echo ">>> 开始编译内核..."
cd common
# LLVM=-20 selects the "-20"-suffixed LLVM tools (clang-20, ld.lld-20, ...).
# Fix: the 32-bit cross prefix was misspelled "arm-linux-gnuabeihf-"; the
# correct Debian/Ubuntu triplet is "arm-linux-gnueabihf-".
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== Optionally apply a KPM patch to the built Image =====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
# Builtin KPM (patch_linux) only applies on SukiSU/ReSukiSU branches.
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== Clone AnyKernel3 and stage the kernel image =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== When lz4kd is enabled, ship zram.zip inside the flashable zip =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
# KernelPatch Next users additionally get the kpn.zip module.
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== Compose the output ZIP file name from the chosen options =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
# Fix: tag "-kpm" only when a KPM patch actually ran — builtin (b/B) applies
# only on SukiSU/ReSukiSU branches (mirrors the patch_linux step above).
if [[ ("$USE_PATCH_LINUX" == [bB] && "$KSU_BRANCH" == [yYrR]) || "$USE_PATCH_LINUX" == [kK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
# Fix: BBR is also enabled when APPLY_BBR is d/D (BBR-as-default mode), so
# those values must get the "-bbr" tag too; previously only y/Y did.
if [[ "$APPLY_BBR" == [yYdD] ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
# Date-stamp the final name, e.g. "...-v20250101.zip".
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== Pack the ZIP (includes zram.zip / kpn.zip when they were downloaded) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.134.sh
================================================
#!/bin/bash
set -e
# ===== Resolve the directory this script lives in =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== Read user-configurable build options (each with a default) =====
echo "===== 欧加真MT6989通用6.1.134 A16 (天玑特供)OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,在天玑机型上可能导致bug,建议关闭;默认:n): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-n}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
# Map the KSU branch selection to a human-readable label for the summary.
case "$KSU_BRANCH" in
[yY]) KSU_TYPE="SukiSU Ultra" ;;
[rR]) KSU_TYPE="ReSukiSU" ;;
[nN]) KSU_TYPE="KernelSU Next" ;;
[kK]) KSU_TYPE="KernelSU" ;;
*) KSU_TYPE="no KSU" ;;
esac
# Map the KPM selection to a human-readable label for the summary.
case "$USE_PATCH_LINUX" in
[bB]) KPM_TYPE="builtin" ;;
[kK]) KPM_TYPE="KernelPatch Next" ;;
*) KPM_TYPE="no kpm" ;;
esac
echo
# ===== Print the configuration summary =====
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== Work directory (same as the script directory) =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== Install build dependencies =====
echo ">>> 安装构建依赖..."
# Run the given command directly when already root, otherwise escalate via sudo.
SU() {
if [ "$(id -u)" -ne 0 ]; then
set -- sudo "$@"
fi
"$@"
}
# Hold packages whose background triggers (man-db rebuilds etc.) slow down installs.
# Fix: run every apt-mark through SU — previously only the first command in the
# && chain was privilege-wrapped, so the libc-bin/man-db holds failed when the
# script ran as a non-root user.
SU apt-mark hold firefox && SU apt-mark hold libc-bin && SU apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
# llvm.sh (apt.llvm.org) installs the full LLVM 20 toolchain used by the build.
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== Initialize the kernel source workspace =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_oneplus_mt6989 -b oneplus/mt6989_b_16.0.0_ace5_race common
echo ">>> 初始化仓库完成"
# ===== Drop protected ABI export lists and strip the "-dirty" suffix =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
# Remove " -dirty" anywhere setlocalversion would normally append it...
sed -i 's/ -dirty//g' "$f"
# ...and also scrub it from $res just before the script's final line.
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== Replace the kernel localversion suffix with the user-supplied one =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
# Rewrite the final `echo "$res"` so the build reports "-$CUSTOM_SUFFIX" instead.
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== 拉取 KSU 并设置版本号 =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== 克隆补丁仓库&应用 SUSFS 补丁 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== 应用 LZ4 & ZSTD 补丁 =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 应用 LZ4KD 补丁 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 添加 defconfig 配置项 =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# 写入通用 SUSFS/KSU 配置
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# 仅在启用了 KPM 时添加 KPM 支持
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# ===== LZ4KD algorithm support (only when the LZ4KD patch was applied) =====
# NOTE(review): the original `cat >> "$DEFCONFIG_FILE" << EOF` heredoc was
# corrupted during extraction (the heredoc body and the lines separating the
# LZ4KD block from the network block were lost, leaving unparseable shell).
# CONFIG_CRYPTO_LZ4KD matches the lz4kd.patch applied earlier in this script —
# TODO confirm the full option list against the upstream builder script.
if [[ "$APPLY_LZ4KD" == [yY] ]]; then
echo "CONFIG_CRYPTO_LZ4KD=y" >> "$DEFCONFIG_FILE"
fi
# ===== Network feature enhancement options =====
# These netfilter/ipset options were left guarded by the LZ4KD condition by
# the corruption above; they belong under APPLY_BETTERNET, the option the
# builder scripts actually prompt the user for.
if [[ "$APPLY_BETTERNET" == [yY] ]]; then
echo ">>> 正在启用网络功能增强优化配置..."
echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
# Some devices' vintf compatibility checks pop up a "There's an internal
# problem with your device" warning once CONFIG_IP6_NF_NAT=y is set, so apply
# config.patch to hide CONFIG_IP6_NF_NAT=y at build time without affecting
# the compiled functionality.
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
cd ..
fi
# ===== 添加 BBR 等一系列拥塞控制算法 =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== 启用三星SSG IO调度器 =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用内核级基带保护 =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== 禁用 defconfig 检查 =====
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== 编译内核 =====
echo ">>> 开始编译内核..."
cd common
# Fix: the ARM32 cross prefix was misspelled "arm-linux-gnuabeihf-"; the
# correct GNU triplet is arm-linux-gnueabihf-. (LLVM=-20 selects the
# suffixed llvm-20 toolchain binaries per kbuild's LLVM=<suffix> convention.)
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== 选择使用 patch_linux (KPM补丁)=====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== 克隆并打包 AnyKernel3 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== 如果启用 lz4kd,则下载 zram.zip 并放入当前目录 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== 生成 ZIP 文件名 =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
if [[ "$USE_PATCH_LINUX" == [bBkK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== 打包 ZIP 文件,包括 zram.zip(如果存在) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.141.sh
================================================
#!/bin/bash
set -e
# ===== 获取脚本目录 =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== 设置自定义参数 =====
echo "===== 欧加真SM8650通用6.1.141 A15 OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,默认:y): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-y}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
KSU_TYPE="SukiSU Ultra"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
KSU_TYPE="ReSukiSU"
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
KSU_TYPE="KernelSU Next"
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
KSU_TYPE="KernelSU"
else
KSU_TYPE="no KSU"
fi
if [[ "$USE_PATCH_LINUX" == "b" || "$USE_PATCH_LINUX" == "B" ]]; then
KPM_TYPE="builtin"
elif [[ "$USE_PATCH_LINUX" == "k" || "$USE_PATCH_LINUX" == "K" ]]; then
KPM_TYPE="KernelPatch Next"
else
KPM_TYPE="no kpm"
fi
echo
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== 创建工作目录 =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== 安装构建依赖 =====
echo ">>> 安装构建依赖..."
# Function to run a command with sudo if not already root
# Usage: SU <command> [args...] — all arguments are forwarded unchanged.
SU() {
# id -u prints the effective UID; 0 means we are already root and can run
# the command directly without sudo.
if [ "$(id -u)" -eq 0 ]; then
"$@"
else
sudo "$@"
fi
}
# Install toolchain/build dependencies.
# Fix: every command in the && chains must run through SU — previously only
# the first one was elevated, so `apt-mark hold libc-bin` / `hold man-db`
# failed for non-root users and, with `set -e`, aborted the script.
SU apt-mark hold firefox && SU apt-mark hold libc-bin && SU apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
# llvm.sh is downloaded into the current directory; only removal and the
# installer itself need elevation.
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== 初始化仓库 =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_b_16.0.0_oneplus12_6.1.141 common
echo ">>> 初始化仓库完成"
# ===== 清除 abi 文件、去除 -dirty 后缀 =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== 替换版本后缀 =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== 拉取 KSU 并设置版本号 =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== 克隆补丁仓库&应用 SUSFS 补丁 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== 应用 LZ4 & ZSTD 补丁 =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 应用 LZ4KD 补丁 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 添加 defconfig 配置项 =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# 写入通用 SUSFS/KSU 配置
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# 仅在启用了 KPM 时添加 KPM 支持
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# ===== LZ4KD algorithm support (only when the LZ4KD patch was applied) =====
# NOTE(review): the original `cat >> "$DEFCONFIG_FILE" << EOF` heredoc was
# corrupted during extraction (the heredoc body and the lines separating the
# LZ4KD block from the network block were lost, leaving unparseable shell).
# CONFIG_CRYPTO_LZ4KD matches the lz4kd.patch applied earlier in this script —
# TODO confirm the full option list against the upstream builder script.
if [[ "$APPLY_LZ4KD" == [yY] ]]; then
echo "CONFIG_CRYPTO_LZ4KD=y" >> "$DEFCONFIG_FILE"
fi
# ===== Network feature enhancement options =====
# These netfilter/ipset options were left guarded by the LZ4KD condition by
# the corruption above; they belong under APPLY_BETTERNET, the option this
# script prompts the user for.
if [[ "$APPLY_BETTERNET" == [yY] ]]; then
echo ">>> 正在启用网络功能增强优化配置..."
echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
# Some devices' vintf compatibility checks pop up a "There's an internal
# problem with your device" warning once CONFIG_IP6_NF_NAT=y is set, so apply
# config.patch to hide CONFIG_IP6_NF_NAT=y at build time without affecting
# the compiled functionality.
cd common
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
patch -p1 -F 3 < config.patch || true
cd ..
fi
# ===== 添加 BBR 等一系列拥塞控制算法 =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== 启用三星SSG IO调度器 =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用内核级基带保护 =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== 禁用 defconfig 检查 =====
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== 编译内核 =====
echo ">>> 开始编译内核..."
cd common
# Fix: the ARM32 cross prefix was misspelled "arm-linux-gnuabeihf-"; the
# correct GNU triplet is arm-linux-gnueabihf-. (LLVM=-20 selects the
# suffixed llvm-20 toolchain binaries per kbuild's LLVM=<suffix> convention.)
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== 选择使用 patch_linux (KPM补丁)=====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== 克隆并打包 AnyKernel3 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== 如果启用 lz4kd,则下载 zram.zip 并放入当前目录 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== 生成 ZIP 文件名 =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
if [[ "$USE_PATCH_LINUX" == [bBkK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== 打包 ZIP 文件,包括 zram.zip(如果存在) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.57.sh
================================================
#!/bin/bash
set -e
# ===== 获取脚本目录 =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== 设置自定义参数 =====
echo "===== 欧加真SM8650通用6.1.57 A14 OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,默认:y): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-y}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
KSU_TYPE="SukiSU Ultra"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
KSU_TYPE="ReSukiSU"
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
KSU_TYPE="KernelSU Next"
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
KSU_TYPE="KernelSU"
else
KSU_TYPE="no KSU"
fi
if [[ "$USE_PATCH_LINUX" == "b" || "$USE_PATCH_LINUX" == "B" ]]; then
KPM_TYPE="builtin"
elif [[ "$USE_PATCH_LINUX" == "k" || "$USE_PATCH_LINUX" == "K" ]]; then
KPM_TYPE="KernelPatch Next"
else
KPM_TYPE="no kpm"
fi
echo
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== 创建工作目录 =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== 安装构建依赖 =====
echo ">>> 安装构建依赖..."
# Function to run a command with sudo if not already root.
# NOTE(review): this script defined the helper as lowercase `su()` —
# shadowing /bin/su — while every call site below uses `SU`. With `set -e`
# the script aborted on the first `SU` call. Renamed to `SU` to match the
# callers and the sibling builder scripts.
SU() {
if [ "$(id -u)" -eq 0 ]; then
"$@"
else
sudo "$@"
fi
}
# Fix: every command in the && chains must run through SU — previously only
# the first one was elevated, so the rest failed for non-root users.
SU apt-mark hold firefox && SU apt-mark hold libc-bin && SU apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== 初始化仓库 =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_u_14.0.0_oneplus12 common
echo ">>> 初始化仓库完成"
# ===== 清除 abi 文件、去除 -dirty 后缀 =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== 替换版本后缀 =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== 拉取 KSU 并设置版本号 =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== 克隆补丁仓库&应用 SUSFS 补丁 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== 应用 LZ4 & ZSTD 补丁 =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 应用 LZ4KD 补丁 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 添加 defconfig 配置项 =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# 写入通用 SUSFS/KSU 配置
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# 仅在启用了 KPM 时添加 KPM 支持
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# ===== LZ4KD algorithm defconfig entries (only when the LZ4KD patch was applied) =====
# NOTE(review): the original line here was corrupted to
#   cat >> "$DEFCONFIG_FILE" <>> 正在启用网络功能增强优化配置..."
# i.e. a heredoc plus the $APPLY_BETTERNET gate were lost. The LZ4KD config
# names below are the usual set enabled alongside SukiSU's 6.1 lz4kd patch —
# TODO confirm against the SukiSU_patch upstream before relying on them.
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
    cat >> "$DEFCONFIG_FILE" <<'EOF'
CONFIG_CRYPTO_LZ4HC=y
CONFIG_CRYPTO_LZ4K=y
CONFIG_CRYPTO_LZ4KD=y
CONFIG_CRYPTO_842=y
EOF
fi
# ===== Network feature enhancement configs =====
# FIX: $APPLY_BETTERNET is read from the user during setup but was never used
# after the corruption above; restore the gate so these options are opt-in.
if [[ "$APPLY_BETTERNET" == "y" || "$APPLY_BETTERNET" == "Y" ]]; then
    echo ">>> 正在启用网络功能增强优化配置..."
    echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
    # Some devices' vintf compatibility checks complain after CONFIG_IP6_NF_NAT
    # is enabled ("您的设备内部出现了问题…"); config.patch hides the option from
    # the check at build time without disabling the compiled feature.
    cd common
    wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
    patch -p1 -F 3 < config.patch || true
    cd ..
fi
# ===== 添加 BBR 等一系列拥塞控制算法 =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== 启用三星SSG IO调度器 =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== Enable kernel-level baseband protection (Baseband-guard) =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
# NOTE(review): piping curl straight into bash executes an unpinned remote
# script — consider pinning a commit or verifying a checksum here.
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
# Within the `config LSM` stanza of security/Kconfig, append baseband_guard
# after selinux on the `default` line; the inner /baseband_guard/! guard makes
# the substitution idempotent across repeated runs.
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== Disable the defconfig consistency check =====
# (we appended non-default options above, so check_defconfig would fail)
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== Build the kernel =====
echo ">>> 开始编译内核..."
cd common
# FIX: the ARM32 cross triple was misspelled `arm-linux-gnuabeihf-`; the
# correct hard-float triple is `arm-linux-gnueabihf-`. LLVM=-20 selects the
# clang-20/ld.lld-20 suffixed toolchain installed by llvm.sh earlier.
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== 选择使用 patch_linux (KPM补丁)=====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== 克隆并打包 AnyKernel3 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== 如果启用 lz4kd,则下载 zram.zip 并放入当前目录 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/other_patch/zram.zip
fi
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== 生成 ZIP 文件名 =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
if [[ "$USE_PATCH_LINUX" == [bBkK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== 打包 ZIP 文件,包括 zram.zip(如果存在) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: local/builder_6.1.75.sh
================================================
#!/bin/bash
set -e
# ===== 获取脚本目录 =====
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"
# ===== 设置自定义参数 =====
echo "===== 欧加真SM8650通用6.1.75 A15 OKI内核本地编译脚本 By Coolapk@cctv18 ====="
echo ">>> 读取用户配置..."
MANIFEST=${MANIFEST:-oppo+oplus+realme}
read -p "请输入自定义内核后缀(默认:android14-11-o-gca13bffobf09): " CUSTOM_SUFFIX
CUSTOM_SUFFIX=${CUSTOM_SUFFIX:-android14-11-o-gca13bffobf09}
read -p "是否启用susfs?(y/n,默认:y): " APPLY_SUSFS
APPLY_SUSFS=${APPLY_SUSFS:-y}
read -p "是否启用 KPM?(b-(re)sukisu内置kpm, k-kernelpatch next独立kpm实现, n-关闭kpm,默认:n): " USE_PATCH_LINUX
USE_PATCH_LINUX=${USE_PATCH_LINUX:-n}
read -p "KSU分支版本(r=ReSukiSU, y=SukiSU Ultra, n=KernelSU Next, k=KSU, l=lkm模式(无内置KSU), 默认:r): " KSU_BRANCH
KSU_BRANCH=${KSU_BRANCH:-r}
read -p "是否应用 lz4 1.10.0 & zstd 1.5.7 补丁?(y/n,默认:y): " APPLY_LZ4
APPLY_LZ4=${APPLY_LZ4:-y}
read -p "是否应用 lz4kd 补丁?(y/n,默认:n): " APPLY_LZ4KD
APPLY_LZ4KD=${APPLY_LZ4KD:-n}
read -p "是否启用网络功能增强优化配置?(y/n,默认:y): " APPLY_BETTERNET
APPLY_BETTERNET=${APPLY_BETTERNET:-y}
read -p "是否添加 BBR 等一系列拥塞控制算法?(y添加/n禁用/d默认,默认:n): " APPLY_BBR
APPLY_BBR=${APPLY_BBR:-n}
read -p "是否启用三星SSG IO调度器?(y/n,默认:y): " APPLY_SSG
APPLY_SSG=${APPLY_SSG:-y}
read -p "是否启用Re-Kernel?(y/n,默认:n): " APPLY_REKERNEL
APPLY_REKERNEL=${APPLY_REKERNEL:-n}
read -p "是否启用内核级基带保护?(y/n,默认:y): " APPLY_BBG
APPLY_BBG=${APPLY_BBG:-y}
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
KSU_TYPE="SukiSU Ultra"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
KSU_TYPE="ReSukiSU"
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
KSU_TYPE="KernelSU Next"
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
KSU_TYPE="KernelSU"
else
KSU_TYPE="no KSU"
fi
if [[ "$USE_PATCH_LINUX" == "b" || "$USE_PATCH_LINUX" == "B" ]]; then
KPM_TYPE="builtin"
elif [[ "$USE_PATCH_LINUX" == "k" || "$USE_PATCH_LINUX" == "K" ]]; then
KPM_TYPE="KernelPatch Next"
else
KPM_TYPE="no kpm"
fi
echo
echo "===== 配置信息 ====="
echo "适用机型: $MANIFEST"
echo "自定义内核后缀: -$CUSTOM_SUFFIX"
echo "KSU分支版本: $KSU_TYPE"
echo "启用susfs: $APPLY_SUSFS"
echo "启用 KPM: $KPM_TYPE"
echo "应用 lz4&zstd 补丁: $APPLY_LZ4"
echo "应用 lz4kd 补丁: $APPLY_LZ4KD"
echo "应用网络功能增强优化配置: $APPLY_BETTERNET"
echo "应用 BBR 等算法: $APPLY_BBR"
echo "启用三星SSG IO调度器: $APPLY_SSG"
echo "启用Re-Kernel: $APPLY_REKERNEL"
echo "启用内核级基带保护: $APPLY_BBG"
echo "===================="
echo
# ===== 创建工作目录 =====
WORKDIR="$SCRIPT_DIR"
cd "$WORKDIR"
# ===== 安装构建依赖 =====
echo ">>> 安装构建依赖..."
# Run a command as root: directly when already root, otherwise via sudo.
SU() {
    if [ "$(id -u)" -eq 0 ]; then
        "$@"
    else
        sudo "$@"
    fi
}
# Hold packages whose trigger hooks slow down apt operations on CI images.
# FIX: each apt-mark in the && chain needs its own SU — the original only
# elevated the first command, so the rest could fail as an unprivileged user.
SU apt-mark hold firefox && SU apt-mark hold libc-bin && SU apt-mark hold man-db
SU rm -rf /var/lib/man-db/auto-update
SU apt-get update
SU apt-get install --no-install-recommends -y curl bison flex clang binutils dwarves git lld pahole zip perl make gcc python3 python-is-python3 bc libssl-dev libelf-dev cpio xz-utils tar unzip
# Fetch the upstream llvm.sh installer and install the clang-20 toolchain
# (consumed later by the kernel build via LLVM=-20 suffixed tools).
SU rm -rf ./llvm.sh && wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh
SU ./llvm.sh 20 all
# ===== 初始化仓库 =====
echo ">>> 初始化仓库..."
rm -rf kernel_workspace
mkdir kernel_workspace
cd kernel_workspace
git clone --depth=1 https://github.com/cctv18/android_kernel_common_oneplus_sm8650 -b oneplus/sm8650_v_15.0.0_oneplus12 common
echo ">>> 初始化仓库完成"
# ===== 清除 abi 文件、去除 -dirty 后缀 =====
echo ">>> 正在清除 ABI 文件及去除 dirty 后缀..."
rm common/android/abi_gki_protected_exports_* || true
for f in common/scripts/setlocalversion; do
sed -i 's/ -dirty//g' "$f"
sed -i '$i res=$(echo "$res" | sed '\''s/-dirty//g'\'')' "$f"
done
# ===== 替换版本后缀 =====
echo ">>> 替换内核版本后缀..."
for f in ./common/scripts/setlocalversion; do
sed -i "\$s|echo \"\\\$res\"|echo \"-${CUSTOM_SUFFIX}\"|" "$f"
done
# ===== 拉取 KSU 并设置版本号 =====
if [[ "$KSU_BRANCH" == "y" || "$KSU_BRANCH" == "Y" ]]; then
echo ">>> 拉取 SukiSU-Ultra 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ShirkNeko/SukiSU-Ultra/main/kernel/setup.sh" | bash -s builtin
cd KernelSU
GIT_COMMIT_HASH=$(git rev-parse --short=8 HEAD)
echo "当前提交哈希: $GIT_COMMIT_HASH"
echo ">>> 正在获取上游 API 版本信息..."
for i in {1..3}; do
KSU_API_VERSION=$(curl -s "https://raw.githubusercontent.com/SukiSU-Ultra/SukiSU-Ultra/builtin/kernel/Kbuild" | \
grep -m1 "KSU_VERSION_API :=" | \
awk -F'= ' '{print $2}' | \
tr -d '[:space:]')
if [ -n "$KSU_API_VERSION" ]; then
echo "成功获取 API 版本: $KSU_API_VERSION"
break
else
echo "获取失败,重试中 ($i/3)..."
sleep 1
fi
done
if [ -z "$KSU_API_VERSION" ]; then
echo -e "无法获取 API 版本,使用默认值 3.1.7..."
KSU_API_VERSION="3.1.7"
fi
export KSU_API_VERSION=$KSU_API_VERSION
VERSION_DEFINITIONS=$'define get_ksu_version_full\nv\\$1-'"$GIT_COMMIT_HASH"$'@cctv18\nendef\n\nKSU_VERSION_API := '"$KSU_API_VERSION"$'\nKSU_VERSION_FULL := v'"$KSU_API_VERSION"$'-'"$GIT_COMMIT_HASH"$'@cctv18'
echo ">>> 正在修改 kernel/Kbuild 文件..."
sed -i '/define get_ksu_version_full/,/endef/d' kernel/Kbuild
sed -i '/KSU_VERSION_API :=/d' kernel/Kbuild
sed -i '/KSU_VERSION_FULL :=/d' kernel/Kbuild
awk -v def="$VERSION_DEFINITIONS" '
/REPO_OWNER :=/ {print; print def; inserted=1; next}
1
END {if (!inserted) print def}
' kernel/Kbuild > kernel/Kbuild.tmp && mv kernel/Kbuild.tmp kernel/Kbuild
KSU_VERSION_CODE=$(expr $(git rev-list --count main 2>/dev/null) + 37185 2>/dev/null || echo 114514)
echo ">>> 修改完成!验证结果:"
echo "------------------------------------------------"
grep -A10 "REPO_OWNER" kernel/Kbuild | head -n 10
echo "------------------------------------------------"
grep "KSU_VERSION_FULL" kernel/Kbuild
echo ">>> 最终版本字符串: v${KSU_API_VERSION}-${GIT_COMMIT_HASH}@cctv18"
echo ">>> Version Code: ${KSU_VERSION_CODE}"
elif [[ "$KSU_BRANCH" == "r" || "$KSU_BRANCH" == "R" ]]; then
echo ">>> 拉取 ReSukiSU 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/ReSukiSU/ReSukiSU/main/kernel/setup.sh" | bash -s main
echo 'CONFIG_KSU_FULL_NAME_FORMAT="%TAG_NAME%-%COMMIT_SHA%@cctv18"' >> ./common/arch/arm64/configs/gki_defconfig
elif [[ "$KSU_BRANCH" == "n" || "$KSU_BRANCH" == "N" ]]; then
echo ">>> 拉取 KernelSU Next 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/pershoot/KernelSU-Next/refs/heads/dev-susfs/kernel/setup.sh" | bash -s dev-susfs
cd KernelSU-Next
rm -rf .git
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/pershoot/KernelSU-Next/commits?sha=dev&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/KSU_VERSION_FALLBACK := 1/KSU_VERSION_FALLBACK := $KSU_VERSION/g" kernel/Kbuild
KSU_GIT_TAG=$(curl -sL "https://api.github.com/repos/KernelSU-Next/KernelSU-Next/tags" | grep -o '"name": *"[^"]*"' | head -n 1 | sed 's/"name": "//;s/"//')
sed -i "s/KSU_VERSION_TAG_FALLBACK := v0.0.1/KSU_VERSION_TAG_FALLBACK := $KSU_GIT_TAG/g" kernel/Kbuild
#为KernelSU Next添加WildKSU管理器支持
cd ../common/drivers/kernelsu
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/apk_sign.patch
patch -p2 -N -F 3 < apk_sign.patch || true
elif [[ "$KSU_BRANCH" == "k" || "$KSU_BRANCH" == "K" ]]; then
echo ">>> 拉取 KernelSU (tiann/KernelSU) 并设置版本..."
curl -LSs "https://raw.githubusercontent.com/tiann/KernelSU/refs/heads/main/kernel/setup.sh" | bash -s main
cd ./KernelSU
KSU_VERSION=$(expr $(curl -sI "https://api.github.com/repos/tiann/KernelSU/commits?sha=main&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') "+" 30000)
sed -i "s/DKSU_VERSION=16/DKSU_VERSION=${KSU_VERSION}/" kernel/Kbuild
else
echo "已选择无内置KernelSU模式,跳过配置..."
fi
# ===== 克隆补丁仓库&应用 SUSFS 补丁 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 应用 SUSFS&hook 补丁..."
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo ">>> 克隆补丁仓库..."
git clone --depth=1 https://github.com/cctv18/susfs4oki.git susfs4ksu -b oki-android14-6.1
wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/69_hide_stuff.patch -O ./common/69_hide_stuff.patch
cp ./susfs4ksu/kernel_patches/50_add_susfs_in_gki-android14-6.1.patch ./common/
cp ./susfs4ksu/kernel_patches/fs/* ./common/fs/
cp ./susfs4ksu/kernel_patches/include/linux/* ./common/include/linux/
cd ./common
patch -p1 < 50_add_susfs_in_gki-android14-6.1.patch || true
patch -p1 -F 3 < 69_hide_stuff.patch || true
else
echo ">>> 未开启susfs,跳过susfs补丁配置..."
fi
cd "$WORKDIR/kernel_workspace"
if [[ "$KSU_BRANCH" == [kK] && "$APPLY_SUSFS" == [yY] ]]; then
cp ./susfs4ksu/kernel_patches/KernelSU/10_enable_susfs_for_ksu.patch ./KernelSU/
cd ./KernelSU
patch -p1 < 10_enable_susfs_for_ksu.patch || true
fi
cd "$WORKDIR/kernel_workspace"
# ===== 应用 LZ4 & ZSTD 补丁 =====
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
echo ">>> 正在添加lz4 1.10.0 & zstd 1.5.7补丁..."
git clone --depth=1 https://github.com/cctv18/oppo_oplus_realme_sm8650.git
cp ./oppo_oplus_realme_sm8650/zram_patch/001-lz4.patch ./common/
cp ./oppo_oplus_realme_sm8650/zram_patch/lz4armv8.S ./common/lib
cp ./oppo_oplus_realme_sm8650/zram_patch/002-zstd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
git apply -p1 < 001-lz4.patch || true
patch -p1 < 002-zstd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4&ZSTD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 应用 LZ4KD 补丁 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
echo ">>> 应用 LZ4KD 补丁..."
if [ ! -d "SukiSU_patch" ]; then
git clone --depth=1 https://github.com/ShirkNeko/SukiSU_patch.git
fi
cp -r ./SukiSU_patch/other/zram/lz4k/include/linux/* ./common/include/linux/
cp -r ./SukiSU_patch/other/zram/lz4k/lib/* ./common/lib
cp -r ./SukiSU_patch/other/zram/lz4k/crypto/* ./common/crypto
cp ./SukiSU_patch/other/zram/zram_patch/6.1/lz4kd.patch ./common/
cd "$WORKDIR/kernel_workspace/common"
patch -p1 -F 3 < lz4kd.patch || true
cd "$WORKDIR/kernel_workspace"
else
echo ">>> 跳过 LZ4KD 补丁..."
cd "$WORKDIR/kernel_workspace"
fi
# ===== 添加 defconfig 配置项 =====
echo ">>> 添加 defconfig 配置项..."
DEFCONFIG_FILE=./common/arch/arm64/configs/gki_defconfig
# 写入通用 SUSFS/KSU 配置
echo "CONFIG_KSU=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_SUSFS" == [yY] ]]; then
echo "CONFIG_KSU_SUSFS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_PATH=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_KSTAT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_TRY_UMOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_UNAME=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_ENABLE_LOG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_OPEN_REDIRECT=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_KSU_SUSFS_SUS_MAP=y" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_KSU_SUSFS=n" >> "$DEFCONFIG_FILE"
fi
#添加对 Mountify (backslashxx/mountify) 模块的支持
echo "CONFIG_TMPFS_XATTR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TMPFS_POSIX_ACL=y" >> "$DEFCONFIG_FILE"
# 开启O2编译优化配置
echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y" >> "$DEFCONFIG_FILE"
#跳过将uapi标准头安装到 usr/include 目录的不必要操作,节省编译时间
echo "CONFIG_HEADERS_INSTALL=n" >> "$DEFCONFIG_FILE"
# 仅在启用了 KPM 时添加 KPM 支持
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo "CONFIG_KPM=y" >> "$DEFCONFIG_FILE"
fi
# ===== LZ4KD algorithm defconfig entries (only when the LZ4KD patch was applied) =====
# NOTE(review): the original line here was corrupted to
#   cat >> "$DEFCONFIG_FILE" <>> 正在启用网络功能增强优化配置..."
# i.e. a heredoc plus the $APPLY_BETTERNET gate were lost. The LZ4KD config
# names below are the usual set enabled alongside SukiSU's 6.1 lz4kd patch —
# TODO confirm against the SukiSU_patch upstream before relying on them.
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
    cat >> "$DEFCONFIG_FILE" <<'EOF'
CONFIG_CRYPTO_LZ4HC=y
CONFIG_CRYPTO_LZ4K=y
CONFIG_CRYPTO_LZ4KD=y
CONFIG_CRYPTO_842=y
EOF
fi
# ===== Network feature enhancement configs =====
# FIX: $APPLY_BETTERNET is prompted for at the top of this script but was
# never used after the corruption above; restore the gate so these options
# are opt-in as the prompt promises.
if [[ "$APPLY_BETTERNET" == "y" || "$APPLY_BETTERNET" == "Y" ]]; then
    echo ">>> 正在启用网络功能增强优化配置..."
    echo "CONFIG_BPF_STREAM_PARSER=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_NETFILTER_XT_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_MAX=65534" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_IP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_IPMAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_BITMAP_PORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPMARK=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORTIP=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPPORTNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_IPMAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_MAC=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETPORTNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETNET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETPORT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_HASH_NETIFACE=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP_SET_LIST_SET=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP6_NF_NAT=y" >> "$DEFCONFIG_FILE"
    echo "CONFIG_IP6_NF_TARGET_MASQUERADE=y" >> "$DEFCONFIG_FILE"
    # Some devices' vintf compatibility checks complain after CONFIG_IP6_NF_NAT
    # is enabled ("您的设备内部出现了问题…"); config.patch hides the option from
    # the check at build time without disabling the compiled feature.
    cd common
    wget https://github.com/cctv18/oppo_oplus_realme_sm8650/raw/refs/heads/main/other_patch/config.patch
    patch -p1 -F 3 < config.patch || true
    cd ..
fi
# ===== 添加 BBR 等一系列拥塞控制算法 =====
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" || "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo ">>> 正在添加 BBR 等一系列拥塞控制算法..."
echo "CONFIG_TCP_CONG_ADVANCED=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BBR=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_CUBIC=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_VEGAS=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_NV=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_WESTWOOD=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_HTCP=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_TCP_CONG_BRUTAL=y" >> "$DEFCONFIG_FILE"
if [[ "$APPLY_BBR" == "d" || "$APPLY_BBR" == "D" ]]; then
echo "CONFIG_DEFAULT_TCP_CONG=bbr" >> "$DEFCONFIG_FILE"
else
echo "CONFIG_DEFAULT_TCP_CONG=cubic" >> "$DEFCONFIG_FILE"
fi
fi
# ===== 启用三星SSG IO调度器 =====
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
echo ">>> 正在启用三星SSG IO调度器..."
echo "CONFIG_MQ_IOSCHED_SSG=y" >> "$DEFCONFIG_FILE"
echo "CONFIG_MQ_IOSCHED_SSG_CGROUP=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用Re-Kernel =====
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
echo ">>> 正在启用Re-Kernel..."
echo "CONFIG_REKERNEL=y" >> "$DEFCONFIG_FILE"
fi
# ===== 启用内核级基带保护 =====
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
echo ">>> 正在启用内核级基带保护..."
echo "CONFIG_BBG=y" >> "$DEFCONFIG_FILE"
cd ./common
curl -sSL https://github.com/cctv18/Baseband-guard/raw/master/setup.sh | bash
sed -i '/^config LSM$/,/^help$/{ /^[[:space:]]*default/ { /baseband_guard/! s/selinux/selinux,baseband_guard/ } }' security/Kconfig
cd ..
fi
# ===== Disable the defconfig consistency check =====
# (we appended non-default options above, so check_defconfig would fail)
echo ">>> 禁用 defconfig 检查..."
sed -i 's/check_defconfig//' ./common/build.config.gki
# ===== Build the kernel =====
echo ">>> 开始编译内核..."
cd common
# FIX: the ARM32 cross triple was misspelled `arm-linux-gnuabeihf-`; the
# correct hard-float triple is `arm-linux-gnueabihf-`. LLVM=-20 selects the
# clang-20/ld.lld-20 suffixed toolchain installed by llvm.sh earlier.
make -j$(nproc --all) LLVM=-20 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CROSS_COMPILE_ARM32=arm-linux-gnueabihf- CC=clang LD=ld.lld HOSTCC=clang HOSTLD=ld.lld O=out KCFLAGS+=-O2 KCFLAGS+=-Wno-error gki_defconfig all
echo ">>> 内核编译成功!"
# ===== 选择使用 patch_linux (KPM补丁)=====
OUT_DIR="$WORKDIR/kernel_workspace/common/out/arch/arm64/boot"
if [[ "$USE_PATCH_LINUX" == [bB] && $KSU_BRANCH == [yYrR] ]]; then
echo ">>> 使用 patch_linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/SukiSU-Ultra/SukiSU_KernelPatch_patch/releases/latest/download/patch_linux
chmod +x patch_linux
./patch_linux
rm -f Image
mv oImage Image
echo ">>> 已成功打上KPM补丁!"
elif [[ "$USE_PATCH_LINUX" == [kK] ]]; then
echo ">>> 使用 kptools-linux 工具处理输出..."
cd "$OUT_DIR"
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kptools-linux
wget https://github.com/KernelSU-Next/KPatch-Next/releases/latest/download/kpimg-linux
chmod +x ./kptools-linux
./kptools-linux -p -i ./Image -k ./kpimg-linux -o ./oImage
rm -f Image
mv oImage Image
echo ">>> 已成功打上KP-N补丁!"
else
echo ">>> 跳过 KPM 修补操作..."
fi
# ===== 克隆并打包 AnyKernel3 =====
cd "$WORKDIR/kernel_workspace"
echo ">>> 克隆 AnyKernel3 项目..."
git clone https://github.com/cctv18/AnyKernel3 --depth=1
echo ">>> 清理 AnyKernel3 Git 信息..."
rm -rf ./AnyKernel3/.git
echo ">>> 拷贝内核镜像到 AnyKernel3 目录..."
cp "$OUT_DIR/Image" ./AnyKernel3/
echo ">>> 进入 AnyKernel3 目录并打包 zip..."
cd "$WORKDIR/kernel_workspace/AnyKernel3"
# ===== 如果启用 lz4kd,则下载 zram.zip 并放入当前目录 =====
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
wget https://raw.githubusercontent.com/cctv18/oppo_oplus_realme_sm8650/refs/heads/main/zram.zip
fi
if [[ "$USE_PATCH_LINUX" == [kK] ]]; then
wget https://github.com/cctv18/KPatch-Next/releases/latest/download/kpn.zip
fi
# ===== 生成 ZIP 文件名 =====
ZIP_NAME="Anykernel3-${MANIFEST}"
if [[ "$APPLY_SUSFS" == "y" || "$APPLY_SUSFS" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-susfs"
fi
if [[ "$APPLY_LZ4KD" == "y" || "$APPLY_LZ4KD" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4kd"
fi
if [[ "$APPLY_LZ4" == "y" || "$APPLY_LZ4" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-lz4-zstd"
fi
if [[ "$USE_PATCH_LINUX" == [bBkK] ]]; then
ZIP_NAME="${ZIP_NAME}-kpm"
fi
if [[ "$APPLY_BBR" == "y" || "$APPLY_BBR" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbr"
fi
if [[ "$APPLY_SSG" == "y" || "$APPLY_SSG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-ssg"
fi
if [[ "$APPLY_REKERNEL" == "y" || "$APPLY_REKERNEL" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-rek"
fi
if [[ "$APPLY_BBG" == "y" || "$APPLY_BBG" == "Y" ]]; then
ZIP_NAME="${ZIP_NAME}-bbg"
fi
ZIP_NAME="${ZIP_NAME}-v$(date +%Y%m%d).zip"
# ===== 打包 ZIP 文件,包括 zram.zip(如果存在) =====
echo ">>> 打包文件: $ZIP_NAME"
zip -r "../$ZIP_NAME" ./*
ZIP_PATH="$(realpath "../$ZIP_NAME")"
echo ">>> 打包完成 文件所在目录: $ZIP_PATH"
================================================
FILE: other_patch/69_hide_stuff.patch
================================================
--- a/fs/proc/task_mmu.c 2024-12-17 11:21:16.646581300 -0500
+++ b/fs/proc/task_mmu.c 2024-12-17 11:35:36.873887048 -0500
@@ -416,6 +416,23 @@
extern void susfs_sus_ino_for_show_map_vma(unsigned long ino, dev_t *out_dev, unsigned long *out_ino);
#endif
+static void show_vma_header_prefix_fake(struct seq_file *m,
+ unsigned long start, unsigned long end,
+ vm_flags_t flags, unsigned long long pgoff,
+ dev_t dev, unsigned long ino)
+{
+ seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+ start,
+ end,
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? '-' : '-',
+ flags & VM_MAYSHARE ? 's' : 'p',
+ pgoff,
+ MAJOR(dev), MINOR(dev), ino);
+}
+
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
@@ -427,6 +444,7 @@
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
+ struct dentry *dentry;
if (file) {
struct inode *inode = file_inode(vma->vm_file);
@@ -442,6 +460,23 @@
bypass_orig_flow:
#endif
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ dentry = file->f_path.dentry;
+ if (dentry) {
+ const char *path = (const char *)dentry->d_name.name;
+ if (strstr(path, "lineage")) {
+ start = vma->vm_start;
+ end = vma->vm_end;
+ show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
+ name = "/system/framework/framework-res.apk";
+ goto done;
+ }
+ if (strstr(path, "jit-zygote-cache")) {
+ start = vma->vm_start;
+ end = vma->vm_end;
+ show_vma_header_prefix_fake(m, start, end, flags, pgoff, dev, ino);
+ goto bypass;
+ }
+ }
}
start = vma->vm_start;
@@ -449,6 +484,7 @@
if (show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino))
return;
+ bypass:
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
--- a/fs/proc/base.c 2024-12-15 11:30:00.213422100 -0500
+++ b/fs/proc/base.c 2024-12-15 11:36:21.422813925 -0500
@@ -2229,11 +2229,17 @@
rc = -ENOENT;
vma = find_exact_vma(mm, vm_start, vm_end);
- if (vma && vma->vm_file) {
- *path = vma->vm_file->f_path;
- path_get(path);
- rc = 0;
- }
+ if (vma) {
+ if (vma->vm_file) {
+ if (strstr(vma->vm_file->f_path.dentry->d_name.name, "lineage")) {
+ rc = kern_path("/system/framework/framework-res.apk", LOOKUP_FOLLOW, path);
+ } else {
+ *path = vma->vm_file->f_path;
+ path_get(path);
+ rc = 0;
+ }
+ }
+ }
mmap_read_unlock(mm);
out_mmput:
================================================
FILE: other_patch/apk_sign.patch
================================================
diff --git a/kernel/apk_sign.c b/kernel/apk_sign.c
index 3f2ae2d4..862e471b 100644
--- a/kernel/apk_sign.c 2026-01-05 21:32:57.870040182 +0800
+++ a/kernel/apk_sign.c 2026-01-07 05:34:13.644884315 +0800
@@ -376,7 +376,9 @@
char hash[65] = {0};
if (sscanf(p, "%x:%64[^,]", &size, hash) == 2) {
- if (check_v2_signature(path, size, hash)) {
+ if (check_v2_signature(path, size, hash)
+ || check_v2_signature(path, 0x338, "f26471a28031130362bce7eebffb9a0b8afc3095f163ce0c75a309f03b644a1f") // pershoot Manager
+ || check_v2_signature(path, 0x381, "52d52d8c8bfbe53dc2b6ff1c613184e2c03013e090fe8905d8e3d5dc2658c2e4")){ // Wild_KSU Manager
pr_info("KernelSU: matched manager APK: %s (size=0x%x, hash=%s)\n",
path, size, hash);
return true;
================================================
FILE: other_patch/config.patch
================================================
diff --git a/kernel/Makefile b/kernel/Makefile
index df02908..f654c9b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -154,8 +154,27 @@ $(obj)/config_data.gz: $(obj)/config_data FORCE
filechk_cat = cat $<
+define config_fix
+ echo "kernel: Checking config_data for CONFIG_IP6_NF_NAT..."; \
+ if grep -q '^CONFIG_IP6_NF_NAT=y' $@; then \
+ echo "kernel: Found CONFIG_IP6_NF_NAT=y - modifying to display as 'n'"; \
+ sed -i 's/^CONFIG_IP6_NF_NAT=y$$/CONFIG_IP6_NF_NAT=n/' $@; \
+ echo "kernel: Successfully modified CONFIG_IP6_NF_NAT display in config_data"; \
+ elif grep -q '^CONFIG_IP6_NF_NAT=n' $@; then \
+ echo "kernel: CONFIG_IP6_NF_NAT already set to 'n' (no change needed)"; \
+ elif grep -q 'CONFIG_IP6_NF_NAT' $@; then \
+ echo "kernel: Warning: Unexpected CONFIG_IP6_NF_NAT value found"; \
+ grep 'CONFIG_IP6_NF_NAT' $@; \
+ else \
+ echo "kernel: Warning: CONFIG_IP6_NF_NAT not found in config_data"; \
+ fi; \
+ echo "kernel: Final CONFIG_IP6_NF_NAT value in config_data: "; \
+ grep 'CONFIG_IP6_NF_NAT' $@ || echo "kernel: (not present)"
+endef
+
$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
$(call filechk,cat)
+ $(Q)$(config_fix)
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
================================================
FILE: zram_patch/001-lz4-old.patch
================================================
Subject: [PATCH] lz4: Update to version 1.9.4
---
Index: include/linux/lz4.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
--- a/include/linux/lz4.h (revision 550dad3df59e8c39fe93484e6f207d4110619483)
+++ b/include/linux/lz4.h (revision 30c5a157bf8495708813366f0ba3fc350f8a9657)
@@ -42,7 +42,7 @@
#define __LZ4_H__
#include
-#include /* memset, memcpy */
+#include /* memset, memcpy */
/*-************************************************************************
* CONSTANTS
@@ -55,25 +55,27 @@
* Reduced memory usage can improve speed, due to cache effect
* Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
-#define LZ4_MEMORY_USAGE 14
+/* We only use LZ4 for zRAM, so the blocks are 4KB in size. 1KB is enough here */
+#define LZ4_MEMORY_USAGE 10
-#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
-#define LZ4_COMPRESSBOUND(isize) (\
- (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \
- ? 0 \
- : (isize) + ((isize)/255) + 16)
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) \
+ ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? \
+ 0 : \
+ (isize) + ((isize) / 255) + 16)
#define LZ4_ACCELERATION_DEFAULT 1
-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_ACCELERATION_MAX 65537
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE - 2)
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)
-#define LZ4HC_MIN_CLEVEL 3
-#define LZ4HC_DEFAULT_CLEVEL 9
-#define LZ4HC_MAX_CLEVEL 16
+#define LZ4HC_MIN_CLEVEL 3
+#define LZ4HC_DEFAULT_CLEVEL 9
+#define LZ4HC_MAX_CLEVEL 16
#define LZ4HC_DICTIONARY_LOGSIZE 16
-#define LZ4HC_MAXD (1<UTF-8
===================================================================
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
--- a/lib/lz4/lz4_compress.c (revision 550dad3df59e8c39fe93484e6f207d4110619483)
+++ b/lib/lz4/lz4_compress.c (revision 30c5a157bf8495708813366f0ba3fc350f8a9657)
@@ -41,29 +41,28 @@
static const int LZ4_minLength = (MFLIMIT + 1);
static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
+/* Increase this value ==> compression run slower on incompressible data */
+static const U32 LZ4_skipTrigger = 6;
+
+LZ4_stream_t *LZ4_initStream(void *buffer, size_t size);
/*-******************************
* Compression functions
********************************/
-static FORCE_INLINE U32 LZ4_hash4(
- U32 sequence,
- tableType_t const tableType)
+static FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
if (tableType == byU16)
- return ((sequence * 2654435761U)
- >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ return ((sequence * 2654435761U) >>
+ ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
else
- return ((sequence * 2654435761U)
- >> ((MINMATCH * 8) - LZ4_HASHLOG));
+ return ((sequence * 2654435761U) >>
+ ((MINMATCH * 8) - LZ4_HASHLOG));
}
-static FORCE_INLINE U32 LZ4_hash5(
- U64 sequence,
- tableType_t const tableType)
+static FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
- const U32 hashLog = (tableType == byU16)
- ? LZ4_HASHLOG + 1
- : LZ4_HASHLOG;
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 :
+ LZ4_HASHLOG;
#if LZ4_LITTLE_ENDIAN
static const U64 prime5bytes = 889523592379ULL;
@@ -76,245 +75,454 @@
#endif
}
-static FORCE_INLINE U32 LZ4_hashPosition(
- const void *p,
- tableType_t const tableType)
+static FORCE_INLINE U32 LZ4_hashPosition(const void *p,
+ tableType_t const tableType)
{
#if LZ4_ARCH64
- if (tableType == byU32)
+ if (tableType != byU16)
return LZ4_hash5(LZ4_read_ARCH(p), tableType);
#endif
return LZ4_hash4(LZ4_read32(p), tableType);
}
-static void LZ4_putPositionOnHash(
- const BYTE *p,
- U32 h,
- void *tableBase,
- tableType_t const tableType,
- const BYTE *srcBase)
+static FORCE_INLINE void LZ4_clearHash(U32 h, void *tableBase,
+ tableType_t const tableType)
+{
+ switch (tableType) {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */
+ assert(0);
+ return;
+ }
+ case byPtr: {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+ hashTable[h] = NULL;
+ return;
+ }
+ case byU32: {
+ U32 *hashTable = (U32 *)tableBase;
+ hashTable[h] = 0;
+ return;
+ }
+ case byU16: {
+ U16 *hashTable = (U16 *)tableBase;
+ hashTable[h] = 0;
+ return;
+ }
+ }
+}
+
+static FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase,
+ tableType_t const tableType)
+{
+ switch (tableType) {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */
+ assert(0);
+ return;
+ }
+ case byU32: {
+ U32 *hashTable = (U32 *)tableBase;
+ hashTable[h] = idx;
+ return;
+ }
+ case byU16: {
+ U16 *hashTable = (U16 *)tableBase;
+ assert(idx < 65536);
+ hashTable[h] = (U16)idx;
+ return;
+ }
+ }
+}
+
+static void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase,
+ tableType_t const tableType,
+ const BYTE *srcBase)
{
switch (tableType) {
- case byPtr:
- {
+ case byPtr: {
const BYTE **hashTable = (const BYTE **)tableBase;
hashTable[h] = p;
return;
}
- case byU32:
- {
- U32 *hashTable = (U32 *) tableBase;
+ case byU32: {
+ U32 *hashTable = (U32 *)tableBase;
hashTable[h] = (U32)(p - srcBase);
return;
}
- case byU16:
- {
- U16 *hashTable = (U16 *) tableBase;
+ case byU16: {
+ U16 *hashTable = (U16 *)tableBase;
hashTable[h] = (U16)(p - srcBase);
return;
}
+ case clearedTable: { /* fallthrough */
+ }
}
}
-static FORCE_INLINE void LZ4_putPosition(
- const BYTE *p,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
+static FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
-static const BYTE *LZ4_getPositionOnHash(
- U32 h,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+static FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void *tableBase,
+ tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32 *const hashTable = (const U32 *)tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE - 2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16 *const hashTable = (const U16 *)tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
+ return hashTable[h];
+ }
+ assert(0);
+ return 0; /* forbidden case */
+}
+
+static const BYTE *LZ4_getPositionOnHash(U32 h, void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
{
if (tableType == byPtr) {
- const BYTE **hashTable = (const BYTE **) tableBase;
+ const BYTE **hashTable = (const BYTE **)tableBase;
return hashTable[h];
}
if (tableType == byU32) {
- const U32 * const hashTable = (U32 *) tableBase;
+ const U32 *const hashTable = (U32 *)tableBase;
return hashTable[h] + srcBase;
}
{
/* default, to ensure a return */
- const U16 * const hashTable = (U16 *) tableBase;
+ const U16 *const hashTable = (U16 *)tableBase;
return hashTable[h] + srcBase;
}
}
-static FORCE_INLINE const BYTE *LZ4_getPosition(
- const BYTE *p,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
+static FORCE_INLINE const BYTE *LZ4_getPosition(const BYTE *p, void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
-
-/*
- * LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time
- */
-static FORCE_INLINE int LZ4_compress_generic(
- LZ4_stream_t_internal * const dictPtr,
- const char * const source,
- char * const dest,
- const int inputSize,
- const int maxOutputSize,
- const limitedOutput_directive outputLimited,
- const tableType_t tableType,
- const dict_directive dict,
- const dictIssue_directive dictIssue,
- const U32 acceleration)
+static FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx,
+ const int inputSize,
+ const tableType_t tableType)
{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *base;
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if ((tableType_t)cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if ((tableType_t)cctx->tableType != tableType ||
+ ((tableType == byU16) &&
+ cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) ||
+ ((tableType == byU32) && cctx->currentOffset > 1 * GB) ||
+ tableType == byPtr || inputSize >= 4 * KB) {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p",
+ cctx);
+ memset(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = (U32)clearedTable;
+ } else {
+ DEBUGLOG(
+ 4,
+ "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
+ * is faster than compressing without a gap.
+ * However, compressing with currentOffset == 0 is faster still,
+ * so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 * KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+static FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal *const cctx, const char *const source,
+ char *const dest, const int inputSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int maxOutputSize, const limitedOutput_directive outputDirective,
+ const tableType_t tableType, const dict_directive dictDirective,
+ const dictIssue_directive dictIssue, const int acceleration)
+{
+ int result;
+ const BYTE *ip = (const BYTE *)source;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE *base = (const BYTE *)source - startIndex;
const BYTE *lowLimit;
- const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
- const BYTE * const dictionary = dictPtr->dictionary;
- const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
- const size_t dictDelta = dictEnd - (const BYTE *)source;
- const BYTE *anchor = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
- const BYTE * const mflimit = iend - MFLIMIT;
- const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ const LZ4_stream_t_internal *dictCtx =
+ (const LZ4_stream_t_internal *)cctx->dictCtx;
+ const BYTE *const dictionary = dictDirective == usingDictCtx ?
+ dictCtx->dictionary :
+ cctx->dictionary;
+ const U32 dictSize = dictDirective == usingDictCtx ? dictCtx->dictSize :
+ cctx->dictSize;
+ const U32 dictDelta =
+ (dictDirective == usingDictCtx) ?
+ startIndex - dictCtx->currentOffset :
+ 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) ||
+ (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit =
+ startIndex -
+ dictSize; /* used when dictDirective == dictSmall */
+ const BYTE *const dictEnd = dictionary ? dictionary + dictSize :
+ dictionary;
+ const BYTE *anchor = (const BYTE *)source;
+ const BYTE *const iend = ip + inputSize;
+ const BYTE *const mflimitPlusOne = iend - MFLIMIT + 1;
+ const BYTE *const matchlimit = iend - LASTLITERALS;
- BYTE *op = (BYTE *) dest;
- BYTE * const olimit = op + maxOutputSize;
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE *dictBase =
+ (dictionary == NULL) ?
+ NULL :
+ (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
+ BYTE *op = (BYTE *)dest;
+ BYTE *const olimit = op + maxOutputSize;
+ U32 offset = 0;
U32 forwardH;
- size_t refDelta = 0;
- /* Init conditions */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
- /* Unsupported inputSize, too large (or negative) */
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u",
+ inputSize, tableType);
+ assert(ip != NULL);
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) {
return 0;
- }
-
- switch (dict) {
- case noDict:
- default:
- base = (const BYTE *)source;
- lowLimit = (const BYTE *)source;
- break;
- case withPrefix64k:
- base = (const BYTE *)source - dictPtr->currentOffset;
- lowLimit = (const BYTE *)source - dictPtr->dictSize;
- break;
- case usingExtDict:
- base = (const BYTE *)source - dictPtr->currentOffset;
- lowLimit = (const BYTE *)source;
- break;
- }
-
- if ((tableType == byU16)
- && (inputSize >= LZ4_64Klimit)) {
- /* Size too large (not within 64K limit) */
+ } /* Impossible to store anything */
+ if ((tableType == byU16) && (inputSize >= LZ4_64Klimit)) {
return 0;
+ } /* Size too large (not within 64K limit) */
+ if (tableType == byPtr)
+ assert(dictDirective ==
+ noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE *)source -
+ (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
}
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U32)tableType;
- if (inputSize < LZ4_minLength) {
- /* Input too small, no compression (all literals) */
- goto _last_literals;
- }
+ if (inputSize < LZ4_minLength)
+ goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
- LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
ip++;
forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for ( ; ; ) {
+ for (;;) {
const BYTE *match;
BYTE *token;
+ const BYTE *filledIp;
/* Find a match */
- {
+ if (tableType == byPtr) {
const BYTE *forwardIp = ip;
- unsigned int step = 1;
- unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
-
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
do {
U32 const h = forwardH;
-
ip = forwardIp;
forwardIp += step;
- step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
- if (unlikely(forwardIp > mflimit))
+ if (unlikely(forwardIp > mflimitPlusOne))
goto _last_literals;
+ assert(ip < mflimitPlusOne);
- match = LZ4_getPositionOnHash(h,
- dictPtr->hashTable,
- tableType, base);
+ match = LZ4_getPositionOnHash(
+ h, cctx->hashTable, tableType, base);
+ forwardH =
+ LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable,
+ tableType, base);
- if (dict == usingExtDict) {
- if (match < (const BYTE *)source) {
- refDelta = dictDelta;
+ } while ((match + LZ4_DISTANCE_MAX < ip) ||
+ (LZ4_read32(match) != LZ4_read32(ip)));
+
+ } else { /* byU32, byU16 */
+
+ const BYTE *forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const cur = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(
+ h, cctx->hashTable, tableType);
+ assert(matchIndex <= cur);
+ assert(forwardIp - base <
+ (ptrdiff_t)(2 * GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne))
+ goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(
+ h, dictCtx->hashTable,
+ byU32);
+ match = dictBase + matchIndex;
+ matchIndex +=
+ dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)source;
+ }
+ } else if (dictDirective == usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(
+ 7,
+ "extDict candidate: matchIndex=%5u < startIndex=%5u",
+ matchIndex, startIndex);
+ assert(startIndex -
+ matchIndex >=
+ MINMATCH);
+ assert(dictBase);
+ match = dictBase + matchIndex;
lowLimit = dictionary;
} else {
- refDelta = 0;
+ match = base + matchIndex;
lowLimit = (const BYTE *)source;
- } }
-
- forwardH = LZ4_hashPosition(forwardIp,
- tableType);
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
+ forwardH =
+ LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putIndexOnHash(cur, h, cctx->hashTable,
+ tableType);
- LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
- tableType, base);
- } while (((dictIssue == dictSmall)
- ? (match < lowRefLimit)
- : 0)
- || ((tableType == byU16)
- ? 0
- : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match + refDelta)
- != LZ4_read32(ip)));
+ DEBUGLOG(7,
+ "candidate at pos=%u (offset=%u \n",
+ matchIndex, cur - matchIndex);
+ if ((dictIssue == dictSmall) &&
+ (matchIndex < prefixIdxLimit)) {
+ continue;
+ } /* match outside of valid area */
+ assert(matchIndex < cur);
+ if (((tableType != byU16) ||
+ (LZ4_DISTANCE_MAX <
+ LZ4_DISTANCE_ABSOLUTE_MAX)) &&
+ (matchIndex + LZ4_DISTANCE_MAX < cur)) {
+ continue;
+ } /* too far */
+ assert((cur - matchIndex) <=
+ LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem)
+ offset = cur - matchIndex;
+ break; /* match found */
+ }
+
+ } while (1);
}
/* Catch up */
- while (((ip > anchor) & (match + refDelta > lowLimit))
- && (unlikely(ip[-1] == match[refDelta - 1]))) {
+ filledIp = ip;
+ while (((ip > anchor) & (match > lowLimit)) &&
+ (unlikely(ip[-1] == match[-1]))) {
ip--;
match--;
}
/* Encode Literals */
{
- unsigned const int litLength = (unsigned int)(ip - anchor);
-
+ unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
-
- if ((outputLimited) &&
- /* Check output buffer overflow */
- (unlikely(op + litLength +
- (2 + 1 + LASTLITERALS) +
- (litLength / 255) > olimit)))
- return 0;
-
+ if ((outputDirective ==
+ limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
+ (litLength / 255) >
+ olimit))) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(
+ op + (litLength + 240) / 255 /* litlen */ +
+ litLength /* literals */ +
+ 2 /* offset */ + 1 /* token */ +
+ MFLIMIT -
+ MINMATCH /* min last literals so last match is <= end - MFLIMIT */
+ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
if (litLength >= RUN_MASK) {
- int len = (int)litLength - RUN_MASK;
-
+ int len = (int)(litLength - RUN_MASK);
*token = (RUN_MASK << ML_BITS);
-
for (; len >= 255; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
@@ -322,103 +530,222 @@
*token = (BYTE)(litLength << ML_BITS);
/* Copy Literals */
- LZ4_wildCopy(op, anchor, op + litLength);
+ LZ4_wildCopy8(op, anchor, op + litLength);
op += litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor - (const BYTE *)source),
+ litLength, (int)(ip - (const BYTE *)source));
}
_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT -
+ MINMATCH /* min last literals so last match is <= end - MFLIMIT */
+ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
/* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip - match));
- op += 2;
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6,
+ " with offset=%u (ext if > %i)",
+ offset, (int)(ip - (const BYTE *)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset);
+ op += 2;
+ } else {
+ DEBUGLOG(6,
+ " with offset=%u (same segment)",
+ (U32)(ip - match));
+ assert(ip - match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
+ }
/* Encode MatchLength */
{
- unsigned int matchCode;
-
- if ((dict == usingExtDict)
- && (lowLimit == dictionary)) {
- const BYTE *limit;
+ unsigned matchCode;
- match += refDelta;
- limit = ip + (dictEnd - match);
-
+ if ((dictDirective == usingExtDict ||
+ dictDirective == usingDictCtx) &&
+ (lowLimit ==
+ dictionary) /* match within extDict */) {
+ const BYTE *limit = ip + (dictEnd - match);
+ assert(dictEnd > match);
if (limit > matchlimit)
limit = matchlimit;
-
matchCode = LZ4_count(ip + MINMATCH,
- match + MINMATCH, limit);
-
- ip += MINMATCH + matchCode;
-
+ match + MINMATCH, limit);
+ ip += (size_t)matchCode + MINMATCH;
if (ip == limit) {
- unsigned const int more = LZ4_count(ip,
- (const BYTE *)source,
+ unsigned const more = LZ4_count(
+ limit, (const BYTE *)source,
matchlimit);
-
matchCode += more;
ip += more;
}
+ DEBUGLOG(
+ 6,
+ " with matchLength=%u starting in extDict",
+ matchCode + MINMATCH);
} else {
matchCode = LZ4_count(ip + MINMATCH,
- match + MINMATCH, matchlimit);
- ip += MINMATCH + matchCode;
+ match + MINMATCH,
+ matchlimit);
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u",
+ matchCode + MINMATCH);
}
- if (outputLimited &&
- /* Check output buffer overflow */
- (unlikely(op +
- (1 + LASTLITERALS) +
- (matchCode >> 8) > olimit)))
- return 0;
-
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) +
+ (matchCode + 240) / 255 >
+ olimit))) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode =
+ 15 /* in token */ -
+ 1 /* to avoid needing a zero byte */ +
+ ((U32)(olimit - op) - 1 -
+ LASTLITERALS) *
+ 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE *ptr;
+ DEBUGLOG(
+ 5,
+ "Clearing %u positions",
+ (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp;
+ ++ptr) {
+ U32 const h =
+ LZ4_hashPosition(
+ ptr,
+ tableType);
+ LZ4_clearHash(
+ h,
+ cctx->hashTable,
+ tableType);
+ }
+ }
+ } else {
+ assert(outputDirective ==
+ limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
LZ4_write32(op, 0xFFFFFFFF);
-
while (matchCode >= 4 * 255) {
op += 4;
LZ4_write32(op, 0xFFFFFFFF);
matchCode -= 4 * 255;
}
-
op += matchCode / 255;
*op++ = (BYTE)(matchCode % 255);
} else
*token += (BYTE)(matchCode);
}
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput &&
+ op + 1 + LASTLITERALS > olimit));
anchor = ip;
/* Test end of chunk */
- if (ip > mflimit)
+ if (ip >= mflimitPlusOne)
break;
/* Fill table */
- LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
+ LZ4_putPosition(ip - 2, cctx->hashTable, tableType, base);
/* Test next position */
- match = LZ4_getPosition(ip, dictPtr->hashTable,
- tableType, base);
+ if (tableType == byPtr) {
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType,
+ base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ((match + LZ4_DISTANCE_MAX >= ip) &&
+ (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ goto _next_match;
+ }
- if (dict == usingExtDict) {
- if (match < (const BYTE *)source) {
- refDelta = dictDelta;
- lowLimit = dictionary;
- } else {
- refDelta = 0;
- lowLimit = (const BYTE *)source;
- }
- }
-
- LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
-
- if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
- && (match + MAX_DISTANCE >= ip)
- && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
- token = op++;
- *token = 0;
- goto _next_match;
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const cur = (U32)(ip - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable,
+ tableType);
+ assert(matchIndex < cur);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(
+ h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit =
+ dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)
+ source; /* required for match length counter */
+ }
+ } else if (dictDirective == usingExtDict) {
+ if (matchIndex < startIndex) {
+ assert(dictBase);
+ match = dictBase + matchIndex;
+ lowLimit =
+ dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)
+ source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(cur, h, cctx->hashTable, tableType);
+ assert(matchIndex < cur);
+ if (((dictIssue == dictSmall) ?
+ (matchIndex >= prefixIdxLimit) :
+ 1) &&
+ (((tableType == byU16) &&
+ (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ?
+ 1 :
+ (matchIndex + LZ4_DISTANCE_MAX >= cur)) &&
+ (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ if (maybe_extMem)
+ offset = cur - matchIndex;
+ DEBUGLOG(
+ 6,
+ "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor - (const BYTE *)source), 0,
+ (int)(ip - (const BYTE *)source));
+ goto _next_match;
+ }
}
/* Prepare next loop */
@@ -428,398 +755,286 @@
_last_literals:
/* Encode Last Literals */
{
- size_t const lastRun = (size_t)(iend - anchor);
-
- if ((outputLimited) &&
- /* Check output buffer overflow */
- ((op - (BYTE *)dest) + lastRun + 1 +
- ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
- return 0;
-
+ size_t lastRun = (size_t)(iend - anchor);
+ if ((outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
+ olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit - op) - 1 /*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) /
+ 256; /*additional length tokens*/
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
for (; accumulator >= 255; accumulator -= 255)
*op++ = 255;
- *op++ = (BYTE) accumulator;
+ *op++ = (BYTE)accumulator;
} else {
*op++ = (BYTE)(lastRun << ML_BITS);
}
-
LZ4_memcpy(op, anchor, lastRun);
-
+ ip = anchor + lastRun;
op += lastRun;
}
- /* End */
- return (int) (((char *)op) - dest);
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int)(((const char *)ip) - source);
+ }
+ result = (int)(((char *)op) - dest);
+ assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes",
+ inputSize, result);
+ return result;
}
-static int LZ4_compress_fast_extState(
- void *state,
- const char *source,
- char *dest,
- int inputSize,
- int maxOutputSize,
- int acceleration)
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+static FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal *const cctx, const char *const src,
+ char *const dst, const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity, const limitedOutput_directive outputDirective,
+ const tableType_t tableType, const dict_directive dictDirective,
+ const dictIssue_directive dictIssue, const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", srcSize,
+ dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ return 0;
+ } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0)
+ return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert(inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(
+ cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective, tableType, dictDirective,
+ dictIssue, acceleration);
+}
+
+int LZ4_compress_fast_extState(void *state, const char *source, char *dest,
+ int inputSize, int maxOutputSize,
+ int acceleration)
{
- LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
-#if LZ4_ARCH64
- const tableType_t tableType = byU32;
-#else
- const tableType_t tableType = byPtr;
-#endif
-
- LZ4_resetStream((LZ4_stream_t *)state);
-
+ LZ4_stream_t_internal *const ctx =
+ &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
+ assert(ctx != NULL);
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
-
- if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source,
- dest, inputSize, 0,
- noLimit, byU16, noDict,
- noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source,
- dest, inputSize, 0,
- noLimit, tableType, noDict,
- noDictIssue, acceleration);
+ if (acceleration > LZ4_ACCELERATION_MAX)
+ acceleration = LZ4_ACCELERATION_MAX;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest,
+ inputSize, NULL, 0,
+ notLimited, byU16, noDict,
+ noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)source > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(ctx, source, dest,
+ inputSize, NULL, 0,
+ notLimited, tableType,
+ noDict, noDictIssue,
+ acceleration);
+ }
} else {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source,
- dest, inputSize,
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(
+ ctx, source, dest, inputSize, NULL,
maxOutputSize, limitedOutput, byU16, noDict,
noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source,
- dest, inputSize,
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)source > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(
+ ctx, source, dest, inputSize, NULL,
maxOutputSize, limitedOutput, tableType, noDict,
noDictIssue, acceleration);
+ }
}
}
int LZ4_compress_fast(const char *source, char *dest, int inputSize,
- int maxOutputSize, int acceleration, void *wrkmem)
+ int maxOutputSize, int acceleration, void *wrkmem)
{
return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
- maxOutputSize, acceleration);
+ maxOutputSize, acceleration);
}
EXPORT_SYMBOL(LZ4_compress_fast);
int LZ4_compress_default(const char *source, char *dest, int inputSize,
- int maxOutputSize, void *wrkmem)
+ int maxOutputSize, void *wrkmem)
{
- return LZ4_compress_fast(source, dest, inputSize,
- maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
+ return LZ4_compress_fast(source, dest, inputSize, maxOutputSize,
+ LZ4_ACCELERATION_DEFAULT, wrkmem);
}
EXPORT_SYMBOL(LZ4_compress_default);
-/*-******************************
- * *_destSize() variant
- ********************************/
-static int LZ4_compress_destSize_generic(
- LZ4_stream_t_internal * const ctx,
- const char * const src,
- char * const dst,
- int * const srcSizePtr,
- const int targetDstSize,
- const tableType_t tableType)
-{
- const BYTE *ip = (const BYTE *) src;
- const BYTE *base = (const BYTE *) src;
- const BYTE *lowLimit = (const BYTE *) src;
- const BYTE *anchor = ip;
- const BYTE * const iend = ip + *srcSizePtr;
- const BYTE * const mflimit = iend - MFLIMIT;
- const BYTE * const matchlimit = iend - LASTLITERALS;
-
- BYTE *op = (BYTE *) dst;
- BYTE * const oend = op + targetDstSize;
- BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
- - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
- BYTE * const oMaxMatch = op + targetDstSize
- - (LASTLITERALS + 1 /* token */);
- BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
-
- U32 forwardH;
-
- /* Init conditions */
- /* Impossible to store anything */
- if (targetDstSize < 1)
- return 0;
- /* Unsupported input size, too large (or negative) */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
- return 0;
- /* Size too large (not within 64K limit) */
- if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
- return 0;
- /* Input too small, no compression (all literals) */
- if (*srcSizePtr < LZ4_minLength)
- goto _last_literals;
-
- /* First Byte */
- *srcSizePtr = 0;
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE *match;
- BYTE *token;
-
- /* Find a match */
- {
- const BYTE *forwardIp = ip;
- unsigned int step = 1;
- unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
-
- do {
- U32 h = forwardH;
-
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
-
- if (unlikely(forwardIp > mflimit))
- goto _last_literals;
-
- match = LZ4_getPositionOnHash(h, ctx->hashTable,
- tableType, base);
- forwardH = LZ4_hashPosition(forwardIp,
- tableType);
- LZ4_putPositionOnHash(ip, h,
- ctx->hashTable, tableType,
- base);
-
- } while (((tableType == byU16)
- ? 0
- : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match) != LZ4_read32(ip)));
- }
-
- /* Catch up */
- while ((ip > anchor)
- && (match > lowLimit)
- && (unlikely(ip[-1] == match[-1]))) {
- ip--;
- match--;
- }
-
- /* Encode Literal length */
- {
- unsigned int litLength = (unsigned int)(ip - anchor);
-
- token = op++;
- if (op + ((litLength + 240) / 255)
- + litLength > oMaxLit) {
- /* Not enough space for a last match */
- op--;
- goto _last_literals;
- }
- if (litLength >= RUN_MASK) {
- unsigned int len = litLength - RUN_MASK;
- *token = (RUN_MASK<= 255; len -= 255)
- *op++ = 255;
- *op++ = (BYTE)len;
- } else
- *token = (BYTE)(litLength << ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op + litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
-
- /* Encode MatchLength */
- {
- size_t matchLength = LZ4_count(ip + MINMATCH,
- match + MINMATCH, matchlimit);
-
- if (op + ((matchLength + 240)/255) > oMaxMatch) {
- /* Match description too long : reduce it */
- matchLength = (15 - 1) + (oMaxMatch - op) * 255;
- }
- ip += MINMATCH + matchLength;
-
- if (matchLength >= ML_MASK) {
- *token += ML_MASK;
- matchLength -= ML_MASK;
- while (matchLength >= 255) {
- matchLength -= 255;
- *op++ = 255;
- }
- *op++ = (BYTE)matchLength;
- } else
- *token += (BYTE)(matchLength);
- }
-
- anchor = ip;
-
- /* Test end of block */
- if (ip > mflimit)
- break;
- if (op > oMaxSeq)
- break;
-
- /* Fill table */
- LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-
- if ((match + MAX_DISTANCE >= ip)
- && (LZ4_read32(match) == LZ4_read32(ip))) {
- token = op++; *token = 0;
- goto _next_match;
- }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- {
- size_t lastRunSize = (size_t)(iend - anchor);
-
- if (op + 1 /* token */
- + ((lastRunSize + 240) / 255) /* litLength */
- + lastRunSize /* literals */ > oend) {
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (oend - op) - 1;
- lastRunSize -= (lastRunSize + 240) / 255;
- }
- ip = anchor + lastRunSize;
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
-
- *op++ = RUN_MASK << ML_BITS;
- for (; accumulator >= 255; accumulator -= 255)
- *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize<= LZ4_COMPRESSBOUND(*srcSizePtr)) {
- /* compression success is guaranteed */
- return LZ4_compress_fast_extState(
- state, src, dst, *srcSizePtr,
- targetDstSize, 1);
+ if (targetDstSize >=
+ LZ4_compressBound(
+ *srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
+ targetDstSize, 1);
} else {
- if (*srcSizePtr < LZ4_64Klimit)
- return LZ4_compress_destSize_generic(
- &state->internal_donotuse,
- src, dst, srcSizePtr,
- targetDstSize, byU16);
- else
- return LZ4_compress_destSize_generic(
- &state->internal_donotuse,
- src, dst, srcSizePtr,
- targetDstSize, tableType);
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse,
+ src, dst, *srcSizePtr,
+ srcSizePtr, targetDstSize,
+ fillOutput, byU16, noDict,
+ noDictIssue, 1);
+ } else {
+ tableType_t const addrMode =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)src > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(&state->internal_donotuse,
+ src, dst, *srcSizePtr,
+ srcSizePtr, targetDstSize,
+ fillOutput, addrMode,
+ noDict, noDictIssue, 1);
+ }
}
}
-
-int LZ4_compress_destSize(
- const char *src,
- char *dst,
- int *srcSizePtr,
- int targetDstSize,
- void *wrkmem)
+int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr,
+ int targetDstSize, void *wrkmem)
{
return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
- targetDstSize);
+ targetDstSize);
}
EXPORT_SYMBOL(LZ4_compress_destSize);
/*-******************************
* Streaming functions
********************************/
+static size_t LZ4_stream_t_alignment(void)
+{
+ typedef struct {
+ char c;
+ LZ4_stream_t t;
+ } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+}
+
+static int LZ4_isAligned(const void *ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment - 1)) == 0;
+}
+
+LZ4_stream_t *LZ4_initStream(void *buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) {
+ return NULL;
+ }
+ if (size < sizeof(LZ4_stream_t)) {
+ return NULL;
+ }
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment()))
+ return NULL;
+ memset(buffer, 0, sizeof(LZ4_stream_t_internal));
+ return (LZ4_stream_t *)buffer;
+}
+
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
- memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
+ memset(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}
-int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
- const char *dictionary, int dictSize)
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
{
LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
const BYTE *p = (const BYTE *)dictionary;
- const BYTE * const dictEnd = p + dictSize;
+ const BYTE *const dictEnd = p + dictSize;
const BYTE *base;
- if ((dict->initCheck)
- || (dict->currentOffset > 1 * GB)) {
- /* Uninitialized structure, or reuse overflow */
- LZ4_resetStream(LZ4_dict);
- }
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize,
+ dictionary, LZ4_dict);
+
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable()
+ * to avoid any risk of generating overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
+ dict->currentOffset += 64 * KB;
if (dictSize < (int)HASH_UNIT) {
- dict->dictionary = NULL;
- dict->dictSize = 0;
return 0;
}
if ((dictEnd - p) > 64 * KB)
p = dictEnd - 64 * KB;
- dict->currentOffset += 64 * KB;
- base = p - dict->currentOffset;
+ base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
- dict->currentOffset += dict->dictSize;
+ dict->tableType = (U32)tableType;
while (p <= dictEnd - HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, byU32, base);
+ LZ4_putPosition(p, dict->hashTable, tableType, base);
p += 3;
}
- return dict->dictSize;
+ return (int)dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);
-static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
- const BYTE *src)
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, int nextSize)
{
- if ((LZ4_dict->currentOffset > 0x80000000) ||
- ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
- /* address space overflow */
+ assert(nextSize >= 0);
+ if (LZ4_dict->currentOffset + (unsigned)nextSize >
+ 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
/* rescale hash table */
U32 const delta = LZ4_dict->currentOffset - 64 * KB;
const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
int i;
-
+ DEBUGLOG(4, "LZ4_renormDictT");
for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
if (LZ4_dict->hashTable[i] < delta)
LZ4_dict->hashTable[i] = 0;
@@ -835,17 +1050,27 @@
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
- LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
- const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
+ LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
+
+ DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize,
+ safeBuffer);
if ((U32)dictSize > 64 * KB) {
- /* useless to define a dictionary > 64 * KB */
dictSize = 64 * KB;
+ } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) {
+ dictSize = (int)dict->dictSize;
}
- if ((U32)dictSize > dict->dictSize)
- dictSize = dict->dictSize;
- memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+ if (safeBuffer == NULL)
+ assert(dictSize == 0);
+ if (dictSize > 0) {
+ const BYTE *const previousDictEnd =
+ dict->dictionary + dict->dictSize;
+ assert(dict->dictionary);
+ LZ4_memmove(safeBuffer, previousDictEnd - dictSize,
+ (size_t)dictSize);
+ }
dict->dictionary = (const BYTE *)safeBuffer;
dict->dictSize = (U32)dictSize;
@@ -855,86 +1080,125 @@
EXPORT_SYMBOL(LZ4_saveDict);
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
- char *dest, int inputSize, int maxOutputSize, int acceleration)
+ char *dest, int inputSize, int maxOutputSize,
+ int acceleration)
{
- LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
- const BYTE * const dictEnd = streamPtr->dictionary
- + streamPtr->dictSize;
-
- const BYTE *smallest = (const BYTE *) source;
+ const tableType_t tableType = byU32;
+ LZ4_stream_t_internal *const streamPtr = &LZ4_stream->internal_donotuse;
+ const char *dictEnd = streamPtr->dictSize ?
+ (const char *)streamPtr->dictionary +
+ streamPtr->dictSize :
+ NULL;
- if (streamPtr->initCheck) {
- /* Uninitialized structure detected */
- return 0;
- }
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)",
+ inputSize, streamPtr->dictSize);
- if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
- smallest = dictEnd;
-
- LZ4_renormDictT(streamPtr, smallest);
-
+ LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX)
+ acceleration = LZ4_ACCELERATION_MAX;
+
+ /* invalidate tiny dictionaries */
+ if ((streamPtr->dictSize <
+ 4) /* tiny dictionary : not enough for a hash */
+ && (dictEnd != source) /* prefix mode */
+ &&
+ (inputSize >
+ 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
+ && (streamPtr->dictCtx == NULL) /* usingDictCtx */
+ ) {
+ DEBUGLOG(
+ 5,
+ "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small",
+ streamPtr->dictSize, streamPtr->dictionary);
+ /* remove dictionary existence from history, to employ faster prefix mode */
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE *)source;
+ dictEnd = source;
+ }
/* Check overlapping input/dictionary space */
{
- const BYTE *sourceEnd = (const BYTE *) source + inputSize;
-
- if ((sourceEnd > streamPtr->dictionary)
- && (sourceEnd < dictEnd)) {
+ const char *const sourceEnd = source + inputSize;
+ if ((sourceEnd > (const char *)streamPtr->dictionary) &&
+ (sourceEnd < dictEnd)) {
streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
if (streamPtr->dictSize > 64 * KB)
streamPtr->dictSize = 64 * KB;
if (streamPtr->dictSize < 4)
streamPtr->dictSize = 0;
- streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ streamPtr->dictionary =
+ (const BYTE *)dictEnd - streamPtr->dictSize;
}
}
/* prefix mode : source data follows dictionary */
- if (dictEnd == (const BYTE *)source) {
- int result;
-
+ if (dictEnd == source) {
if ((streamPtr->dictSize < 64 * KB) &&
- (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- withPrefix64k, dictSmall, acceleration);
- } else {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- withPrefix64k, noDictIssue, acceleration);
- }
- streamPtr->dictSize += (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
- return result;
- }
-
- /* external dictionary mode */
- {
- int result;
-
- if ((streamPtr->dictSize < 64 * KB) &&
- (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- usingExtDict, dictSmall, acceleration);
- } else {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- usingExtDict, noDictIssue, acceleration);
+ (streamPtr->dictSize < streamPtr->currentOffset))
+ return LZ4_compress_generic(
+ streamPtr, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, tableType,
+ withPrefix64k, dictSmall, acceleration);
+ else
+ return LZ4_compress_generic(
+ streamPtr, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, tableType,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 * KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx,
+ sizeof(*streamPtr));
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, noDictIssue,
+ acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingDictCtx, noDictIssue,
+ acceleration);
+ }
+ } else { /* small data <= 4 KB */
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, dictSmall,
+ acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, noDictIssue,
+ acceleration);
+ }
}
streamPtr->dictionary = (const BYTE *)source;
streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
return result;
}
}
EXPORT_SYMBOL(LZ4_compress_fast_continue);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 compressor");
+MODULE_DESCRIPTION("LZ4 compressor");
\ No newline at end of file
Index: lib/lz4/lz4_decompress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
--- a/lib/lz4/lz4_decompress.c (revision 550dad3df59e8c39fe93484e6f207d4110619483)
+++ b/lib/lz4/lz4_decompress.c (revision 30c5a157bf8495708813366f0ba3fc350f8a9657)
@@ -44,12 +44,281 @@
* Decompression functions
*******************************/
-#define DEBUGLOG(l, ...) {} /* disabled */
+#define LZ4_FAST_DEC_LOOP 1
+
+static const unsigned inc32table[8] = { 0, 1, 2, 1, 0, 4, 4, 4 };
+static const int dec64table[8] = { 0, 0, 0, -1, -4, 1, 2, 3 };
-#ifndef assert
-#define assert(condition) ((void)0)
+#if LZ4_FAST_DEC_LOOP
+
+static FORCE_INLINE void LZ4_memcpy_using_offset_base(BYTE *dstPtr,
+ const BYTE *srcPtr,
+ BYTE *dstEnd,
+ const size_t offset)
+{
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr,
+ 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr + 4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
+
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+static FORCE_INLINE void LZ4_wildCopy32(void *dstPtr, const void *srcPtr,
+ void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_memcpy(d, s, 16);
+ LZ4_memcpy(d + 16, s + 16, 16);
+ d += 32;
+ s += 32;
+ } while (d < e);
+}
+
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+static FORCE_INLINE void LZ4_memcpy_using_offset(BYTE *dstPtr,
+ const BYTE *srcPtr,
+ BYTE *dstEnd,
+ const size_t offset)
+{
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch (offset) {
+ case 1:
+ memset(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
#endif
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+size_t read_long_length_no_check(const BYTE **pp)
+{
+ size_t b, l = 0;
+ do {
+ b = **pp;
+ (*pp)++;
+ l += b;
+ } while (b == 255);
+ DEBUGLOG(6,
+ "read_long_length_no_check: +length=%zu using %zu input bytes",
+ l, l / 255 + 1)
+ return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presume input buffer is "large enough")
+ * - Decompress a full block (only)
+ * @return : nb of bytes read from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ * the goal is to remove support of decompress_fast*() variants by v2.0
+**/
+FORCE_INLINE int LZ4_decompress_unsafe_generic(
+ const BYTE *const istart, BYTE *const ostart, int decompressedSize,
+ size_t prefixSize,
+ const BYTE *const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note: =0 if dictStart==NULL */
+)
+{
+ const BYTE *ip = istart;
+ BYTE *op = (BYTE *)ostart;
+ BYTE *const oend = ostart + decompressedSize;
+ const BYTE *const prefixStart = ostart - prefixSize;
+
+ DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+ if (dictStart == NULL)
+ assert(dictSize == 0);
+
+ while (1) {
+ /* start new sequence */
+ unsigned token = *ip++;
+
+ /* literals */
+ {
+ size_t ll = token >> ML_BITS;
+ if (ll == 15) {
+ /* long literal length */
+ ll += read_long_length_no_check(&ip);
+ }
+ if ((size_t)(oend - op) < ll)
+ return -1; /* output buffer overflow */
+ LZ4_memmove(op, ip,
+ ll); /* support in-place decompression */
+ op += ll;
+ ip += ll;
+ if ((size_t)(oend - op) < MFLIMIT) {
+ if (op == oend)
+ break; /* end of block */
+ DEBUGLOG(
+ 5,
+ "invalid: literals end at distance %zi from end of block",
+ oend - op);
+ /* incorrect end of block :
+ * last match must start at least MFLIMIT==12 bytes before end of output block */
+ return -1;
+ }
+ }
+
+ /* match */
+ {
+ size_t ml = token & 15;
+ size_t const offset = LZ4_readLE16(ip);
+ ip += 2;
+
+ if (ml == 15) {
+ /* long literal length */
+ ml += read_long_length_no_check(&ip);
+ }
+ ml += MINMATCH;
+
+ if ((size_t)(oend - op) < ml)
+ return -1; /* output buffer overflow */
+
+ {
+ const BYTE *match = op - offset;
+
+ /* out of range */
+ if (offset >
+ (size_t)(op - prefixStart) + dictSize) {
+ DEBUGLOG(6, "offset out of range");
+ return -1;
+ }
+
+ /* check special case : extDict */
+ if (offset > (size_t)(op - prefixStart)) {
+ /* extDict scenario */
+ const BYTE *const dictEnd =
+ dictStart + dictSize;
+ const BYTE *extMatch =
+ dictEnd -
+ (offset -
+ (size_t)(op - prefixStart));
+ size_t const extml =
+ (size_t)(dictEnd - extMatch);
+ if (extml > ml) {
+ /* match entirely within extDict */
+ LZ4_memmove(op, extMatch, ml);
+ op += ml;
+ ml = 0;
+ } else {
+ /* match split between extDict & prefix */
+ LZ4_memmove(op, extMatch,
+ extml);
+ op += extml;
+ ml -= extml;
+ }
+ match = prefixStart;
+ }
+
+ /* match copy - slow variant, supporting overlap copy */
+ {
+ size_t u;
+ for (u = 0; u < ml; u++) {
+ op[u] = match[u];
+ }
+ }
+ }
+ op += ml;
+ if ((size_t)(oend - op) < LASTLITERALS) {
+ DEBUGLOG(
+ 5,
+ "invalid: match ends at distance %zi from end of block",
+ oend - op);
+ /* incorrect end of block :
+ * last match must stop at least LASTLITERALS==5 bytes before end of output block */
+ return -1;
+ }
+ } /* match */
+ } /* main loop */
+ return (int)(ip - istart);
+}
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer
+ * @ilimit : position after which if length is not decoded, the input is necessarily corrupted.
+ * @initial_check - check ip >= ipmax before start of loop. Returns initial_error if so.
+ * @error (output) - error code. Must be set to 0 before call.
+**/
+typedef size_t Rvl_t;
+static const Rvl_t rvl_error = (Rvl_t)(-1);
+static FORCE_INLINE Rvl_t read_variable_length(const BYTE **ip,
+ const BYTE *ilimit,
+ int initial_check)
+{
+ Rvl_t s, length = 0;
+ assert(ip != NULL);
+ assert(*ip != NULL);
+ assert(ilimit != NULL);
+ if (initial_check &&
+ unlikely((*ip) >= ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (unlikely((*ip) > ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ /* accumulator overflow detection (32-bit mode only) */
+ if ((sizeof(length) < 8) &&
+ unlikely(length > ((Rvl_t)(-1) / 2))) {
+ return rvl_error;
+ }
+ } while (s == 255);
+
+ return length;
+}
+
/*
* LZ4_decompress_generic() :
* This generic decompression function covers all use cases.
@@ -57,430 +326,585 @@
* Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
-static FORCE_INLINE int LZ4_decompress_generic(
- const char * const src,
- char * const dst,
- int srcSize,
- /*
+static FORCE_INLINE int
+LZ4_decompress_generic(const char *const src, char *const dst, int srcSize,
+ /*
* If endOnInput == endOnInputSize,
* this value is `dstCapacity`
*/
- int outputSize,
- /* endOnOutputSize, endOnInputSize */
- endCondition_directive endOnInput,
- /* full, partial */
- earlyEnd_directive partialDecoding,
- /* noDict, withPrefix64k, usingExtDict */
- dict_directive dict,
- /* always <= dst, == dst when no prefix */
- const BYTE * const lowPrefix,
- /* only if dict == usingExtDict */
- const BYTE * const dictStart,
- /* note : = 0 if noDict */
- const size_t dictSize
- )
+ int outputSize,
+ /* endOnOutputSize, endOnInputSize */
+ earlyEnd_directive partialDecoding,
+ /* noDict, withPrefix64k, usingExtDict */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
+ const BYTE *const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE *const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize)
{
- const BYTE *ip = (const BYTE *) src;
- const BYTE * const iend = ip + srcSize;
+ if ((src == NULL) || (outputSize < 0)) {
+ return -1;
+ }
+
+ {
+ const BYTE *ip = (const BYTE *)src;
+ const BYTE *const iend = ip + srcSize;
- BYTE *op = (BYTE *) dst;
- BYTE * const oend = op + outputSize;
- BYTE *cpy;
+ BYTE *op = (BYTE *)dst;
+ BYTE *const oend = op + outputSize;
+ BYTE *cpy;
- const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
- static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+ const BYTE *const dictEnd =
+ (dictStart == NULL) ? NULL : dictStart + dictSize;
- const int safeDecode = (endOnInput == endOnInputSize);
- const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+ const int checkOffset = (dictSize < (int)(64 * KB));
- /* Set up the "end" pointers for the shortcut. */
- const BYTE *const shortiend = iend -
- (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
- const BYTE *const shortoend = oend -
- (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend =
+ iend - 14 /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend =
+ oend - 14 /*maxLL*/ - 18 /*maxML*/;
- DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
- srcSize, outputSize);
+ const BYTE *match;
+ size_t offset;
+ unsigned token;
+ size_t length;
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)",
+ srcSize, outputSize);
- /* Special cases */
- assert(lowPrefix <= op);
- assert(src != NULL);
-
- /* Empty output buffer */
- if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
-
- if ((!endOnInput) && (unlikely(outputSize == 0)))
- return (*ip == 0 ? 1 : -1);
-
- if ((endOnInput) && unlikely(srcSize == 0))
- return -1;
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if (unlikely(outputSize == 0)) {
+ /* Empty output buffer */
+ if (partialDecoding)
+ return 0;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
+ }
+ if (unlikely(srcSize == 0)) {
+ return -1;
+ }
- /* Main Loop : decode sequences */
- while (1) {
- size_t length;
- const BYTE *match;
- size_t offset;
+ /* LZ4_FAST_DEC_LOOP:
+ * designed for modern OoO performance cpus,
+ * where copying reliably 32-bytes is preferable to an unpredictable branch.
+ * note : fast loop may show a regression for some client arm chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "skip fast decode loop");
+ goto safe_decode;
+ }
+
+ /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - RUN_MASK, 1);
+ if (addl == rvl_error) {
+ goto _output_error;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)(op))) {
+ goto _output_error;
+ } /* overflow detection */
+ if (unlikely((uptrval)(ip) + length <
+ (uptrval)(ip))) {
+ goto _output_error;
+ } /* overflow detection */
- /* get literal length */
- unsigned int const token = *ip++;
- length = token>>ML_BITS;
+ /* copy literals */
+ cpy = op + length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((cpy > oend - 32) ||
+ (ip + length > iend - 32)) {
+ goto safe_literal_copy;
+ }
+ LZ4_wildCopy32(op, ip, cpy);
+ ip += length;
+ op = cpy;
+ } else {
+ cpy = op + length;
+ DEBUGLOG(7,
+ "copy %u bytes in a 16-bytes stripe",
+ (unsigned)length);
+ /* We don't need to check oend, since we check it once for each loop below */
+ if (ip >
+ iend - (16 +
+ 1 /*max lit + offset + nextToken*/)) {
+ goto safe_literal_copy;
+ }
+ /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
+ LZ4_memcpy(op, ip, 16);
+ ip += length;
+ op = cpy;
+ }
- /* ip < iend before the increment */
- assert(!endOnInput || ip <= iend);
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* overflow check */
- /*
- * A two-stage shortcut for the most common case:
- * 1) If the literal length is 0..14, and there is enough
- * space, enter the shortcut and copy 16 bytes on behalf
- * of the literals (in the fast mode, only 8 bytes can be
- * safely copied this way).
- * 2) Further if the match length is 4..18, copy 18 bytes
- * in a similar manner; but we ensure that there's enough
- * space in the output for those 18 bytes earlier, upon
- * entering the shortcut (in other words, there is a
- * combined check for both stages).
- *
- * The & in the likely() below is intentionally not && so that
- * some compilers can produce better parallelized runtime code
- */
- if ((endOnInput ? length != RUN_MASK : length <= 8)
- /*
- * strictly "less than" on input, to re-enter
- * the loop with at least one byte
- */
- && likely((endOnInput ? ip < shortiend : 1) &
- (op <= shortoend))) {
- /* Copy the literals */
- LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
- op += length; ip += length;
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) {
+ goto _output_error;
+ }
+ length += addl;
+ length += MINMATCH;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)op)) {
+ goto _output_error;
+ } /* overflow detection */
+ if ((checkOffset) &&
+ (unlikely(match + dictSize < lowPrefix))) {
+ goto _output_error;
+ } /* Error : offset outside buffers */
+ if (op + length >=
+ oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >=
+ oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: skip LZ4_wildCopy32 when true */
+ if ((dict == withPrefix64k) ||
+ (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op + 8, match + 8,
+ 8);
+ LZ4_memcpy(op + 16, match + 16,
+ 2);
+ op += length;
+ continue;
+ }
+ }
+ }
+
+ if (checkOffset &&
+ (unlikely(match + dictSize < lowPrefix))) {
+ goto _output_error;
+ } /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op + length >
+ oend - LASTLITERALS)) {
+ if (partialDecoding) {
+ DEBUGLOG(
+ 7,
+ "partialDecoding: dictionary match, close to dstEnd");
+ length = min(
+ length,
+ (size_t)(oend - op));
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ }
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op,
+ dictEnd -
+ (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize =
+ (size_t)(lowPrefix - match);
+ size_t const restSize =
+ length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize,
+ copySize);
+ op += copySize;
+ if (restSize >
+ (size_t)(op -
+ lowPrefix)) { /* overlap copy */
+ BYTE *const endOfMatch =
+ op + restSize;
+ const BYTE *copyFrom =
+ lowPrefix;
+ while (op < endOfMatch) {
+ *op++ = *copyFrom++;
+ }
+ } else {
+ LZ4_memcpy(op, lowPrefix,
+ restSize);
+ op += restSize;
+ }
+ }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend - op >= 32));
+ if (unlikely(offset < 16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
+ }
+safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ((length != RUN_MASK)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((ip < shortiend) & (op <= shortoend))) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, 16);
+ op += length;
+ ip += length;
- /*
- * The second stage:
- * prepare for match copying, decode full info.
- * If it doesn't work out, the info won't be wasted.
- */
- length = token & ML_MASK; /* match length */
- offset = LZ4_readLE16(ip);
- ip += 2;
- match = op - offset;
- assert(match <= op); /* check overflow */
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
- /* Do not deal with overlapping matches. */
- if ((length != ML_MASK) &&
- (offset >= 8) &&
- (dict == withPrefix64k || match >= lowPrefix)) {
- /* Copy the match. */
- LZ4_memcpy(op + 0, match + 0, 8);
- LZ4_memcpy(op + 8, match + 8, 8);
- LZ4_memcpy(op + 16, match + 16, 2);
- op += length + MINMATCH;
- /* Both stages worked, load the next token. */
- continue;
- }
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) && (offset >= 8) &&
+ (dict == withPrefix64k ||
+ match >= lowPrefix)) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
- /*
- * The second stage didn't work out, but the info
- * is ready. Propel it right to the point of match
- * copying.
- */
- goto _copy_match;
- }
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
- /* decode literal length */
- if (length == RUN_MASK) {
- unsigned int s;
-
- if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
- /* overflow detection */
- goto _output_error;
- }
- do {
- s = *ip++;
- length += s;
- } while (likely(endOnInput
- ? ip < iend - RUN_MASK
- : 1) & (s == 255));
-
- if ((safeDecode)
- && unlikely((uptrval)(op) +
- length < (uptrval)(op))) {
- /* overflow detection */
- goto _output_error;
- }
- if ((safeDecode)
- && unlikely((uptrval)(ip) +
- length < (uptrval)(ip))) {
- /* overflow detection */
- goto _output_error;
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - RUN_MASK, 1);
+ if (addl == rvl_error) {
+ goto _output_error;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)(op))) {
+ goto _output_error;
+ } /* overflow detection */
+ if (unlikely((uptrval)(ip) + length <
+ (uptrval)(ip))) {
+ goto _output_error;
+ } /* overflow detection */
}
- }
- /* copy literals */
- cpy = op + length;
- LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
-
- if (((endOnInput) && ((cpy > oend - MFLIMIT)
- || (ip + length > iend - (2 + 1 + LASTLITERALS))))
- || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
- if (partialDecoding) {
- if (cpy > oend) {
- /*
- * Partial decoding :
- * stop in the middle of literal segment
- */
- cpy = oend;
- length = oend - op;
- }
- if ((endOnInput)
- && (ip + length > iend)) {
- /*
- * Error :
- * read attempt beyond
- * end of input buffer
- */
- goto _output_error;
- }
- } else {
- if ((!endOnInput)
- && (cpy != oend)) {
- /*
- * Error :
- * block decoding must
- * stop exactly there
- */
- goto _output_error;
- }
- if ((endOnInput)
- && ((ip + length != iend)
- || (cpy > oend))) {
- /*
- * Error :
- * input must be consumed
- */
- goto _output_error;
- }
- }
-
- /*
- * supports overlapping memory regions; only matters
- * for in-place decompression scenarios
- */
- LZ4_memmove(op, ip, length);
- ip += length;
- op += length;
-
- /* Necessarily EOF when !partialDecoding.
- * When partialDecoding, it is EOF if we've either
- * filled the output buffer or
- * can't proceed with reading an offset for following match.
- */
- if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
- break;
- } else {
- /* may overwrite up to WILDCOPYLENGTH beyond cpy */
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
- }
+ /* copy literals */
+ cpy = op + length;
+#if LZ4_FAST_DEC_LOOP
+safe_literal_copy:
+#endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((cpy > oend - MFLIMIT) ||
+ (ip + length > iend - (2 + 1 + LASTLITERALS))) {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ DEBUGLOG(
+ 7,
+ "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(
+ 7,
+ "partialDecoding: literal length = %u",
+ (unsigned)length);
+ DEBUGLOG(
+ 7,
+ "partialDecoding: remaining space in dstBuffer : %i",
+ (int)(oend - op));
+ DEBUGLOG(
+ 7,
+ "partialDecoding: remaining space in srcBuffer : %i",
+ (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip + length > iend) {
+ length = (size_t)(iend - ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op <= oend);
+ length = (size_t)(oend - op);
+ }
+ } else {
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((ip + length != iend) ||
+ (cpy > oend)) {
+ DEBUGLOG(
+ 6,
+ "should have been last run of literals")
+ DEBUGLOG(
+ 6,
+ "ip(%p) + length(%i) = %p != iend (%p)",
+ ip, (int)length,
+ ip + length, iend);
+ DEBUGLOG(
+ 6,
+ "or cpy(%p) > oend(%p)",
+ cpy, oend);
+ goto _output_error;
+ }
+ }
+ LZ4_memmove(
+ op, ip,
+ length); /* supports overlapping memory regions, for in-place decompression scenarios */
+ ip += length;
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) ||
+ (ip >= (iend - 2))) {
+ break;
+ }
+ } else {
+ LZ4_wildCopy8(
+ op, ip,
+ cpy); /* can overwrite up to 8 bytes beyond cpy */
+ ip += length;
+ op = cpy;
+ }
- /* get offset */
- offset = LZ4_readLE16(ip);
- ip += 2;
- match = op - offset;
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
- /* get matchlength */
- length = token & ML_MASK;
+ /* get matchlength */
+ length = token & ML_MASK;
_copy_match:
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
- /* Error : offset outside buffers */
- goto _output_error;
- }
-
- /* costs ~1%; silence an msan warning when offset == 0 */
- /*
- * note : when partialDecoding, there is no guarantee that
- * at least 4 bytes remain available in output buffer
- */
- if (!partialDecoding) {
- assert(oend > op);
- assert(oend - op >= 4);
-
- LZ4_write32(op, (U32)offset);
- }
-
- if (length == ML_MASK) {
- unsigned int s;
-
- do {
- s = *ip++;
-
- if ((endOnInput) && (ip > iend - LASTLITERALS))
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) {
goto _output_error;
-
- length += s;
- } while (s == 255);
-
- if ((safeDecode)
- && unlikely(
- (uptrval)(op) + length < (uptrval)op)) {
- /* overflow detection */
- goto _output_error;
- }
- }
-
- length += MINMATCH;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)op))
+ goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
- /* match starting within external dictionary */
- if ((dict == usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op + length > oend - LASTLITERALS)) {
- /* doesn't respect parsing restriction */
- if (!partialDecoding)
- goto _output_error;
- length = min(length, (size_t)(oend - op));
- }
+#if LZ4_FAST_DEC_LOOP
+safe_match_copy:
+#endif
+ if ((checkOffset) &&
+ (unlikely(match + dictSize < lowPrefix)))
+ goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op + length >
+ oend - LASTLITERALS)) {
+ if (partialDecoding)
+ length = min(
+ length,
+ (size_t)(oend - op));
+ else
+ goto _output_error; /* doesn't respect parsing restriction */
+ }
- if (length <= (size_t)(lowPrefix - match)) {
- /*
- * match fits entirely within external
- * dictionary : just copy
- */
- memmove(op, dictEnd - (lowPrefix - match),
- length);
- op += length;
- } else {
- /*
- * match stretches into both external
- * dictionary and current block
- */
- size_t const copySize = (size_t)(lowPrefix - match);
- size_t const restSize = length - copySize;
-
- LZ4_memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op - lowPrefix)) {
- /* overlap copy */
- BYTE * const endOfMatch = op + restSize;
- const BYTE *copyFrom = lowPrefix;
-
- while (op < endOfMatch)
- *op++ = *copyFrom++;
- } else {
- LZ4_memcpy(op, lowPrefix, restSize);
- op += restSize;
- }
- }
- continue;
- }
+ if (length <= (size_t)(lowPrefix - match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op,
+ dictEnd -
+ (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize =
+ (size_t)(lowPrefix - match);
+ size_t const restSize =
+ length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize,
+ copySize);
+ op += copySize;
+ if (restSize >
+ (size_t)(op -
+ lowPrefix)) { /* overlap copy */
+ BYTE *const endOfMatch =
+ op + restSize;
+ const BYTE *copyFrom =
+ lowPrefix;
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix,
+ restSize);
+ op += restSize;
+ }
+ }
+ continue;
+ }
+ assert(match >= lowPrefix);
- /* copy match within block */
- cpy = op + length;
+ /* copy match within block */
+ cpy = op + length;
- /*
- * partialDecoding :
- * may not respect endBlock parsing restrictions
- */
- assert(op <= oend);
- if (partialDecoding &&
- (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
- size_t const mlen = min(length, (size_t)(oend - op));
- const BYTE * const matchEnd = match + mlen;
- BYTE * const copyEnd = op + mlen;
-
- if (matchEnd > op) {
- /* overlap copy */
- while (op < copyEnd)
- *op++ = *match++;
- } else {
- LZ4_memcpy(op, match, mlen);
- }
- op = copyEnd;
- if (op == oend)
- break;
- continue;
- }
+ /* partialDecoding : may end anywhere within the block */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen =
+ min(length, (size_t)(oend - op));
+ const BYTE *const matchEnd = match + mlen;
+ BYTE *const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) {
+ *op++ = *match++;
+ }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) {
+ break;
+ }
+ continue;
+ }
- if (unlikely(offset < 8)) {
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += inc32table[offset];
- LZ4_memcpy(op + 4, match, 4);
- match -= dec64table[offset];
- } else {
- LZ4_copy8(op, match);
- match += 8;
- }
-
- op += 8;
+ if (unlikely(offset < 8)) {
+ LZ4_write32(
+ op,
+ 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op + 4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
+ }
+ op += 8;
- if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
- BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
-
- if (cpy > oend - LASTLITERALS) {
- /*
- * Error : last LASTLITERALS bytes
- * must be literals (uncompressed)
- */
- goto _output_error;
- }
-
- if (op < oCopyLimit) {
- LZ4_wildCopy(op, match, oCopyLimit);
- match += oCopyLimit - op;
- op = oCopyLimit;
- }
- while (op < cpy)
- *op++ = *match++;
- } else {
- LZ4_copy8(op, match);
- if (length > 16)
- LZ4_wildCopy(op + 8, match + 8, cpy);
- }
- op = cpy; /* wildcopy correction */
- }
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE *const oCopyLimit =
+ oend - (WILDCOPYLENGTH - 1);
+ if (cpy > oend - LASTLITERALS) {
+ goto _output_error;
+ } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) {
+ *op++ = *match++;
+ }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) {
+ LZ4_wildCopy8(op + 8, match + 8, cpy);
+ }
+ }
+ op = cpy; /* wildcopy correction */
+ }
- /* end of decoding */
- if (endOnInput) {
- /* Nb of output bytes decoded */
- return (int) (((char *)op) - dst);
- } else {
- /* Nb of input bytes read */
- return (int) (((const char *)ip) - src);
- }
+ /* end of decoding */
+ DEBUGLOG(5, "decoded %i bytes", (int)(((char *)op) - dst));
+ return (int)(((char *)op) -
+ dst); /* Nb of output bytes decoded */
- /* Overflow error detected */
+ /* Overflow error detected */
_output_error:
- return (int) (-(((const char *)ip) - src)) - 1;
+ return (int)(-(((const char *)ip) - src)) - 1;
+ }
}
-int LZ4_decompress_safe(const char *source, char *dest,
- int compressedSize, int maxDecompressedSize)
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxDecompressedSize,
- endOnInputSize, decode_full_block,
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, decode_full_block,
noDict, (BYTE *)dest, NULL, 0);
}
-int LZ4_decompress_safe_partial(const char *src, char *dst,
- int compressedSize, int targetOutputSize, int dstCapacity)
+int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize,
+ int targetOutputSize, int dstCapacity)
{
dstCapacity = min(targetOutputSize, dstCapacity);
return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
- endOnInputSize, partial_decode,
- noDict, (BYTE *)dst, NULL, 0);
+ partial_decode, noDict, (BYTE *)dst, NULL,
+ 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, decode_full_block,
- withPrefix64k,
- (BYTE *)dest - 64 * KB, NULL, 0);
+ return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
+ originalSize, 0, NULL, 0);
}
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
@@ -488,11 +912,10 @@
static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- withPrefix64k,
- (BYTE *)dest - 64 * KB, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
+ withPrefix64k, (BYTE *)dest - 64 * KB,
+ NULL, 0);
}
static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
@@ -500,10 +923,8 @@
int maxOutputSize,
size_t prefixSize)
{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- noDict,
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block, noDict,
(BYTE *)dest - prefixSize, NULL, 0);
}
@@ -511,22 +932,19 @@
int compressedSize, int maxOutputSize,
const void *dictStart, size_t dictSize)
{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
usingExtDict, (BYTE *)dest,
(const BYTE *)dictStart, dictSize);
}
static int LZ4_decompress_fast_extDict(const char *source, char *dest,
- int originalSize,
- const void *dictStart, size_t dictSize)
+ int originalSize, const void *dictStart,
+ size_t dictSize)
{
- return LZ4_decompress_generic(source, dest,
- 0, originalSize,
- endOnOutputSize, decode_full_block,
- usingExtDict, (BYTE *)dest,
- (const BYTE *)dictStart, dictSize);
+ return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
+ originalSize, 0,
+ (const BYTE *)dictStart, dictSize);
}
/*
@@ -534,43 +952,39 @@
* of the dictionary is passed as prefix, and the second via dictStart + dictSize.
* These routines are used only once, in LZ4_decompress_*_continue().
*/
-static FORCE_INLINE
-int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- size_t prefixSize,
- const void *dictStart, size_t dictSize)
+static FORCE_INLINE int LZ4_decompress_safe_doubleDict(
+ const char *source, char *dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void *dictStart, size_t dictSize)
{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
usingExtDict, (BYTE *)dest - prefixSize,
(const BYTE *)dictStart, dictSize);
}
-static FORCE_INLINE
-int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
- int originalSize, size_t prefixSize,
- const void *dictStart, size_t dictSize)
+static FORCE_INLINE int
+LZ4_decompress_fast_doubleDict(const char *source, char *dest, int originalSize,
+ size_t prefixSize, const void *dictStart,
+ size_t dictSize)
{
- return LZ4_decompress_generic(source, dest,
- 0, originalSize,
- endOnOutputSize, decode_full_block,
- usingExtDict, (BYTE *)dest - prefixSize,
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ decode_full_block, usingExtDict,
+ (BYTE *)dest - prefixSize,
(const BYTE *)dictStart, dictSize);
}
/* ===== streaming decompression functions ===== */
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *dictionary, int dictSize)
+ const char *dictionary, int dictSize)
{
LZ4_streamDecode_t_internal *lz4sd =
&LZ4_streamDecode->internal_donotuse;
- lz4sd->prefixSize = (size_t) dictSize;
- lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
+ lz4sd->prefixSize = (size_t)dictSize;
+ lz4sd->prefixEnd = (const BYTE *)dictionary + dictSize;
lz4sd->externalDict = NULL;
- lz4sd->extDictSize = 0;
+ lz4sd->extDictSize = 0;
return 1;
}
@@ -585,7 +999,8 @@
* and indicate where it stands using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int compressedSize, int maxOutputSize)
+ const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
{
LZ4_streamDecode_t_internal *lz4sd =
&LZ4_streamDecode->internal_donotuse;
@@ -594,85 +1009,90 @@
if (lz4sd->prefixSize == 0) {
/* The first call, no dictionary yet. */
assert(lz4sd->extDictSize == 0);
- result = LZ4_decompress_safe(source, dest,
- compressedSize, maxOutputSize);
+ result = LZ4_decompress_safe(source, dest, compressedSize,
+ maxOutputSize);
if (result <= 0)
return result;
- lz4sd->prefixSize = result;
+ lz4sd->prefixSize = (size_t)result;
lz4sd->prefixEnd = (BYTE *)dest + result;
} else if (lz4sd->prefixEnd == (BYTE *)dest) {
/* They're rolling the current segment. */
if (lz4sd->prefixSize >= 64 * KB - 1)
- result = LZ4_decompress_safe_withPrefix64k(source, dest,
- compressedSize, maxOutputSize);
+ result = LZ4_decompress_safe_withPrefix64k(
+ source, dest, compressedSize, maxOutputSize);
else if (lz4sd->extDictSize == 0)
- result = LZ4_decompress_safe_withSmallPrefix(source,
- dest, compressedSize, maxOutputSize,
+ result = LZ4_decompress_safe_withSmallPrefix(
+ source, dest, compressedSize, maxOutputSize,
lz4sd->prefixSize);
else
- result = LZ4_decompress_safe_doubleDict(source, dest,
- compressedSize, maxOutputSize,
- lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_safe_doubleDict(
+ source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict,
+ lz4sd->extDictSize);
if (result <= 0)
return result;
- lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
+ lz4sd->prefixSize += (size_t)result;
+ lz4sd->prefixEnd += result;
} else {
- /*
- * The buffer wraps around, or they're
- * switching to another buffer.
- */
+ /* The buffer wraps around, or they're switching to another buffer. */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_safe_forceExtDict(source, dest,
- compressedSize, maxOutputSize,
+ result = LZ4_decompress_safe_forceExtDict(
+ source, dest, compressedSize, maxOutputSize,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
- lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
}
int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int originalSize)
+ const char *source, char *dest,
+ int originalSize)
{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ LZ4_streamDecode_t_internal *const lz4sd =
+ (assert(LZ4_streamDecode != NULL),
+ &LZ4_streamDecode->internal_donotuse);
int result;
+ DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)",
+ originalSize);
+ assert(originalSize >= 0);
+
if (lz4sd->prefixSize == 0) {
+ DEBUGLOG(5, "first invocation : no prefix nor extDict");
assert(lz4sd->extDictSize == 0);
result = LZ4_decompress_fast(source, dest, originalSize);
if (result <= 0)
return result;
- lz4sd->prefixSize = originalSize;
+ lz4sd->prefixSize = (size_t)originalSize;
lz4sd->prefixEnd = (BYTE *)dest + originalSize;
} else if (lz4sd->prefixEnd == (BYTE *)dest) {
- if (lz4sd->prefixSize >= 64 * KB - 1 ||
- lz4sd->extDictSize == 0)
- result = LZ4_decompress_fast(source, dest,
- originalSize);
- else
- result = LZ4_decompress_fast_doubleDict(source, dest,
- originalSize, lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
+ DEBUGLOG(5, "continue using existing prefix");
+ result = LZ4_decompress_unsafe_generic(
+ (const BYTE *)source, (BYTE *)dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict,
+ lz4sd->extDictSize);
if (result <= 0)
return result;
- lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
+ lz4sd->prefixSize += (size_t)originalSize;
+ lz4sd->prefixEnd += originalSize;
} else {
+ DEBUGLOG(5, "prefix becomes extDict");
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_fast_extDict(source, dest,
- originalSize, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict,
+ lz4sd->extDictSize);
if (result <= 0)
return result;
- lz4sd->prefixSize = originalSize;
+ lz4sd->prefixSize = (size_t)originalSize;
lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
+
return result;
}
@@ -681,28 +1101,31 @@
const char *dictStart, int dictSize)
{
if (dictSize == 0)
- return LZ4_decompress_safe(source, dest,
- compressedSize, maxOutputSize);
- if (dictStart+dictSize == dest) {
+ return LZ4_decompress_safe(source, dest, compressedSize,
+ maxOutputSize);
+ if (dictStart + dictSize == dest) {
if (dictSize >= 64 * KB - 1)
- return LZ4_decompress_safe_withPrefix64k(source, dest,
- compressedSize, maxOutputSize);
- return LZ4_decompress_safe_withSmallPrefix(source, dest,
- compressedSize, maxOutputSize, dictSize);
+ return LZ4_decompress_safe_withPrefix64k(
+ source, dest, compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(
+ source, dest, compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_safe_forceExtDict(source, dest,
- compressedSize, maxOutputSize, dictStart, dictSize);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize,
+ maxOutputSize, dictStart,
+ dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize,
- const char *dictStart, int dictSize)
+ int originalSize, const char *dictStart,
+ int dictSize)
{
if (dictSize == 0 || dictStart + dictSize == dest)
- return LZ4_decompress_fast(source, dest, originalSize);
+ return LZ4_decompress_unsafe_generic((const BYTE *)source,
+ (BYTE *)dest, originalSize,
+ (size_t)dictSize, NULL, 0);
return LZ4_decompress_fast_extDict(source, dest, originalSize,
- dictStart, dictSize);
+ dictStart, dictSize);
}
#ifndef STATIC
Index: lib/lz4/lz4defs.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
--- a/lib/lz4/lz4defs.h (revision 550dad3df59e8c39fe93484e6f207d4110619483)
+++ b/lib/lz4/lz4defs.h (revision 30c5a157bf8495708813366f0ba3fc350f8a9657)
@@ -38,7 +38,7 @@
#include
#include
-#include /* memset, memcpy */
+#include /* memset, memcpy */
#define FORCE_INLINE __always_inline
@@ -47,10 +47,10 @@
**************************************/
#include
-typedef uint8_t BYTE;
+typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
-typedef int32_t S32;
+typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
@@ -69,22 +69,28 @@
#define LZ4_LITTLE_ENDIAN 0
#endif
+#define DEBUGLOG(l, ...) \
+ { \
+ } /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
/*-************************************
* Constants
**************************************/
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#define LZ4_DISTANCE_MAX 65535
#define MINMATCH 4
#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5
-#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
-/*
- * ensure it's possible to write 2 x wildcopyLength
- * without overflowing output buffer
- */
-#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
-
-/* Increase this value ==> compression run slower on incompressible data */
-#define LZ4_SKIPTRIGGER 6
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE \
+ ((2 * WILDCOPYLENGTH) - \
+ MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
#define HASH_UNIT sizeof(size_t)
@@ -92,12 +98,18 @@
#define MB (1 << 20)
#define GB (1U << 30)
+#if defined(__x86_64__)
+typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define STEPSIZE sizeof(size_t)
-#define ML_BITS 4
-#define ML_MASK ((1U << ML_BITS) - 1)
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
@@ -150,40 +162,22 @@
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
-static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
-{
-#if LZ4_ARCH64
- U64 a = get_unaligned((const U64 *)src);
-
- put_unaligned(a, (U64 *)dst);
-#else
- U32 a = get_unaligned((const U32 *)src);
- U32 b = get_unaligned((const U32 *)src + 1);
-
- put_unaligned(a, (U32 *)dst);
- put_unaligned(b, (U32 *)dst + 1);
-#endif
-}
-
-/*
- * customized variant of memcpy,
- * which can overwrite up to 7 bytes beyond dstEnd
- */
-static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
- const void *srcPtr, void *dstEnd)
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+static FORCE_INLINE void LZ4_wildCopy8(void *dstPtr, const void *srcPtr,
+ void *dstEnd)
{
BYTE *d = (BYTE *)dstPtr;
const BYTE *s = (const BYTE *)srcPtr;
BYTE *const e = (BYTE *)dstEnd;
do {
- LZ4_copy8(d, s);
+ LZ4_memcpy(d, s, 8);
d += 8;
s += 8;
} while (d < e);
}
-static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(reg_t val)
{
#if LZ4_LITTLE_ENDIAN
return __ffs(val) >> 3;
@@ -192,56 +186,64 @@
#endif
}
-static FORCE_INLINE unsigned int LZ4_count(
- const BYTE *pIn,
- const BYTE *pMatch,
- const BYTE *pInLimit)
+static FORCE_INLINE unsigned int LZ4_count(const BYTE *pIn, const BYTE *pMatch,
+ const BYTE *pInLimit)
{
const BYTE *const pStart = pIn;
+ if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ }
+ }
+
while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
- size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
-
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
if (!diff) {
pIn += STEPSIZE;
pMatch += STEPSIZE;
continue;
}
-
pIn += LZ4_NbCommonBytes(diff);
-
- return (unsigned int)(pIn - pStart);
+ return (unsigned)(pIn - pStart);
}
-#if LZ4_ARCH64
- if ((pIn < (pInLimit - 3))
- && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
+ (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
pIn += 4;
pMatch += 4;
}
-#endif
-
- if ((pIn < (pInLimit - 1))
- && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
pIn += 2;
pMatch += 2;
}
-
if ((pIn < pInLimit) && (*pMatch == *pIn))
pIn++;
-
- return (unsigned int)(pIn - pStart);
+ return (unsigned)(pIn - pStart);
}
-typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum {
+ noDict = 0,
+ withPrefix64k,
+ usingExtDict,
+ usingDictCtx
+} dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
-#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
-#endif
+#endif
\ No newline at end of file
Index: lib/lz4/lz4hc_compress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
--- a/lib/lz4/lz4hc_compress.c (revision 550dad3df59e8c39fe93484e6f207d4110619483)
+++ b/lib/lz4/lz4hc_compress.c (revision 30c5a157bf8495708813366f0ba3fc350f8a9657)
@@ -293,7 +293,7 @@
*token = (BYTE)(length<UTF-8
===================================================================
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
--- a/fs/f2fs/Makefile (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ b/fs/f2fs/Makefile (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -10,6 +10,3 @@
f2fs-$(CONFIG_FS_VERITY) += verity.o
f2fs-$(CONFIG_F2FS_FS_COMPRESSION) += compress.o
f2fs-$(CONFIG_F2FS_IOSTAT) += iostat.o
-ifeq ($(CONFIG_F2FS_FS_COMPRESSION_FIXED_OUTPUT),y)
-f2fs-$(CONFIG_ARM64) += $(addprefix lz4armv8/, lz4accel.o lz4armv8.o)
-endif
Index: fs/f2fs/compress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
--- a/fs/f2fs/compress.c (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ b/fs/f2fs/compress.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -20,7 +20,6 @@
#include "segment.h"
#include
#if defined(CONFIG_F2FS_FS_COMPRESSION_FIXED_OUTPUT) || defined(__ARCH_HAS_LZ4_ACCELERATOR)
-#include "lz4armv8/lz4accel.h"
#include "f2fs_lz4.h"
#endif
Index: include/linux/lz4.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
--- a/include/linux/lz4.h (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ b/include/linux/lz4.h (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -1,648 +1,17 @@
-/* LZ4 Kernel Interface
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee
- * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file is based on the original header file
- * for LZ4 - Fast LZ compression algorithm.
- *
- * LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2016, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- */
-
-#ifndef __LZ4_H__
-#define __LZ4_H__
-
-#include
-#include /* memset, memcpy */
-
-/*-************************************************************************
- * CONSTANTS
- **************************************************************************/
-/*
- * LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes
- * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
-#define LZ4_MEMORY_USAGE 14
-
-#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
-#define LZ4_COMPRESSBOUND(isize) (\
- (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \
- ? 0 \
- : (isize) + ((isize)/255) + 16)
-
-#define LZ4_ACCELERATION_DEFAULT 1
-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
-#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
-#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)
-
-#define LZ4HC_MIN_CLEVEL 3
-#define LZ4HC_DEFAULT_CLEVEL 9
-#define LZ4HC_MAX_CLEVEL 16
-
-#define LZ4HC_DICTIONARY_LOGSIZE 16
-#define LZ4HC_MAXD (1<= LZ4_compressBound(inputSize).
- * It also runs faster, so it's a recommended setting.
- * If the function cannot compress 'source' into a more limited 'dest' budget,
- * compression stops *immediately*, and the function result is zero.
- * As a consequence, 'dest' content is not valid.
- *
- * Return: Number of bytes written into buffer 'dest'
- * (necessarily <= maxOutputSize) or 0 if compression fails
- */
-int LZ4_compress_default(const char *source, char *dest, int inputSize,
- int maxOutputSize, void *wrkmem);
-
-/**
- * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
- * @source: source address of the original data
- * @dest: output buffer address of the compressed data
- * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
- * @maxOutputSize: full or partial size of buffer 'dest'
- * which must be already allocated
- * @acceleration: acceleration factor
- * @wrkmem: address of the working memory.
- * This requires 'workmem' of LZ4_MEM_COMPRESS.
- *
- * Same as LZ4_compress_default(), but allows to select an "acceleration"
- * factor. The larger the acceleration value, the faster the algorithm,
- * but also the lesser the compression. It's a trade-off. It can be fine tuned,
- * with each successive value providing roughly +~3% to speed.
- * An acceleration value of "1" is the same as regular LZ4_compress_default()
- * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
- *
- * Return: Number of bytes written into buffer 'dest'
- * (necessarily <= maxOutputSize) or 0 if compression fails
- */
-int LZ4_compress_fast(const char *source, char *dest, int inputSize,
- int maxOutputSize, int acceleration, void *wrkmem);
-
-/**
- * LZ4_compress_destSize() - Compress as much data as possible
- * from source to dest
- * @source: source address of the original data
- * @dest: output buffer address of the compressed data
- * @sourceSizePtr: will be modified to indicate how many bytes where read
- * from 'source' to fill 'dest'. New value is necessarily <= old value.
- * @targetDestSize: Size of buffer 'dest' which must be already allocated
- * @wrkmem: address of the working memory.
- * This requires 'workmem' of LZ4_MEM_COMPRESS.
- *
- * Reverse the logic, by compressing as much data as possible
- * from 'source' buffer into already allocated buffer 'dest'
- * of size 'targetDestSize'.
- * This function either compresses the entire 'source' content into 'dest'
- * if it's large enough, or fill 'dest' buffer completely with as much data as
- * possible from 'source'.
- *
- * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
- * or 0 if compression fails
- */
-int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
- int targetDestSize, void *wrkmem);
-
-/*-************************************************************************
- * Decompression Functions
- **************************************************************************/
-
-/**
- * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest'
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated with 'originalSize' bytes
- * @originalSize: is the original and therefore uncompressed size
- *
- * Decompresses data from 'source' into 'dest'.
- * This function fully respect memory boundaries for properly formed
- * compressed data.
- * It is a bit faster than LZ4_decompress_safe().
- * However, it does not provide any protection against intentionally
- * modified data stream (malicious input).
- * Use this function in trusted environment only
- * (data to decode comes from a trusted source).
- *
- * Return: number of bytes read from the source buffer
- * or a negative result if decompression fails.
- */
-int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
-
-/**
- * LZ4_decompress_safe() - Decompression protected against buffer overflow
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated
- * @compressedSize: is the precise full size of the compressed block
- * @maxDecompressedSize: is the size of 'dest' buffer
- *
- * Decompresses data from 'source' into 'dest'.
- * If the source stream is detected malformed, the function will
- * stop decoding and return a negative result.
- * This function is protected against buffer overflow exploits,
- * including malicious data packets. It never writes outside output buffer,
- * nor reads outside input buffer.
- *
- * Return: number of bytes decompressed into destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- */
-int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
- int maxDecompressedSize);
-
-/**
- * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
- * at position 'source' into buffer 'dest'
- * @source: source address of the compressed data
- * @dest: output buffer address of the decompressed data which must be
- * already allocated
- * @compressedSize: is the precise full size of the compressed block.
- * @targetOutputSize: the decompression operation will try
- * to stop as soon as 'targetOutputSize' has been reached
- * @maxDecompressedSize: is the size of destination buffer
- *
- * This function decompresses a compressed block of size 'compressedSize'
- * at position 'source' into destination buffer 'dest'
- * of size 'maxDecompressedSize'.
- * The function tries to stop decompressing operation as soon as
- * 'targetOutputSize' has been reached, reducing decompression time.
- * This function never writes outside of output buffer,
- * and never reads outside of input buffer.
- * It is therefore protected against malicious data packets.
- *
- * Return: the number of bytes decoded in the destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- *
- */
-int LZ4_decompress_safe_partial(const char *source, char *dest,
- int compressedSize, int targetOutputSize, int maxDecompressedSize);
-
-/*-************************************************************************
- * LZ4 HC Compression
- **************************************************************************/
-
-/**
- * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm
- * @src: source address of the original data
- * @dst: output buffer address of the compressed data
- * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
- * @dstCapacity: full or partial size of buffer 'dst',
- * which must be already allocated
- * @compressionLevel: Recommended values are between 4 and 9, although any
- * value between 1 and LZ4HC_MAX_CLEVEL will work.
- * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
- * @wrkmem: address of the working memory.
- * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
- *
- * Compress data from 'src' into 'dst', using the more powerful
- * but slower "HC" algorithm. Compression is guaranteed to succeed if
- * `dstCapacity >= LZ4_compressBound(srcSize)
- *
- * Return : the number of bytes written into 'dst' or 0 if compression fails.
- */
-int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
- int compressionLevel, void *wrkmem);
-
-/**
- * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
- * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
- * @compressionLevel: Recommended values are between 4 and 9, although any
- * value between 1 and LZ4HC_MAX_CLEVEL will work.
- * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
- *
- * An LZ4_streamHC_t structure can be allocated once
- * and re-used multiple times.
- * Use this function to init an allocated `LZ4_streamHC_t` structure
- * and start a new compression.
- */
-void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
-
-/**
- * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
- * @streamHCPtr: pointer to the LZ4HC_stream_t
- * @dictionary: dictionary to load
- * @dictSize: size of dictionary
- *
- * Use this function to load a static dictionary into LZ4HC_stream.
- * Any previous data will be forgotten, only 'dictionary'
- * will remain in memory.
- * Loading a size of 0 is allowed.
- *
- * Return : dictionary size, in bytes (necessarily <= 64 KB)
- */
-int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
- int dictSize);
-
-/**
- * LZ4_compress_HC_continue() - Compress 'src' using data from previously
- * compressed blocks as a dictionary using the HC algorithm
- * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
- * @src: source address of the original data
- * @dst: output buffer address of the compressed data,
- * which must be already allocated
- * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
- * @maxDstSize: full or partial size of buffer 'dest'
- * which must be already allocated
- *
- * These functions compress data in successive blocks of any size, using
- * previous blocks as dictionary. One key assumption is that previous
- * blocks (up to 64 KB) remain read-accessible while
- * compressing next blocks. There is an exception for ring buffers,
- * which can be smaller than 64 KB.
- * Ring buffers scenario is automatically detected and handled by
- * LZ4_compress_HC_continue().
- * Before starting compression, state must be properly initialized,
- * using LZ4_resetStreamHC().
- * A first "fictional block" can then be designated as
- * initial dictionary, using LZ4_loadDictHC() (Optional).
- * Then, use LZ4_compress_HC_continue()
- * to compress each successive block. Previous memory blocks
- * (including initial dictionary when present) must remain accessible
- * and unmodified during compression.
- * 'dst' buffer should be sized to handle worst case scenarios, using
- * LZ4_compressBound(), to ensure operation success.
- * If, for any reason, previous data blocks can't be preserved unmodified
- * in memory during next compression block,
- * you must save it to a safer memory space, using LZ4_saveDictHC().
- * Return value of LZ4_saveDictHC() is the size of dictionary
- * effectively saved into 'safeBuffer'.
- *
- * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
- */
-int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src,
- char *dst, int srcSize, int maxDstSize);
-
-/**
- * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream
- * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure
- * @safeBuffer: buffer to save dictionary to, must be already allocated
- * @maxDictSize: size of 'safeBuffer'
- *
- * If previously compressed data block is not guaranteed
- * to remain available at its memory location,
- * save it into a safer place (char *safeBuffer).
- * Note : you don't need to call LZ4_loadDictHC() afterwards,
- * dictionary is immediately usable, you can therefore call
- * LZ4_compress_HC_continue().
- *
- * Return : saved dictionary size in bytes (necessarily <= maxDictSize),
- * or 0 if error.
- */
-int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
- int maxDictSize);
-
-/*-*********************************************
- * Streaming Compression Functions
- ***********************************************/
-
-/**
- * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure
- * @LZ4_stream: pointer to the 'LZ4_stream_t' structure
- *
- * An LZ4_stream_t structure can be allocated once
- * and re-used multiple times.
- * Use this function to init an allocated `LZ4_stream_t` structure
- * and start a new compression.
- */
-void LZ4_resetStream(LZ4_stream_t *LZ4_stream);
-
-/**
- * LZ4_loadDict() - Load a static dictionary into LZ4_stream
- * @streamPtr: pointer to the LZ4_stream_t
- * @dictionary: dictionary to load
- * @dictSize: size of dictionary
- *
- * Use this function to load a static dictionary into LZ4_stream.
- * Any previous data will be forgotten, only 'dictionary'
- * will remain in memory.
- * Loading a size of 0 is allowed.
- *
- * Return : dictionary size, in bytes (necessarily <= 64 KB)
- */
-int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
- int dictSize);
-
-/**
- * LZ4_saveDict() - Save static dictionary from LZ4_stream
- * @streamPtr: pointer to the 'LZ4_stream_t' structure
- * @safeBuffer: buffer to save dictionary to, must be already allocated
- * @dictSize: size of 'safeBuffer'
- *
- * If previously compressed data block is not guaranteed
- * to remain available at its memory location,
- * save it into a safer place (char *safeBuffer).
- * Note : you don't need to call LZ4_loadDict() afterwards,
- * dictionary is immediately usable, you can therefore call
- * LZ4_compress_fast_continue().
- *
- * Return : saved dictionary size in bytes (necessarily <= dictSize),
- * or 0 if error.
- */
-int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize);
-
-/**
- * LZ4_compress_fast_continue() - Compress 'src' using data from previously
- * compressed blocks as a dictionary
- * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure
- * @src: source address of the original data
- * @dst: output buffer address of the compressed data,
- * which must be already allocated
- * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
- * @maxDstSize: full or partial size of buffer 'dest'
- * which must be already allocated
- * @acceleration: acceleration factor
- *
- * Compress buffer content 'src', using data from previously compressed blocks
- * as dictionary to improve compression ratio.
- * Important : Previous data blocks are assumed to still
- * be present and unmodified !
- * If maxDstSize >= LZ4_compressBound(srcSize),
- * compression is guaranteed to succeed, and runs faster.
- *
- * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
- */
-int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
- char *dst, int srcSize, int maxDstSize, int acceleration);
-
-/**
- * LZ4_setStreamDecode() - Instruct where to find dictionary
- * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
- * @dictionary: dictionary to use
- * @dictSize: size of dictionary
- *
- * Use this function to instruct where to find the dictionary.
- * Setting a size of 0 is allowed (same effect as reset).
- *
- * Return: 1 if OK, 0 if error
- */
-int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *dictionary, int dictSize);
-
-/**
- * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
- * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated
- * @compressedSize: is the precise full size of the compressed block
- * @maxDecompressedSize: is the size of 'dest' buffer
- *
- * This decoding function allows decompression of multiple blocks
- * in "streaming" mode.
- * Previously decoded blocks *must* remain available at the memory position
- * where they were decoded (up to 64 KB)
- * In the case of a ring buffers, decoding buffer must be either :
- * - Exactly same size as encoding buffer, with same update rule
- * (block boundaries at same positions) In which case,
- * the decoding & encoding ring buffer can have any size,
- * including very small ones ( < 64 KB).
- * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
- * maxBlockSize is implementation dependent.
- * It's the maximum size you intend to compress into a single block.
- * In which case, encoding and decoding buffers do not need
- * to be synchronized, and encoding ring buffer can have any size,
- * including small ones ( < 64 KB).
- * - _At least_ 64 KB + 8 bytes + maxBlockSize.
- * In which case, encoding and decoding buffers do not need to be
- * synchronized, and encoding ring buffer can have any size,
- * including larger than decoding buffer. W
- * Whenever these conditions are not possible, save the last 64KB of decoded
- * data into a safe buffer, and indicate where it is saved
- * using LZ4_setStreamDecode()
- *
- * Return: number of bytes decompressed into destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- */
-int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int compressedSize,
- int maxDecompressedSize);
-
-/**
- * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
- * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated with 'originalSize' bytes
- * @originalSize: is the original and therefore uncompressed size
- *
- * This decoding function allows decompression of multiple blocks
- * in "streaming" mode.
- * Previously decoded blocks *must* remain available at the memory position
- * where they were decoded (up to 64 KB)
- * In the case of a ring buffers, decoding buffer must be either :
- * - Exactly same size as encoding buffer, with same update rule
- * (block boundaries at same positions) In which case,
- * the decoding & encoding ring buffer can have any size,
- * including very small ones ( < 64 KB).
- * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
- * maxBlockSize is implementation dependent.
- * It's the maximum size you intend to compress into a single block.
- * In which case, encoding and decoding buffers do not need
- * to be synchronized, and encoding ring buffer can have any size,
- * including small ones ( < 64 KB).
- * - _At least_ 64 KB + 8 bytes + maxBlockSize.
- * In which case, encoding and decoding buffers do not need to be
- * synchronized, and encoding ring buffer can have any size,
- * including larger than decoding buffer. W
- * Whenever these conditions are not possible, save the last 64KB of decoded
- * data into a safe buffer, and indicate where it is saved
- * using LZ4_setStreamDecode()
- *
- * Return: number of bytes decompressed into destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- */
-int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int originalSize);
-
-/**
- * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
- * followed by LZ4_decompress_safe_continue()
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated
- * @compressedSize: is the precise full size of the compressed block
- * @maxDecompressedSize: is the size of 'dest' buffer
- * @dictStart: pointer to the start of the dictionary in memory
- * @dictSize: size of dictionary
- *
- * This decoding function works the same as
- * a combination of LZ4_setStreamDecode() followed by
- * LZ4_decompress_safe_continue()
- * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
- *
- * Return: number of bytes decompressed into destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- */
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxDecompressedSize, const char *dictStart,
- int dictSize);
-
-/**
- * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
- * followed by LZ4_decompress_fast_continue()
- * @source: source address of the compressed data
- * @dest: output buffer address of the uncompressed data
- * which must be already allocated with 'originalSize' bytes
- * @originalSize: is the original and therefore uncompressed size
- * @dictStart: pointer to the start of the dictionary in memory
- * @dictSize: size of dictionary
- *
- * This decoding function works the same as
- * a combination of LZ4_setStreamDecode() followed by
- * LZ4_decompress_fast_continue()
- * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
- *
- * Return: number of bytes decompressed into destination buffer
- * (necessarily <= maxDecompressedSize)
- * or a negative result in case of error
- */
-int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize, const char *dictStart, int dictSize);
+#define LZ4HC_MIN_CLEVEL LZ4HC_CLEVEL_MIN
+#define LZ4HC_DEFAULT_CLEVEL LZ4HC_CLEVEL_DEFAULT
+#define LZ4HC_MAX_CLEVEL LZ4HC_CLEVEL_MAX
#endif
Index: lib/lz4/Makefile
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
--- a/lib/lz4/Makefile (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ b/lib/lz4/Makefile (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -1,6 +1,7 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y += -O3
+ccflags-y += -O3 \
+ -DLZ4_FREESTANDING=1 \
+ -DLZ4_FAST_DEC_LOOP=1
-obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
-obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
-obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
+obj-y += lz4.o lz4hc.o
+
+obj-$(CONFIG_ARM64) += $(addprefix lz4armv8/, lz4accel.o lz4armv8.o)
Index: lib/lz4/lz4.c
===================================================================
diff --git a/lib/lz4/lz4.c b/lib/lz4/lz4.c
new file mode 100644
--- /dev/null (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/lib/lz4/lz4.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -0,0 +1,3484 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2023, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4_HEAPMODE :
+ * Select how stateless compression functions like `LZ4_compress_default()`
+ * allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4_HEAPMODE
+#define LZ4_HEAPMODE 1
+#endif
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The below switch allow to select different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violate C standard.
+ * It can generate buggy code on targets which assembly generation depends on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+#if defined(__GNUC__) && \
+ (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
+#define LZ4_FORCE_MEMORY_ACCESS 2
+#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || \
+ defined(_MSC_VER)
+#define LZ4_FORCE_MEMORY_ACCESS 1
+#endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && \
+ defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+#undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+#define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+/*-************************************
+* Dependency
+**************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+#define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+#include "lz4.h"
+/* see also "memory routines" below */
+
+/*-************************************
+* Compiler Options
+**************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+#include /* only present in VS2005+ */
+#pragma warning( \
+ disable : 4127) /* disable: C4127: conditional expression is constant */
+#pragma warning( \
+ disable : 6237) /* disable: C6237: conditional expression is always 0 */
+#pragma warning( \
+ disable : 6239) /* disable: C6239: ( && ) always evaluates to the result of */
+#pragma warning( \
+ disable : 6240) /* disable: C6240: ( && ) always evaluates to the result of */
+#pragma warning( \
+ disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */
+#endif /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+#if defined(_MSC_VER) && !defined(__clang__) /* MSVC */
+#define LZ4_FORCE_INLINE static __forceinline
+#else
+#if defined(__cplusplus) || \
+ defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+#if defined(__GNUC__) || defined(__clang__)
+#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+#else
+#define LZ4_FORCE_INLINE static inline
+#endif
+#else
+#define LZ4_FORCE_INLINE static
+#endif /* __STDC_VERSION__ */
+#endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && \
+ !defined(__clang__)
+#define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+#undef LZ4_FORCE_INLINE
+#define LZ4_FORCE_INLINE \
+ static __inline __attribute__((optimize("O2"), always_inline))
+#else
+#define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || \
+ (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
+ defined(__clang__)
+#define expect(expr, value) (__builtin_expect((expr), (value)))
+#else
+#define expect(expr, value) (expr)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+#define LZ4_ALIGN_TEST 1
+#endif
+
+/*-************************************
+* Memory routines
+**************************************/
+
+#if !LZ4_FREESTANDING
+#include /* memset, memcpy */
+#endif
+#if !defined(LZ4_memset)
+#define LZ4_memset(p, v, s) memset((p), (v), (s))
+#endif
+#define MEM_INIT(p, v, s) LZ4_memset((p), (v), (s))
+
+/*-************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE \
+ ((2 * WILDCOPYLENGTH) - \
+ MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT + 1);
+
+#define KB *(1 << 10)
+#define MB *(1 << 20)
+#define GB *(1U << 30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > \
+ LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+#error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 1)
+#include
+#else
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) \
+ { \
+ enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \
+ } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
+#include
+static int g_debuglog_enable = 1;
+#define DEBUGLOG(l, ...) \
+ { \
+ if ((g_debuglog_enable) && (l <= LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ " %i: ", __LINE__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } \
+ }
+#else
+#define DEBUGLOG(l, ...) \
+ { \
+ } /* disabled */
+#endif
+
+static int LZ4_isAligned(const void *ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment - 1)) == 0;
+}
+
+/*-************************************
+* Types
+**************************************/
+#include
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+#if defined(__x86_64__)
+typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union {
+ U32 u;
+ BYTE c[4];
+ } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define LZ4_PACK(__Declaration__) __Declaration__ __attribute__((__packed__))
+#elif defined(_MSC_VER)
+#define LZ4_PACK(__Declaration__) \
+ __pragma(pack(push, 1)) __Declaration__ __pragma(pack(pop))
+#endif
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void *memPtr)
+{
+ return *(const U16 *)memPtr;
+}
+static U32 LZ4_read32(const void *memPtr)
+{
+ return *(const U32 *)memPtr;
+}
+static reg_t LZ4_read_ARCH(const void *memPtr)
+{
+ return *(const reg_t *)memPtr;
+}
+
+static void LZ4_write16(void *memPtr, U16 value)
+{
+ *(U16 *)memPtr = value;
+}
+static void LZ4_write32(void *memPtr, U32 value)
+{
+ *(U32 *)memPtr = value;
+}
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
+LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
+LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
+
+static U16 LZ4_read16(const void *ptr)
+{
+ return ((const LZ4_unalign16 *)ptr)->u16;
+}
+static U32 LZ4_read32(const void *ptr)
+{
+ return ((const LZ4_unalign32 *)ptr)->u32;
+}
+static reg_t LZ4_read_ARCH(const void *ptr)
+{
+ return ((const LZ4_unalignST *)ptr)->uArch;
+}
+
+static void LZ4_write16(void *memPtr, U16 value)
+{
+ ((LZ4_unalign16 *)memPtr)->u16 = value;
+}
+static void LZ4_write32(void *memPtr, U32 value)
+{
+ ((LZ4_unalign32 *)memPtr)->u32 = value;
+}
+
+#else /* safe and portable access using memcpy() */
+
+static U16 LZ4_read16(const void *memPtr)
+{
+ U16 val;
+ LZ4_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+static U32 LZ4_read32(const void *memPtr)
+{
+ U32 val;
+ LZ4_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+static reg_t LZ4_read_ARCH(const void *memPtr)
+{
+ reg_t val;
+ LZ4_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+static void LZ4_write16(void *memPtr, U16 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void *memPtr, U32 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+static U16 LZ4_readLE16(const void *memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE *p = (const BYTE *)memPtr;
+ return (U16)((U16)p[0] | (p[1] << 8));
+ }
+}
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+static U32 LZ4_readLE32(const void *memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read32(memPtr);
+ } else {
+ const BYTE *p = (const BYTE *)memPtr;
+ return (U32)p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
+ }
+}
+#endif
+
+static void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE *p = (BYTE *)memPtr;
+ p[0] = (BYTE)value;
+ p[1] = (BYTE)(value >> 8);
+ }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void *dstPtr, const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_memcpy(d, s, 8);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
+
+static const unsigned inc32table[8] = { 0, 1, 2, 1, 0, 4, 4, 4 };
+static const int dec64table[8] = { 0, 0, 0, -1, -4, 1, 2, 3 };
+
+#ifndef LZ4_FAST_DEC_LOOP
+#if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+#define LZ4_FAST_DEC_LOOP 1
+#elif defined(__aarch64__) && defined(__APPLE__)
+#define LZ4_FAST_DEC_LOOP 1
+#elif defined(__aarch64__) && !defined(__clang__)
+/* On non-Apple aarch64, we disable this optimization for clang because
+ * on certain mobile chipsets, performance is reduced with clang. For
+ * more information refer to https://github.com/lz4/lz4/pull/707 */
+#define LZ4_FAST_DEC_LOOP 1
+#else
+#define LZ4_FAST_DEC_LOOP 0
+#endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void LZ4_memcpy_using_offset_base(BYTE *dstPtr,
+ const BYTE *srcPtr,
+ BYTE *dstEnd,
+ const size_t offset)
+{
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr,
+ 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr + 4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
+
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void LZ4_wildCopy32(void *dstPtr, const void *srcPtr,
+ void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_memcpy(d, s, 16);
+ LZ4_memcpy(d + 16, s + 16, 16);
+ d += 32;
+ s += 32;
+ } while (d < e);
+}
+
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 12 bytes available to write after dstEnd */
+LZ4_FORCE_INLINE void LZ4_memcpy_using_offset(BYTE *dstPtr, const BYTE *srcPtr,
+ BYTE *dstEnd, const size_t offset)
+{
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch (offset) {
+ case 1:
+ MEM_INIT(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
+#pragma warning(push)
+#pragma warning( \
+ disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
+#endif
+ LZ4_memcpy(&v[4], v, 4);
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
+#pragma warning(pop)
+#endif
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
+#endif
+
+/*-************************************
+* Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes(reg_t val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+ if (sizeof(val) == 8) {
+#if defined(_MSC_VER) && (_MSC_VER >= 1800) && \
+ (defined(_M_AMD64) && !defined(_M_ARM64EC)) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+/*-*************************************************************************************************
+* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
+* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
+* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
+****************************************************************************************************/
+#if defined(__clang__) && (__clang_major__ < 10)
+ /* Avoid undefined clang-cl intrinsics issue.
+ * See https://github.com/lz4/lz4/pull/1017 for details. */
+ return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
+#else
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+#endif
+#elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+#elif (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
+#else
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+#endif
+ } else /* 32 bits */ {
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+#elif (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
+#else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >>
+ 24;
+#endif
+ }
+ } else /* Big Endian CPU */ {
+ if (sizeof(val) == 8) {
+#if (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
+#else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128 bytes lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * that may end up slowing execution */
+ static const U32 by32 =
+ sizeof(val) *
+ 4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
+ unsigned r;
+ if (!(val >> by32)) {
+ r = 4;
+ } else {
+ r = 0;
+ val >>= by32;
+ }
+ if (!(val >> 16)) {
+ r += 2;
+ val >>= 8;
+ } else {
+ val >>= 24;
+ }
+ r += (!val);
+ return r;
+#endif
+#endif
+ } else /* 32 bits */ {
+#if (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz((U32)val) >> 3;
+#else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >>
+ 24;
+ return (unsigned)val ^ 3;
+#endif
+ }
+ }
+}
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ }
+ }
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
+ (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
+ if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+* Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
+static const U32 LZ4_skipTrigger =
+ 6; /* Increase this value ==> compression run slower on incompressible data */
+
+/*-************************************
+* Local Structures and types
+**************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously preceding in memory the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Everything concerning the preceding content is
+ * in a separate context, pointed to by ctx->dictCtx.
+ * ctx->dictionary, ctx->dictSize, and table entries
+ * in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+typedef enum {
+ noDict = 0,
+ withPrefix64k,
+ usingExtDict,
+ usingDictCtx
+} dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+/*-************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber(void)
+{
+ return LZ4_VERSION_NUMBER;
+}
+const char *LZ4_versionString(void)
+{
+ return LZ4_VERSION_STRING;
+}
+int LZ4_compressBound(int isize)
+{
+ return LZ4_COMPRESSBOUND(isize);
+}
+int LZ4_sizeofState(void)
+{
+ return sizeof(LZ4_stream_t);
+}
+
+/*-****************************************
+* Internal Definitions, used only in Tests
+*******************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source,
+ char *dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize);
+int LZ4_decompress_safe_partial_forceExtDict(const char *source, char *dest,
+ int compressedSize,
+ int targetOutputSize,
+ int dstCapacity,
+ const void *dictStart,
+ size_t dictSize);
+#if defined(__cplusplus)
+}
+#endif
+
+/*-******************************
+* Compression functions
+********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U) >>
+ ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ else
+ return ((sequence * 2654435761U) >>
+ ((MINMATCH * 8) - LZ4_HASHLOG));
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+{
+ const U32 hashLog =
+ (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
+ if (LZ4_isLittleEndian()) {
+ const U64 prime5bytes = 889523592379ULL;
+ return (U32)(((sequence << 24) * prime5bytes) >>
+ (64 - hashLog));
+ } else {
+ const U64 prime8bytes = 11400714785074694791ULL;
+ return (U32)(((sequence >> 24) * prime8bytes) >>
+ (64 - hashLog));
+ }
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p,
+ tableType_t const tableType)
+{
+ if ((sizeof(reg_t) == 8) && (tableType != byU16))
+ return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+ return LZ4_hash4(LZ4_readLE32(p), tableType);
+#else
+ return LZ4_hash4(LZ4_read32(p), tableType);
+#endif
+}
+
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void *tableBase,
+ tableType_t const tableType)
+{
+ switch (tableType) {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */
+ assert(0);
+ return;
+ }
+ case byPtr: {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+ hashTable[h] = NULL;
+ return;
+ }
+ case byU32: {
+ U32 *hashTable = (U32 *)tableBase;
+ hashTable[h] = 0;
+ return;
+ }
+ case byU16: {
+ U16 *hashTable = (U16 *)tableBase;
+ hashTable[h] = 0;
+ return;
+ }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase,
+ tableType_t const tableType)
+{
+ switch (tableType) {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */
+ assert(0);
+ return;
+ }
+ case byU32: {
+ U32 *hashTable = (U32 *)tableBase;
+ hashTable[h] = idx;
+ return;
+ }
+ case byU16: {
+ U16 *hashTable = (U16 *)tableBase;
+ assert(idx < 65536);
+ hashTable[h] = (U16)idx;
+ return;
+ }
+ }
+}
+
+/* LZ4_putPosition*() : only used in byPtr mode */
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE *p, U32 h,
+ void *tableBase,
+ tableType_t const tableType)
+{
+ const BYTE **const hashTable = (const BYTE **)tableBase;
+ assert(tableType == byPtr);
+ (void)tableType;
+ hashTable[h] = p;
+}
+
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase,
+ tableType_t tableType)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType);
+}
+
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void *tableBase,
+ tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32 *const hashTable = (const U32 *)tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE - 2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16 *const hashTable = (const U16 *)tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
+ return hashTable[h];
+ }
+ assert(0);
+ return 0; /* forbidden case */
+}
+
+static const BYTE *LZ4_getPositionOnHash(U32 h, const void *tableBase,
+ tableType_t tableType)
+{
+ assert(tableType == byPtr);
+ (void)tableType;
+ {
+ const BYTE *const *hashTable = (const BYTE *const *)tableBase;
+ return hashTable[h];
+ }
+}
+
+LZ4_FORCE_INLINE const BYTE *
+LZ4_getPosition(const BYTE *p, const void *tableBase, tableType_t tableType)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx,
+ const int inputSize,
+ const tableType_t tableType)
+{
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if ((tableType_t)cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if ((tableType_t)cctx->tableType != tableType ||
+ ((tableType == byU16) &&
+ cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) ||
+ ((tableType == byU32) && cctx->currentOffset > 1 GB) ||
+ tableType == byPtr || inputSize >= 4 KB) {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p",
+ cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = (U32)clearedTable;
+ } else {
+ DEBUGLOG(
+ 4,
+ "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
+ * is faster than compressing without a gap.
+ * However, compressing with currentOffset == 0 is faster still,
+ * so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic_validated() :
+ * inlined, to ensure branches are decided at compilation time.
+ * The following conditions are presumed already validated:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal *const cctx, const char *const source,
+ char *const dest, const int inputSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int maxOutputSize, const limitedOutput_directive outputDirective,
+ const tableType_t tableType, const dict_directive dictDirective,
+ const dictIssue_directive dictIssue, const int acceleration)
+{
+ int result;
+ const BYTE *ip = (const BYTE *)source;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE *base = (const BYTE *)source - startIndex;
+ const BYTE *lowLimit;
+
+ const LZ4_stream_t_internal *dictCtx =
+ (const LZ4_stream_t_internal *)cctx->dictCtx;
+ const BYTE *const dictionary = dictDirective == usingDictCtx ?
+ dictCtx->dictionary :
+ cctx->dictionary;
+ const U32 dictSize = dictDirective == usingDictCtx ? dictCtx->dictSize :
+ cctx->dictSize;
+ const U32 dictDelta =
+ (dictDirective == usingDictCtx) ?
+ startIndex - dictCtx->currentOffset :
+ 0; /* make indexes in dictCtx comparable with indexes in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) ||
+ (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit =
+ startIndex -
+ dictSize; /* used when dictDirective == dictSmall */
+ const BYTE *const dictEnd =
+ dictionary ? dictionary + dictSize : dictionary;
+ const BYTE *anchor = (const BYTE *)source;
+ const BYTE *const iend = ip + inputSize;
+ const BYTE *const mflimitPlusOne = iend - MFLIMIT + 1;
+ const BYTE *const matchlimit = iend - LASTLITERALS;
+
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE *dictBase =
+ (dictionary == NULL) ?
+ NULL :
+ (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
+ BYTE *op = (BYTE *)dest;
+ BYTE *const olimit = op + maxOutputSize;
+
+ U32 offset = 0;
+ U32 forwardH;
+
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u",
+ inputSize, tableType);
+ assert(ip != NULL);
+ if (tableType == byU16)
+ assert(inputSize <
+ LZ4_64Klimit); /* Size too large (not within 64K limit) */
+ if (tableType == byPtr)
+ assert(dictDirective ==
+ noDict); /* only supported use case with byPtr */
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) {
+ return 0;
+ } /* Impossible to store anything */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE *)source -
+ (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
+ }
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U32)tableType;
+
+ if (inputSize < LZ4_minLength)
+ goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ {
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+ } else {
+ LZ4_putIndexOnHash(startIndex, h, cctx->hashTable,
+ tableType);
+ }
+ }
+ ip++;
+ forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for (;;) {
+ const BYTE *match;
+ BYTE *token;
+ const BYTE *filledIp;
+
+ /* Find a match */
+ if (tableType == byPtr) {
+ const BYTE *forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne))
+ goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ match = LZ4_getPositionOnHash(
+ h, cctx->hashTable, tableType);
+ forwardH =
+ LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable,
+ tableType);
+
+ } while ((match + LZ4_DISTANCE_MAX < ip) ||
+ (LZ4_read32(match) != LZ4_read32(ip)));
+
+ } else { /* byU32, byU16 */
+
+ const BYTE *forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const currentPos = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(
+ h, cctx->hashTable, tableType);
+ assert(matchIndex <= currentPos);
+ assert(forwardIp - base <
+ (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne))
+ goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(
+ h, dictCtx->hashTable,
+ byU32);
+ match = dictBase + matchIndex;
+ matchIndex +=
+ dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)source;
+ }
+ } else if (dictDirective == usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(
+ 7,
+ "extDict candidate: matchIndex=%5u < startIndex=%5u",
+ matchIndex, startIndex);
+ assert(startIndex -
+ matchIndex >=
+ MINMATCH);
+ assert(dictBase);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)source;
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
+ forwardH =
+ LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putIndexOnHash(currentPos, h,
+ cctx->hashTable, tableType);
+
+ DEBUGLOG(7,
+ "candidate at pos=%u (offset=%u \n",
+ matchIndex, currentPos - matchIndex);
+ if ((dictIssue == dictSmall) &&
+ (matchIndex < prefixIdxLimit)) {
+ continue;
+ } /* match outside of valid area */
+ assert(matchIndex < currentPos);
+ if (((tableType != byU16) ||
+ (LZ4_DISTANCE_MAX <
+ LZ4_DISTANCE_ABSOLUTE_MAX)) &&
+ (matchIndex + LZ4_DISTANCE_MAX <
+ currentPos)) {
+ continue;
+ } /* too far */
+ assert((currentPos - matchIndex) <=
+ LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem)
+ offset =
+ currentPos - matchIndex;
+ break; /* match found */
+ }
+
+ } while (1);
+ }
+
+ /* Catch up */
+ filledIp = ip;
+ assert(ip >
+ anchor); /* this is always true as ip has been advanced before entering the main loop */
+ if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
+ do {
+ ip--;
+ match--;
+ } while (((ip > anchor) & (match > lowLimit)) &&
+ (unlikely(ip[-1] == match[-1])));
+ }
+
+ /* Encode Literals */
+ {
+ unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if ((outputDirective ==
+ limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
+ (litLength / 255) >
+ olimit))) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(
+ op + (litLength + 240) / 255 /* litlen */ +
+ litLength /* literals */ +
+ 2 /* offset */ + 1 /* token */ +
+ MFLIMIT -
+ MINMATCH /* min last literals so last match is <= end - MFLIMIT */
+ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ unsigned len = litLength - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + litLength);
+ op += litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor - (const BYTE *)source),
+ litLength, (int)(ip - (const BYTE *)source));
+ }
+
+ _next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT -
+ MINMATCH /* min last literals so last match is <= end - MFLIMIT */
+ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
+ /* Encode Offset */
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6,
+ " with offset=%u (ext if > %i)",
+ offset, (int)(ip - (const BYTE *)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset);
+ op += 2;
+ } else {
+ DEBUGLOG(6,
+ " with offset=%u (same segment)",
+ (U32)(ip - match));
+ assert(ip - match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
+ }
+
+ /* Encode MatchLength */
+ {
+ unsigned matchCode;
+
+ if ((dictDirective == usingExtDict ||
+ dictDirective == usingDictCtx) &&
+ (lowLimit ==
+ dictionary) /* match within extDict */) {
+ const BYTE *limit = ip + (dictEnd - match);
+ assert(dictEnd > match);
+ if (limit > matchlimit)
+ limit = matchlimit;
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, limit);
+ ip += (size_t)matchCode + MINMATCH;
+ if (ip == limit) {
+ unsigned const more =
+ LZ4_count(limit,
+ (const BYTE *)source,
+ matchlimit);
+ matchCode += more;
+ ip += more;
+ }
+ DEBUGLOG(
+ 6,
+ " with matchLength=%u starting in extDict",
+ matchCode + MINMATCH);
+ } else {
+ matchCode =
+ LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u",
+ matchCode + MINMATCH);
+ }
+
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) +
+ (matchCode + 240) / 255 >
+ olimit))) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode =
+ 15 /* in token */ -
+ 1 /* to avoid needing a zero byte */ +
+ ((U32)(olimit - op) - 1 -
+ LASTLITERALS) *
+ 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE *ptr;
+ DEBUGLOG(
+ 5,
+ "Clearing %u positions",
+ (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp;
+ ++ptr) {
+ U32 const h =
+ LZ4_hashPosition(
+ ptr,
+ tableType);
+ LZ4_clearHash(
+ h,
+ cctx->hashTable,
+ tableType);
+ }
+ }
+ } else {
+ assert(outputDirective ==
+ limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+ while (matchCode >= 4 * 255) {
+ op += 4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4 * 255;
+ }
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput &&
+ op + 1 + LASTLITERALS > olimit));
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip >= mflimitPlusOne)
+ break;
+
+ /* Fill table */
+ {
+ U32 const h = LZ4_hashPosition(ip - 2, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip - 2, h,
+ cctx->hashTable, byPtr);
+ } else {
+ U32 const idx = (U32)((ip - 2) - base);
+ LZ4_putIndexOnHash(idx, h, cctx->hashTable,
+ tableType);
+ }
+ }
+
+ /* Test next position */
+ if (tableType == byPtr) {
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType);
+ LZ4_putPosition(ip, cctx->hashTable, tableType);
+ if ((match + LZ4_DISTANCE_MAX >= ip) &&
+ (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ goto _next_match;
+ }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const currentPos = (U32)(ip - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable,
+ tableType);
+ assert(matchIndex < currentPos);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(
+ h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit =
+ dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)
+ source; /* required for match length counter */
+ }
+ } else if (dictDirective == usingExtDict) {
+ if (matchIndex < startIndex) {
+ assert(dictBase);
+ match = dictBase + matchIndex;
+ lowLimit =
+ dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE *)
+ source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(currentPos, h, cctx->hashTable,
+ tableType);
+ assert(matchIndex < currentPos);
+ if (((dictIssue == dictSmall) ?
+ (matchIndex >= prefixIdxLimit) :
+ 1) &&
+ (((tableType == byU16) &&
+ (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ?
+ 1 :
+ (matchIndex + LZ4_DISTANCE_MAX >=
+ currentPos)) &&
+ (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ if (maybe_extMem)
+ offset = currentPos - matchIndex;
+ DEBUGLOG(
+ 6,
+ "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor - (const BYTE *)source), 0,
+ (int)(ip - (const BYTE *)source));
+ goto _next_match;
+ }
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t lastRun = (size_t)(iend - anchor);
+ if ((outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
+ olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit - op) - 1 /*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) /
+ 256; /*additional length tokens*/
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun << ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
+ op += lastRun;
+ }
+
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int)(((const char *)ip) - source);
+ }
+ result = (int)(((char *)op) - dest);
+ assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes",
+ inputSize, result);
+ return result;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal *const cctx, const char *const src,
+ char *const dst, const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity, const limitedOutput_directive outputDirective,
+ const tableType_t tableType, const dict_directive dictDirective,
+ const dictIssue_directive dictIssue, const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", srcSize,
+ dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ return 0;
+ } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0)
+ return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert(inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(
+ cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective, tableType, dictDirective,
+ dictIssue, acceleration);
+}
+
+int LZ4_compress_fast_extState(void *state, const char *source, char *dest,
+ int inputSize, int maxOutputSize,
+ int acceleration)
+{
+ LZ4_stream_t_internal *const ctx =
+ &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
+ assert(ctx != NULL);
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX)
+ acceleration = LZ4_ACCELERATION_MAX;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest,
+ inputSize, NULL, 0,
+ notLimited, byU16, noDict,
+ noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)source > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(ctx, source, dest,
+ inputSize, NULL, 0,
+ notLimited, tableType,
+ noDict, noDictIssue,
+ acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(
+ ctx, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)source > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(
+ ctx, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void *state, const char *src,
+ char *dst, int srcSize,
+ int dstCapacity, int acceleration)
+{
+ LZ4_stream_t_internal *const ctx =
+ &((LZ4_stream_t *)state)->internal_donotuse;
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX)
+ acceleration = LZ4_ACCELERATION_MAX;
+ assert(ctx != NULL);
+
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(
+ ctx, src, dst, srcSize, NULL, 0,
+ notLimited, tableType, noDict,
+ dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(
+ ctx, src, dst, srcSize, NULL, 0,
+ notLimited, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)src > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize,
+ NULL, 0, notLimited,
+ tableType, noDict,
+ noDictIssue, acceleration);
+ }
+ } else {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(
+ ctx, src, dst, srcSize, NULL,
+ dstCapacity, limitedOutput, tableType,
+ noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(
+ ctx, src, dst, srcSize, NULL,
+ dstCapacity, limitedOutput, tableType,
+ noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)src > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize,
+ NULL, dstCapacity,
+ limitedOutput, tableType,
+ noDict, noDictIssue,
+ acceleration);
+ }
+ }
+}
+
+int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity,
+ int acceleration, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, src, dest, srcSize, dstCapacity, acceleration);
+}
+EXPORT_SYMBOL(LZ4_compress_fast);
+
+int LZ4_compress_default(const char *src, char *dst, int srcSize,
+ int dstCapacity, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, src, dst, srcSize,
+ dstCapacity, 1);
+}
+EXPORT_SYMBOL(LZ4_compress_default);
+
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
+static int LZ4_compress_destSize_extState_internal(LZ4_stream_t *state,
+ const char *src, char *dst,
+ int *srcSizePtr,
+ int targetDstSize,
+ int acceleration)
+{
+ void *const s = LZ4_initStream(state, sizeof(*state));
+ assert(s != NULL);
+ (void)s;
+
+ if (targetDstSize >=
+ LZ4_compressBound(
+ *srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
+ targetDstSize, acceleration);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse,
+ src, dst, *srcSizePtr,
+ srcSizePtr, targetDstSize,
+ fillOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ } else {
+ tableType_t const addrMode =
+ ((sizeof(void *) == 4) &&
+ ((uptrval)src > LZ4_DISTANCE_MAX)) ?
+ byPtr :
+ byU32;
+ return LZ4_compress_generic(&state->internal_donotuse,
+ src, dst, *srcSizePtr,
+ srcSizePtr, targetDstSize,
+ fillOutput, addrMode,
+ noDict, noDictIssue,
+ acceleration);
+ }
+ }
+}
+
+int LZ4_compress_destSize_extState(void *state, const char *src, char *dst,
+ int *srcSizePtr, int targetDstSize,
+ int acceleration)
+{
+ int const r = LZ4_compress_destSize_extState_internal(
+ (LZ4_stream_t *)state, src, dst, srcSizePtr, targetDstSize,
+ acceleration);
+ /* clean the state on exit */
+ LZ4_initStream(state, sizeof(LZ4_stream_t));
+ return r;
+}
+
+int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr,
+ int targetDstSize, void *wrkmem)
+{
+ return LZ4_compress_destSize_extState_internal(
+ wrkmem, src, dst, srcSizePtr, targetDstSize, 1);
+}
+EXPORT_SYMBOL(LZ4_compress_destSize);
+
+/*-******************************
+* Streaming functions
+********************************/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_stream_t *LZ4_createStream(void)
+{
+ LZ4_stream_t *const lz4s = (LZ4_stream_t *)ALLOC(sizeof(LZ4_stream_t));
+ LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >=
+ sizeof(LZ4_stream_t_internal));
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL)
+ return NULL;
+ LZ4_initStream(lz4s, sizeof(*lz4s));
+ return lz4s;
+}
+#endif
+
+static size_t LZ4_stream_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct {
+ char c;
+ LZ4_stream_t t;
+ } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+LZ4_stream_t *LZ4_initStream(void *buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) {
+ return NULL;
+ }
+ if (size < sizeof(LZ4_stream_t)) {
+ return NULL;
+ }
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment()))
+ return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+ return (LZ4_stream_t *)buffer;
+}
+
+/* resetStream is now deprecated,
+ * prefer initStream() which is more general */
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
+{
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+}
+
+void LZ4_resetStream_fast(LZ4_stream_t *ctx)
+{
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+int LZ4_freeStream(LZ4_stream_t *LZ4_stream)
+{
+ if (!LZ4_stream)
+ return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
+ FREEMEM(LZ4_stream);
+ return (0);
+}
+#endif
+
+typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
+#define HASH_UNIT sizeof(reg_t)
+int LZ4_loadDict_internal(LZ4_stream_t *LZ4_dict, const char *dictionary,
+ int dictSize, LoadDict_mode_e _ld)
+{
+ LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
+ const BYTE *p = (const BYTE *)dictionary;
+ const BYTE *const dictEnd = p + dictSize;
+ U32 idx32;
+
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize,
+ dictionary, LZ4_dict);
+
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable()
+ * to avoid any risk of generating overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
+ dict->currentOffset += 64 KB;
+
+ if (dictSize < (int)HASH_UNIT) {
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 KB)
+ p = dictEnd - 64 KB;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->tableType = (U32)tableType;
+ idx32 = dict->currentOffset - dict->dictSize;
+
+ while (p <= dictEnd - HASH_UNIT) {
+ U32 const h = LZ4_hashPosition(p, tableType);
+ /* Note: overwriting => favors positions end of dictionary */
+ LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+ p += 3;
+ idx32 += 3;
+ }
+
+ if (_ld == _ld_slow) {
+ /* Fill hash table with additional references, to improve compression capability */
+ p = dict->dictionary;
+ idx32 = dict->currentOffset - dict->dictSize;
+ while (p <= dictEnd - HASH_UNIT) {
+ U32 const h = LZ4_hashPosition(p, tableType);
+ U32 const limit = dict->currentOffset - 64 KB;
+ if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <=
+ limit) {
+ /* Note: not overwriting => favors positions beginning of dictionary */
+ LZ4_putIndexOnHash(idx32, h, dict->hashTable,
+ tableType);
+ }
+ p++;
+ idx32++;
+ }
+ }
+
+ return (int)dict->dictSize;
+}
+
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
+{
+ return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
+}
+EXPORT_SYMBOL(LZ4_loadDict);
+
+int LZ4_loadDictSlow(LZ4_stream_t *LZ4_dict, const char *dictionary,
+ int dictSize)
+{
+ return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
+}
+
+void LZ4_attach_dictionary(LZ4_stream_t *workingStream,
+ const LZ4_stream_t *dictionaryStream)
+{
+ const LZ4_stream_t_internal *dictCtx =
+ (dictionaryStream == NULL) ?
+ NULL :
+ &(dictionaryStream->internal_donotuse);
+
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", workingStream,
+ dictionaryStream, dictCtx != NULL ? dictCtx->dictSize : 0);
+
+ if (dictCtx != NULL) {
+ /* If the current offset is zero, we will never look in the
+ * external dictionary context, since there is no value a table
+ * entry can take that indicate a miss. In that case, we need
+ * to bump the offset to something non-zero.
+ */
+ if (workingStream->internal_donotuse.currentOffset == 0) {
+ workingStream->internal_donotuse.currentOffset = 64 KB;
+ }
+
+ /* Don't actually attach an empty dictionary.
+ */
+ if (dictCtx->dictSize == 0) {
+ dictCtx = NULL;
+ }
+ }
+ workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, int nextSize)
+{
+ assert(nextSize >= 0);
+ if (LZ4_dict->currentOffset + (unsigned)nextSize >
+ 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 KB;
+ const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+ DEBUGLOG(4, "LZ4_renormDictT");
+ for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta)
+ LZ4_dict->hashTable[i] = 0;
+ else
+ LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 KB;
+ if (LZ4_dict->dictSize > 64 KB)
+ LZ4_dict->dictSize = 64 KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
+ char *dest, int inputSize, int maxOutputSize,
+ int acceleration)
+{
+ const tableType_t tableType = byU32;
+ LZ4_stream_t_internal *const streamPtr = &LZ4_stream->internal_donotuse;
+ const char *dictEnd = streamPtr->dictSize ?
+ (const char *)streamPtr->dictionary +
+ streamPtr->dictSize :
+ NULL;
+
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)",
+ inputSize, streamPtr->dictSize);
+
+ LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX)
+ acceleration = LZ4_ACCELERATION_MAX;
+
+ /* invalidate tiny dictionaries */
+ if ((streamPtr->dictSize <
+ 4) /* tiny dictionary : not enough for a hash */
+ && (dictEnd != source) /* prefix mode */
+ &&
+ (inputSize >
+ 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
+ && (streamPtr->dictCtx == NULL) /* usingDictCtx */
+ ) {
+ DEBUGLOG(
+ 5,
+ "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small",
+ streamPtr->dictSize, streamPtr->dictionary);
+ /* remove dictionary existence from history, to employ faster prefix mode */
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE *)source;
+ dictEnd = source;
+ }
+
+ /* Check overlapping input/dictionary space */
+ {
+ const char *const sourceEnd = source + inputSize;
+ if ((sourceEnd > (const char *)streamPtr->dictionary) &&
+ (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 KB)
+ streamPtr->dictSize = 64 KB;
+ if (streamPtr->dictSize < 4)
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary =
+ (const BYTE *)dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == source) {
+ if ((streamPtr->dictSize < 64 KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset))
+ return LZ4_compress_generic(
+ streamPtr, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, tableType,
+ withPrefix64k, dictSmall, acceleration);
+ else
+ return LZ4_compress_generic(
+ streamPtr, source, dest, inputSize, NULL,
+ maxOutputSize, limitedOutput, tableType,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx,
+ sizeof(*streamPtr));
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, noDictIssue,
+ acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingDictCtx, noDictIssue,
+ acceleration);
+ }
+ } else { /* small data <= 4 KB */
+ if ((streamPtr->dictSize < 64 KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, dictSmall,
+ acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ NULL, maxOutputSize, limitedOutput,
+ tableType, usingExtDict, noDictIssue,
+ acceleration);
+ }
+ }
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)inputSize;
+ return result;
+ }
+}
+
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source,
+ char *dest, int srcSize)
+{
+ LZ4_stream_t_internal *const streamPtr = &LZ4_dict->internal_donotuse;
+ int result;
+
+ LZ4_renormDictT(streamPtr, srcSize);
+
+ if ((streamPtr->dictSize < 64 KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize,
+ NULL, 0, notLimited, byU32,
+ usingExtDict, dictSmall, 1);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize,
+ NULL, 0, notLimited, byU32,
+ usingExtDict, noDictIssue, 1);
+ }
+
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)srcSize;
+
+ return result;
+}
+
+/*! LZ4_saveDict() :
+ * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,
+ * one can therefore call LZ4_compress_fast_continue() right after.
+ * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ */
+int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
+
+ DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize,
+ safeBuffer);
+
+ if ((U32)dictSize > 64 KB) {
+ dictSize = 64 KB;
+ } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) {
+ dictSize = (int)dict->dictSize;
+ }
+
+ if (safeBuffer == NULL)
+ assert(dictSize == 0);
+ if (dictSize > 0) {
+ const BYTE *const previousDictEnd =
+ dict->dictionary + dict->dictSize;
+ assert(dict->dictionary);
+ LZ4_memmove(safeBuffer, previousDictEnd - dictSize,
+ (size_t)dictSize);
+ }
+
+ dict->dictionary = (const BYTE *)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDict);
+
+/*-*******************************
+ * Decompression functions
+ ********************************/
+
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+static size_t read_long_length_no_check(const BYTE **pp)
+{
+ size_t b, l = 0;
+ do {
+ b = **pp;
+ (*pp)++;
+ l += b;
+ } while (b == 255);
+ DEBUGLOG(6,
+ "read_long_length_no_check: +length=%zu using %zu input bytes",
+ l, l / 255 + 1)
+ return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presume input buffer is "large enough")
+ * - Decompress a full block (only)
+ * @return : nb of bytes read from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ * the goal is to remove support of decompress_fast*() variants by v2.0
+**/
+LZ4_FORCE_INLINE int LZ4_decompress_unsafe_generic(
+ const BYTE *const istart, BYTE *const ostart, int decompressedSize,
+
+ size_t prefixSize,
+ const BYTE *const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note: =0 if dictStart==NULL */
+)
+{
+ const BYTE *ip = istart;
+ BYTE *op = (BYTE *)ostart;
+ BYTE *const oend = ostart + decompressedSize;
+ const BYTE *const prefixStart = ostart - prefixSize;
+
+ DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+ if (dictStart == NULL)
+ assert(dictSize == 0);
+
+ while (1) {
+ /* start new sequence */
+ unsigned token = *ip++;
+
+ /* literals */
+ {
+ size_t ll = token >> ML_BITS;
+ if (ll == 15) {
+ /* long literal length */
+ ll += read_long_length_no_check(&ip);
+ }
+ if ((size_t)(oend - op) < ll)
+ return -1; /* output buffer overflow */
+ LZ4_memmove(op, ip,
+ ll); /* support in-place decompression */
+ op += ll;
+ ip += ll;
+ if ((size_t)(oend - op) < MFLIMIT) {
+ if (op == oend)
+ break; /* end of block */
+ DEBUGLOG(
+ 5,
+ "invalid: literals end at distance %zi from end of block",
+ oend - op);
+ /* incorrect end of block :
+ * last match must start at least MFLIMIT==12 bytes before end of output block */
+ return -1;
+ }
+ }
+
+ /* match */
+ {
+ size_t ml = token & 15;
+ size_t const offset = LZ4_readLE16(ip);
+ ip += 2;
+
+ if (ml == 15) {
+ /* long literal length */
+ ml += read_long_length_no_check(&ip);
+ }
+ ml += MINMATCH;
+
+ if ((size_t)(oend - op) < ml)
+ return -1; /* output buffer overflow */
+
+ {
+ const BYTE *match = op - offset;
+
+ /* out of range */
+ if (offset >
+ (size_t)(op - prefixStart) + dictSize) {
+ DEBUGLOG(6, "offset out of range");
+ return -1;
+ }
+
+ /* check special case : extDict */
+ if (offset > (size_t)(op - prefixStart)) {
+ /* extDict scenario */
+ const BYTE *const dictEnd =
+ dictStart + dictSize;
+ const BYTE *extMatch =
+ dictEnd -
+ (offset -
+ (size_t)(op - prefixStart));
+ size_t const extml =
+ (size_t)(dictEnd - extMatch);
+ if (extml > ml) {
+ /* match entirely within extDict */
+ LZ4_memmove(op, extMatch, ml);
+ op += ml;
+ ml = 0;
+ } else {
+ /* match split between extDict & prefix */
+ LZ4_memmove(op, extMatch,
+ extml);
+ op += extml;
+ ml -= extml;
+ }
+ match = prefixStart;
+ }
+
+ /* match copy - slow variant, supporting overlap copy */
+ {
+ size_t u;
+ for (u = 0; u < ml; u++) {
+ op[u] = match[u];
+ }
+ }
+ }
+ op += ml;
+ if ((size_t)(oend - op) < LASTLITERALS) {
+ DEBUGLOG(
+ 5,
+ "invalid: match ends at distance %zi from end of block",
+ oend - op);
+ /* incorrect end of block :
+ * last match must stop at least LASTLITERALS==5 bytes before end of output block */
+ return -1;
+ }
+ } /* match */
+ } /* main loop */
+ return (int)(ip - istart);
+}
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer
+ * @ilimit : position after which if length is not decoded, the input is necessarily corrupted.
+ * @initial_check - check ip >= ipmax before start of loop. Returns initial_error if so.
+ * @error (output) - error code. Must be set to 0 before call.
+**/
+typedef size_t Rvl_t;
+static const Rvl_t rvl_error = (Rvl_t)(-1);
+LZ4_FORCE_INLINE Rvl_t read_variable_length(const BYTE **ip, const BYTE *ilimit,
+ int initial_check)
+{
+ Rvl_t s, length = 0;
+ assert(ip != NULL);
+ assert(*ip != NULL);
+ assert(ilimit != NULL);
+ if (initial_check &&
+ unlikely((*ip) >= ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (unlikely((*ip) > ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ /* accumulator overflow detection (32-bit mode only) */
+ if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1) / 2))) {
+ return rvl_error;
+ }
+ if (likely(s != 255))
+ return length;
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (unlikely((*ip) > ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ /* accumulator overflow detection (32-bit mode only) */
+ if ((sizeof(length) < 8) &&
+ unlikely(length > ((Rvl_t)(-1) / 2))) {
+ return rvl_error;
+ }
+ } while (s == 255);
+
+ return length;
+}
+
+/*! LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+LZ4_FORCE_INLINE int __LZ4_decompress_generic(
+ const char *const src, char *const dst, const BYTE *ip, BYTE *op,
+ int srcSize,
+ int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+ earlyEnd_directive partialDecoding, /* full, partial */
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE *const lowPrefix, /* always <= dst, == dst when no prefix */
+ const BYTE *const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+)
+{
+ if ((src == NULL) || (outputSize < 0)) {
+ return -1;
+ }
+
+ {
+ const BYTE *const iend = src + srcSize;
+
+ BYTE *const oend = dst + outputSize;
+ BYTE *cpy;
+
+ const BYTE *const dictEnd =
+ (dictStart == NULL) ? NULL : dictStart + dictSize;
+
+ const int checkOffset = (dictSize < (int)(64 KB));
+
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend =
+ iend - 14 /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend =
+ oend - 14 /*maxLL*/ - 18 /*maxML*/;
+
+ const BYTE *match;
+ size_t offset;
+ unsigned token;
+ size_t length;
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)",
+ srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if (unlikely(outputSize == 0)) {
+ /* Empty output buffer */
+ if (partialDecoding)
+ return 0;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
+ }
+ if (unlikely(srcSize == 0)) {
+ return -1;
+ }
+
+ /* LZ4_FAST_DEC_LOOP:
+ * designed for modern OoO performance cpus,
+ * where copying reliably 32-bytes is preferable to an unpredictable branch.
+ * note : fast loop may show a regression for some client arm chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "move to safe decode loop");
+ goto safe_decode;
+ }
+
+ /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+ DEBUGLOG(6, "using fast decode loop");
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+ DEBUGLOG(7, "blockPos%6u: litLength token = %u",
+ (unsigned)(op - (BYTE *)dst),
+ (unsigned)length);
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - RUN_MASK, 1);
+ if (addl == rvl_error) {
+ DEBUGLOG(
+ 6,
+ "error reading long literal length");
+ goto _output_error;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)(op))) {
+ goto _output_error;
+ } /* overflow detection */
+ if (unlikely((uptrval)(ip) + length <
+ (uptrval)(ip))) {
+ goto _output_error;
+ } /* overflow detection */
+
+ /* copy literals */
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((op + length > oend - 32) ||
+ (ip + length > iend - 32)) {
+ goto safe_literal_copy;
+ }
+ LZ4_wildCopy32(op, ip, op + length);
+ ip += length;
+ op += length;
+ } else if (ip <=
+ iend - (16 +
+ 1 /*max lit + offset + nextToken*/)) {
+ /* We don't need to check oend, since we check it once for each loop below */
+ DEBUGLOG(7,
+ "copy %u bytes in a 16-bytes stripe",
+ (unsigned)length);
+ /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
+ LZ4_memcpy(op, ip, 16);
+ ip += length;
+ op += length;
+ } else {
+ goto safe_literal_copy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ DEBUGLOG(6, "blockPos%6u: offset = %u",
+ (unsigned)(op - (BYTE *)dst),
+ (unsigned)offset);
+ match = op - offset;
+ assert(match <= op); /* overflow check */
+
+ /* get matchlength */
+ length = token & ML_MASK;
+ DEBUGLOG(7, " match length token = %u (len==%u)",
+ (unsigned)length, (unsigned)length + MINMATCH);
+
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) {
+ DEBUGLOG(
+ 5,
+ "error reading long match length");
+ goto _output_error;
+ }
+ length += addl;
+ length += MINMATCH;
+ DEBUGLOG(7, " long match length == %u",
+ (unsigned)length);
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)op)) {
+ goto _output_error;
+ } /* overflow detection */
+ if (op + length >=
+ oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >=
+ oend - FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(
+ 7,
+ "moving to safe_match_copy (ml==%u)",
+ (unsigned)length);
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: skip LZ4_wildCopy32 when true */
+ if ((dict == withPrefix64k) ||
+ (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op + 8, match + 8,
+ 8);
+ LZ4_memcpy(op + 16, match + 16,
+ 2);
+ op += length;
+ continue;
+ }
+ }
+ }
+
+ if (checkOffset &&
+ (unlikely(match + dictSize < lowPrefix))) {
+ DEBUGLOG(
+ 5,
+ "Error : pos=%zi, offset=%zi => outside buffers",
+ op - lowPrefix, op - match);
+ goto _output_error;
+ }
+ /* match starting within external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op + length >
+ oend - LASTLITERALS)) {
+ if (partialDecoding) {
+ DEBUGLOG(
+ 7,
+ "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(
+ length,
+ (size_t)(oend - op));
+ } else {
+ DEBUGLOG(
+ 6,
+ "end-of-block condition violated")
+ goto _output_error;
+ }
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op,
+ dictEnd -
+ (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize =
+ (size_t)(lowPrefix - match);
+ size_t const restSize =
+ length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize,
+ copySize);
+ op += copySize;
+ if (restSize >
+ (size_t)(op -
+ lowPrefix)) { /* overlap copy */
+ BYTE *const endOfMatch =
+ op + restSize;
+ const BYTE *copyFrom =
+ lowPrefix;
+ while (op < endOfMatch) {
+ *op++ = *copyFrom++;
+ }
+ } else {
+ LZ4_memcpy(op, lowPrefix,
+ restSize);
+ op += restSize;
+ }
+ }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend - op >= 32));
+ if (unlikely(offset < 16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
+ }
+ safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ DEBUGLOG(6, "using safe decode loop");
+ while (1) {
+ assert(ip < iend);
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+ DEBUGLOG(7, "blockPos%6u: litLength token = %u",
+ (unsigned)(op - (BYTE *)dst),
+ (unsigned)length);
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ((length != RUN_MASK)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((ip < shortiend) & (op <= shortoend))) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, 16);
+ op += length;
+ ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ DEBUGLOG(
+ 7,
+ "blockPos%6u: matchLength token = %u (len=%u)",
+ (unsigned)(op - (BYTE *)dst),
+ (unsigned)length, (unsigned)length + 4);
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) && (offset >= 8) &&
+ (dict == withPrefix64k ||
+ match >= lowPrefix)) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - RUN_MASK, 1);
+ if (addl == rvl_error) {
+ goto _output_error;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)(op))) {
+ goto _output_error;
+ } /* overflow detection */
+ if (unlikely((uptrval)(ip) + length <
+ (uptrval)(ip))) {
+ goto _output_error;
+ } /* overflow detection */
+ }
+
+#if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+#endif
+ /* copy literals */
+ cpy = op + length;
+
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ((cpy > oend - MFLIMIT) ||
+ (ip + length > iend - (2 + 1 + LASTLITERALS))) {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ DEBUGLOG(
+ 7,
+ "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(
+ 7,
+ "partialDecoding: literal length = %u",
+ (unsigned)length);
+ DEBUGLOG(
+ 7,
+ "partialDecoding: remaining space in dstBuffer : %i",
+ (int)(oend - op));
+ DEBUGLOG(
+ 7,
+ "partialDecoding: remaining space in srcBuffer : %i",
+ (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip + length > iend) {
+ length = (size_t)(iend - ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op <= oend);
+ length = (size_t)(oend - op);
+ }
+ } else {
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((ip + length != iend) ||
+ (cpy > oend)) {
+ DEBUGLOG(
+ 5,
+ "should have been last run of literals")
+ DEBUGLOG(
+ 5,
+ "ip(%p) + length(%i) = %p != iend (%p)",
+ ip, (int)length,
+ ip + length, iend);
+ DEBUGLOG(
+ 5,
+ "or cpy(%p) > (oend-MFLIMIT)(%p)",
+ cpy, oend - MFLIMIT);
+ DEBUGLOG(
+ 5,
+ "after writing %u bytes / %i bytes available",
+ (unsigned)(op -
+ (BYTE *)dst),
+ outputSize);
+ goto _output_error;
+ }
+ }
+ LZ4_memmove(
+ op, ip,
+ length); /* supports overlapping memory regions, for in-place decompression scenarios */
+ ip += length;
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) ||
+ (ip >= (iend - 2))) {
+ break;
+ }
+ } else {
+ LZ4_wildCopy8(
+ op, ip,
+ cpy); /* can overwrite up to 8 bytes beyond cpy */
+ ip += length;
+ op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+ DEBUGLOG(7, "blockPos%6u: matchLength token = %u",
+ (unsigned)(op - (BYTE *)dst),
+ (unsigned)length);
+
+ _copy_match:
+ if (length == ML_MASK) {
+ size_t const addl = read_variable_length(
+ &ip, iend - LASTLITERALS + 1, 0);
+ if (addl == rvl_error) {
+ goto _output_error;
+ }
+ length += addl;
+ if (unlikely((uptrval)(op) + length <
+ (uptrval)op))
+ goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+#endif
+ if ((checkOffset) &&
+ (unlikely(match + dictSize < lowPrefix)))
+ goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ assert(dictEnd != NULL);
+ if (unlikely(op + length >
+ oend - LASTLITERALS)) {
+ if (partialDecoding)
+ length = MIN(
+ length,
+ (size_t)(oend - op));
+ else
+ goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /* match fits entirely within external dictionary : just copy */
+ LZ4_memmove(op,
+ dictEnd -
+ (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize =
+ (size_t)(lowPrefix - match);
+ size_t const restSize =
+ length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize,
+ copySize);
+ op += copySize;
+ if (restSize >
+ (size_t)(op -
+ lowPrefix)) { /* overlap copy */
+ BYTE *const endOfMatch =
+ op + restSize;
+ const BYTE *copyFrom =
+ lowPrefix;
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix,
+ restSize);
+ op += restSize;
+ }
+ }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen =
+ MIN(length, (size_t)(oend - op));
+ const BYTE *const matchEnd = match + mlen;
+ BYTE *const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) {
+ *op++ = *match++;
+ }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) {
+ break;
+ }
+ continue;
+ }
+
+ if (unlikely(offset < 8)) {
+ LZ4_write32(
+ op,
+ 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op + 4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
+ }
+ op += 8;
+
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE *const oCopyLimit =
+ oend - (WILDCOPYLENGTH - 1);
+ if (cpy > oend - LASTLITERALS) {
+ goto _output_error;
+ } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) {
+ *op++ = *match++;
+ }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) {
+ LZ4_wildCopy8(op + 8, match + 8, cpy);
+ }
+ }
+ op = cpy; /* wildcopy correction */
+ }
+
+ /* end of decoding */
+ DEBUGLOG(5, "decoded %i bytes", (int)(((char *)op) - dst));
+ return (int)(((char *)op) -
+ dst); /* Nb of output bytes decoded */
+
+ /* Overflow error detected */
+ _output_error:
+ return (int)(-(((const char *)ip) - src)) - 1;
+ }
+}
+
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(const char *const src, char *const dst, int srcSize,
+ /*
+ * If endOnInput == endOnInputSize,
+ * this value is `dstCapacity`
+ */
+ int outputSize,
+ /* full, partial */
+ earlyEnd_directive partialDecoding,
+ /* noDict, withPrefix64k, usingExtDict */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
+ const BYTE *const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE *const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize)
+{
+ return __LZ4_decompress_generic(src, dst, (const BYTE *)src,
+ (BYTE *)dst, srcSize, outputSize,
+ partialDecoding, dict, lowPrefix,
+ dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
+}
+EXPORT_SYMBOL(LZ4_decompress_safe);
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize,
+ int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ partial_decode, noDict, (BYTE *)dst, NULL,
+ 0);
+}
+EXPORT_SYMBOL(LZ4_decompress_safe_partial);
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
+{
+ DEBUGLOG(5, "LZ4_decompress_fast");
+ return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
+ originalSize, 0, NULL, 0);
+}
+EXPORT_SYMBOL(LZ4_decompress_fast);
+
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+ int
+ LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
+ withPrefix64k, (BYTE *)dest - 64 KB, NULL,
+ 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withPrefix64k(const char *source,
+ char *dest,
+ int compressedSize,
+ int targetOutputSize,
+ int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, withPrefix64k,
+ (BYTE *)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest,
+ int originalSize)
+{
+ return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
+ originalSize, 64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block, noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withSmallPrefix(
+ const char *source, char *dest, int compressedSize,
+ int targetOutputSize, int dstCapacity, size_t prefixSize)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial_forceExtDict(const char *source, char *dest,
+ int compressedSize,
+ int targetOutputSize,
+ int dstCapacity,
+ const void *dictStart,
+ size_t dictSize)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+ partial_decode, usingExtDict,
+ (BYTE *)dest, (const BYTE *)dictStart,
+ dictSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize, const void *dictStart,
+ size_t dictSize)
+{
+ return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
+ originalSize, 0,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void *dictStart,
+ size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_streamDecode_t *LZ4_createStreamDecode(void)
+{
+ LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >=
+ sizeof(LZ4_streamDecode_t_internal));
+ return (LZ4_streamDecode_t *)ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+}
+
+int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream)
+{
+ if (LZ4_stream == NULL) {
+ return 0;
+ } /* support free on NULL */
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+#endif
+
+/*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
+ lz4sd->prefixSize = (size_t)dictSize;
+ if (dictSize) {
+ assert(dictionary != NULL);
+ lz4sd->prefixEnd = (const BYTE *)dictionary + dictSize;
+ } else {
+ lz4sd->prefixEnd = (const BYTE *)dictionary;
+ }
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+EXPORT_SYMBOL(LZ4_setStreamDecode);
+
+/*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for next block (remainingSize < maxBlockSize),
+ * decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0)
+ return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE)
+ return 0;
+ if (maxBlockSize < 16)
+ maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If it's not possible, save the relevant part of decoded data into a safe buffer,
+ and indicate where it stands using LZ4_setStreamDecode()
+*/
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+#if defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON)
+ result = LZ4_arm64_decompress_safe(source, dest, compressedSize,
+ maxOutputSize, false);
+#else
+ result = LZ4_decompress_safe(source, dest, compressedSize,
+ maxOutputSize);
+#endif
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(
+ source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(
+ source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(
+ source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict,
+ lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize += (size_t)result;
+ lz4sd->prefixEnd += result;
+ } else {
+ /* The buffer wraps around, or they're switching to another buffer. */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_safe_forceExtDict(
+ source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(LZ4_decompress_safe_continue);
+
+LZ4_FORCE_O2 ssize_t LZ4_arm64_decompress_safe_partial(const void *source,
+ void *dest,
+ size_t inputSize,
+ size_t outputSize,
+ bool dip)
+{
+ uint8_t *dstPtr = dest;
+ const uint8_t *srcPtr = source;
+ ssize_t ret;
+
+#ifdef __ARCH_HAS_LZ4_ACCELERATOR
+ /* Go fast if we can, keeping away from the end of buffers */
+ if (outputSize > LZ4_FAST_MARGIN && inputSize > LZ4_FAST_MARGIN &&
+ lz4_decompress_accel_enable()) {
+ ret = lz4_decompress_asm(
+ &dstPtr, dest, dest + outputSize - LZ4_FAST_MARGIN,
+ &srcPtr, source + inputSize - LZ4_FAST_MARGIN, dip);
+ if (ret)
+ return -EIO;
+ }
+#endif
+ /* Finish in safe */
+ return __LZ4_decompress_generic(source, dest, srcPtr, dstPtr, inputSize,
+ outputSize, partial_decode, noDict,
+ (BYTE *)dest, NULL, 0);
+}
+EXPORT_SYMBOL(LZ4_arm64_decompress_safe_partial);
+
+LZ4_FORCE_O2 ssize_t LZ4_arm64_decompress_safe(const void *source, void *dest,
+ size_t inputSize,
+ size_t outputSize, bool dip)
+{
+ uint8_t *dstPtr = dest;
+ const uint8_t *srcPtr = source;
+ ssize_t ret;
+
+#ifdef __ARCH_HAS_LZ4_ACCELERATOR
+ /* Go fast if we can, keeping away from the end of buffers */
+ if (outputSize > LZ4_FAST_MARGIN && inputSize > LZ4_FAST_MARGIN &&
+ lz4_decompress_accel_enable()) {
+ ret = lz4_decompress_asm(
+ &dstPtr, dest, dest + outputSize - LZ4_FAST_MARGIN,
+ &srcPtr, source + inputSize - LZ4_FAST_MARGIN, dip);
+ if (ret)
+ return -EIO;
+ }
+#endif
+ /* Finish in safe */
+ return __LZ4_decompress_generic(source, dest, srcPtr, dstPtr, inputSize,
+ outputSize, decode_full_block, noDict,
+ (BYTE *)dest, NULL, 0);
+}
+EXPORT_SYMBOL(LZ4_arm64_decompress_safe);
+
+LZ4_FORCE_O2 int
+LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal *const lz4sd =
+ (assert(LZ4_streamDecode != NULL),
+ &LZ4_streamDecode->internal_donotuse);
+ int result;
+
+ DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)",
+ originalSize);
+ assert(originalSize >= 0);
+
+ if (lz4sd->prefixSize == 0) {
+ DEBUGLOG(5, "first invocation : no prefix nor extDict");
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ DEBUGLOG(5, "continue using existing prefix");
+ result = LZ4_decompress_unsafe_generic(
+ (const BYTE *)source, (BYTE *)dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict,
+ lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize += (size_t)originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ DEBUGLOG(5, "prefix becomes extDict");
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict,
+ lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(LZ4_decompress_fast_continue);
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as "_continue" ones,
+ the dictionary must be explicitly provided within parameters
+*/
+
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
+{
+ if (dictSize == 0)
+ return LZ4_decompress_safe(source, dest, compressedSize,
+ maxOutputSize);
+ if (dictStart + dictSize == dest) {
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(
+ source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize,
+ maxOutputSize,
+ (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize,
+ maxOutputSize, dictStart,
+ (size_t)dictSize);
+}
+EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
+
+int LZ4_decompress_safe_partial_usingDict(const char *source, char *dest,
+ int compressedSize,
+ int targetOutputSize, int dstCapacity,
+ const char *dictStart, int dictSize)
+{
+ if (dictSize == 0)
+ return LZ4_decompress_safe_partial(source, dest, compressedSize,
+ targetOutputSize,
+ dstCapacity);
+ if (dictStart + dictSize == dest) {
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_partial_withPrefix64k(
+ source, dest, compressedSize, targetOutputSize,
+ dstCapacity);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_partial_withSmallPrefix(
+ source, dest, compressedSize, targetOutputSize,
+ dstCapacity, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_partial_forceExtDict(
+ source, dest, compressedSize, targetOutputSize, dstCapacity,
+ dictStart, (size_t)dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart,
+ int dictSize)
+{
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_unsafe_generic((const BYTE *)source,
+ (BYTE *)dest, originalSize,
+ (size_t)dictSize, NULL, 0);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, (size_t)dictSize);
+}
+EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
+
+/*
+These decompression functions are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+int LZ4_uncompress(const char *source, char *dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
+ int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState(void)
+{
+ return sizeof(LZ4_stream_t);
+}
+
+int LZ4_resetStreamState(void *state, char *inputBuffer)
+{
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t *)state);
+ return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void *LZ4_create(char *inputBuffer)
+{
+ (void)inputBuffer;
+ return LZ4_createStream();
+}
+#endif
+
+char *LZ4_slideInputBuffer(void *state)
+{
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t *)state)
+ ->internal_donotuse.dictionary;
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
Index: lib/lz4/lz4.h
===================================================================
diff --git a/lib/lz4/lz4.h b/lib/lz4/lz4.h
new file mode 100644
--- /dev/null (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/lib/lz4/lz4.h (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -0,0 +1,984 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Header File
+ * Copyright (C) 2011-2023, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
+/**
+ Introduction
+
+ LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
+ scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
+ multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
+ It gives full buffer control to user.
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ Exact metadata depends on exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes block's compressed size, and maximum bound of decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+ lz4.h only handles blocks, it cannot generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
+*/
+
+#include <linux/types.h>  /* NOTE(review): header names were lost in extraction; restored best guess — confirm against original patch */
+#include <linux/string.h>
+#include <linux/module.h>
+
+#include "lz4armv8/lz4accel.h"
+
+#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
+/*
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
+*/
+#ifndef LZ4LIB_VISIBILITY
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4LIB_VISIBILITY __attribute__((visibility("default")))
+#else
+#define LZ4LIB_VISIBILITY
+#endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT == 1)
+#define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT == 1)
+#define LZ4LIB_API \
+ __declspec(dllimport) \
+ LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+#define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*-************************************
+* Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#define LZ4_memset(dst, src, size) __builtin_memset(dst, src, size)
+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
+/*! LZ4_FREESTANDING :
+ * When this macro is set to 1, it enables "freestanding mode" that is
+ * suitable for typical freestanding environment which doesn't support
+ * standard C library.
+ *
+ * - LZ4_FREESTANDING is a compile-time switch.
+ * - It requires the following macros to be defined:
+ * LZ4_memcpy, LZ4_memmove, LZ4_memset.
+ * - It only enables LZ4/HC functions which don't use heap.
+ * All LZ4F_* functions are not supported.
+ * - See tests/freestanding.c to check its basic setup.
+ */
+#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
+#define LZ4_HEAPMODE 1
+#define LZ4HC_HEAPMODE 1
+#define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
+#if !defined(LZ4_memcpy)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
+#endif
+#if !defined(LZ4_memset)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memset'."
+#endif
+#if !defined(LZ4_memmove)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memmove'."
+#endif
+#elif !defined(LZ4_FREESTANDING)
+#define LZ4_FREESTANDING 0
+#endif
+
+/*------ Version ------*/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER \
+ (LZ4_VERSION_MAJOR * 100 * 100 + LZ4_VERSION_MINOR * 100 + \
+ LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING \
+ LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */
+
+LZ4LIB_API int LZ4_versionNumber(
+ void); /**< library version number; useful to check dll version; requires v1.3.0+ */
+LZ4LIB_API const char *LZ4_versionString(
+ void); /**< library version string; useful to check dll version; requires v1.7.5+ */
+
+/*-************************************
+* Tuning memory usage
+**************************************/
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Can be selected at compile time, by setting LZ4_MEMORY_USAGE.
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB)
+ * Increasing memory usage improves compression ratio, generally at the cost of speed.
+ * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
+ * Default value is 14, for 16KB, which nicely fits into most L1 caches.
+ */
+#ifndef LZ4_MEMORY_USAGE
+#define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
+#endif
+
+/* These are absolute limits, they should not be changed by users */
+#define LZ4_MEMORY_USAGE_MIN 10
+#define LZ4_MEMORY_USAGE_DEFAULT 14
+#define LZ4_MEMORY_USAGE_MAX 20
+
+#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
+#error "LZ4_MEMORY_USAGE is too small !"
+#endif
+
+#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX)
+#error "LZ4_MEMORY_USAGE is too large !"
+#endif
+
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+/*-************************************
+* Simple Functions
+**************************************/
+/*! LZ4_compress_default() :
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char *src, char *dst, int srcSize,
+ int dstCapacity, void *wrkmem);
+
+/*! LZ4_decompress_safe() :
+ * @compressedSize : is the exact complete size of the compressed block.
+ * @dstCapacity : is the size of destination buffer (which must be already allocated),
+ * presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ * it will never write outside 'dst' buffer, nor read outside 'source' buffer,
+ * even if the compressed block is maliciously modified to order the decoder to do these actions.
+ * In such case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe(const char *src, char *dst,
+ int compressedSize, int dstCapacity);
+
+/*-************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) \
+ ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? \
+ 0 : \
+ (isize) + ((isize) / 255) + 16)
+
+/*! LZ4_compressBound() :
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
+ or 0, if input size is incorrect (too large or negative)
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
+
+/*! LZ4_compress_fast() :
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
+ The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
+ It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
+ An acceleration value of "1" is the same as regular LZ4_compress_default()
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
+*/
+LZ4LIB_API int LZ4_compress_fast(const char *src, char *dst, int srcSize,
+ int dstCapacity, int acceleration, void *wrkmem);
+
+/*! LZ4_compress_fast_extState() :
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
+ * Use LZ4_sizeofState() to know how much memory must be allocated,
+ * and allocate it on 8-bytes boundaries (using `malloc()` typically).
+ * Then, provide this buffer as `void* state` to compression function.
+ */
+LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_compress_fast_extState(void *state, const char *src,
+ char *dst, int srcSize,
+ int dstCapacity, int acceleration);
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'dstCapacity'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fill 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : in+out parameter. Initially contains size of input.
+ * Will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
+ * New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ * the produced compressed content could, in specific circumstances,
+ * require to be decompressed into a destination buffer larger
+ * by at least 1 byte than the content to decompress.
+ * If an application uses `LZ4_compress_destSize()`,
+ * it's highly recommended to update liblz4 to v1.9.2 or better.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize(const char *src, char *dst,
+ int *srcSizePtr, int targetDstSize, void *wrkmem);
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ * If source stream is detected malformed, function returns a negative result.
+ *
+ * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ * so dstCapacity is kind of redundant.
+ * This is because in older versions of this function,
+ * decoding operation would still write complete sequences.
+ * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
+ * it could write more bytes, though only up to dstCapacity.
+ * Some "margin" used to be required for this operation to work properly.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int srcSize, int targetOutputSize,
+ int dstCapacity);
+
+/*-*********************************************
+* Streaming Compression Functions
+***********************************************/
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+
+/*!
+ Note about RC_INVOKED
+
+ - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
+ https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros
+
+ - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars)
+ and reports warning "RC4011: identifier truncated".
+
+ - To eliminate the warning, we surround long preprocessor symbol with
+ "#if !defined(RC_INVOKED) ... #endif" block that means
+ "skip this block when rc.exe is trying to read it".
+*/
+#if !defined( \
+ RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_stream_t *LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream(LZ4_stream_t *streamPtr);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * A same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ * in the context of streaming compression.
+ * The *extState* functions perform their own resets.
+ * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
+ */
+LZ4LIB_API void LZ4_resetStream_fast(LZ4_stream_t *streamPtr);
+
+/*! LZ4_loadDict() :
+ * Use this function to reference a static dictionary into LZ4_stream_t.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on decompression side for successful decoding.
+ * Dictionary are useful for better compression of small data (KB range).
+ * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic.
+ * When in doubt, employ the Zstandard's Dictionary Builder.
+ * Loading a size of 0 is allowed, and is the same as reset.
+ * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
+ */
+LZ4LIB_API int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
+ int dictSize);
+
+/*! LZ4_loadDictSlow() : v1.10.0+
+ * Same as LZ4_loadDict(),
+ * but uses a bit more cpu to reference the dictionary content more thoroughly.
+ * This is expected to slightly improve compression ratio.
+ * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions.
+ * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
+ */
+LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t *streamPtr, const char *dictionary,
+ int dictSize);
+
+/*! LZ4_attach_dictionary() : stable since v1.10.0
+ *
+ * This allows efficient re-use of a static dictionary multiple times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references @dictionaryStream in-place.
+ *
+ * Several assumptions are made about the state of @dictionaryStream.
+ * Currently, only states which have been prepared by LZ4_loadDict() or
+ * LZ4_loadDictSlow() should be expected to work.
+ *
+ * Alternatively, the provided @dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared.
+ * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the compression session.
+ *
+ * Note: there is no equivalent LZ4_attach_*() method on the decompression side
+ * because there is no initialization cost, hence no need to share the cost across multiple sessions.
+ * To decompress LZ4 blocks using dictionary, attached or not,
+ * just employ the regular LZ4_setStreamDecode() for streaming,
+ * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression.
+ */
+LZ4LIB_API void LZ4_attach_dictionary(LZ4_stream_t *workingStream,
+ const LZ4_stream_t *dictionaryStream);
+
+/*! LZ4_compress_fast_continue() :
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ * or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
+ * Each block has precise boundaries.
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ * Make sure that buffers are separated, by at least one byte.
+ * This construction ensures that each block only depends on previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
+ */
+LZ4LIB_API int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr,
+ const char *src, char *dst,
+ int srcSize, int dstCapacity,
+ int acceleration);
+
+/*! LZ4_saveDict() :
+ * If last 64KB data cannot be guaranteed to remain available at its current memory location,
+ * save it into a safer place (char* safeBuffer).
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
+ */
+LZ4LIB_API int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer,
+ int maxDictSize);
+
+/*-**********************************************
+* Streaming Decompression Functions
+* Bufferless synchronous API
+************************************************/
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
+
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
+#if !defined( \
+ RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_streamDecode_t *LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_setStreamDecode() :
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
+ * @return : 1 if OK, 0 if error
+ */
+LZ4LIB_API int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize);
+
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize),
+ * at which stage it resumes from beginning of ring buffer.
+ * When setting such a ring buffer for streaming decompression,
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) \
+ (65536 + 14 + \
+ (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_safe_continue() :
+ * This decoding function allows decompression of consecutive blocks in "streaming" mode.
+ * The difference with the usual independent blocks is that
+ * new blocks are allowed to find references into former blocks.
+ * A block is an unsplittable entity, and must be presented entirely to the decompression function.
+ * LZ4_decompress_safe_continue() only accepts one block at a time.
+ * It's modeled after `LZ4_decompress_safe()` and behaves similarly.
+ *
+ * @LZ4_streamDecode : decompression state, tracking the position in memory of past data
+ * @compressedSize : exact complete size of one compressed block.
+ * @dstCapacity : size of destination buffer (which must be already allocated),
+ * must be an upper bound of decompressed size.
+ * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ *
+ * The last 64KB of previously decoded data *must* remain available and unmodified
+ * at the memory position where they were previously decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized.
+ * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ * Decompression buffer size is _exactly_ the same as compression buffer size,
+ * and follows exactly same update rule (block boundaries at same positions),
+ * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized,
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *
+ * Whenever these conditions are not possible,
+ * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
+ * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
+*/
+LZ4LIB_API int
+LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *src, char *dst, int srcSize,
+ int dstCapacity);
+
+LZ4LIB_API ssize_t LZ4_arm64_decompress_safe_partial(const void *source,
+ void *dest,
+ size_t inputSize,
+ size_t outputSize,
+ bool dip);
+
+LZ4LIB_API ssize_t LZ4_arm64_decompress_safe(const void *source, void *dest,
+ size_t inputSize,
+ size_t outputSize, bool dip);
+
+/*! LZ4_decompress_safe_usingDict() :
+ * Works the same as
+ * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue()
+ * However, it's stateless: it doesn't need any LZ4_streamDecode_t state.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_usingDict(const char *src, char *dst,
+ int srcSize, int dstCapacity,
+ const char *dictStart,
+ int dictSize);
+
+/*! LZ4_decompress_safe_partial_usingDict() :
+ * Behaves the same as LZ4_decompress_safe_partial()
+ * with the added ability to specify a memory segment for past data.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial_usingDict(
+ const char *src, char *dst, int compressedSize, int targetOutputSize,
+ int maxOutputSize, const char *dictStart, int dictSize);
+
+#endif /* LZ4_H_2983827168210 */
+
+/*^*************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int
+LZ4_compress_fast_extState_fastReset(void *state, const char *src, char *dst,
+ int srcSize, int dstCapacity,
+ int acceleration);
+
+/*! LZ4_compress_destSize_extState() : introduced in v1.10.0
+ * Same as LZ4_compress_destSize(), but using an externally allocated state.
+ * Also: exposes @acceleration
+ */
+int LZ4_compress_destSize_extState(void *state, const char *src, char *dst,
+ int *srcSizePtr, int targetDstSize,
+ int acceleration);
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires input to lay at the end of the buffer,
+ * and decompression to start at beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * which size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) \
+ (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) \
+ ((decompressedSize) + \
+ LZ4_DECOMPRESS_INPLACE_MARGIN( \
+ decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN \
+ (LZ4_DISTANCE_MAX + \
+ 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) \
+ ((maxCompressedSize) + \
+ LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ * Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE - 2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 \
+ (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#include <stdint.h>
+#include <stddef.h>
+typedef int8_t LZ4_i8;
+typedef uint8_t LZ4_byte;
+typedef uint16_t LZ4_u16;
+typedef uint32_t LZ4_u32;
+
+/*! LZ4_stream_t :
+ * Never ever use below internal definitions directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_stream_t object.
+**/
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+ const LZ4_byte *dictionary;
+ const LZ4_stream_t_internal *dictCtx;
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ LZ4_u32 dictSize;
+ /* Implicit padding to ensure structure is aligned */
+};
+
+#define LZ4_STREAM_MINSIZE \
+ ((1UL << (LZ4_MEMORY_USAGE)) + \
+ 32) /* static size, for inter-version compatibility */
+union LZ4_stream_u {
+ char minStateSize[LZ4_STREAM_MINSIZE];
+ LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ * In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+**/
+LZ4LIB_API LZ4_stream_t *LZ4_initStream(void *stateBuffer, size_t size);
+
+/*! LZ4_streamDecode_t :
+ * Never ever use below internal definitions directly !
+ * These definitions are not API/ABI safe, and may change in future versions.
+ * If you need static allocation, declare or allocate an LZ4_streamDecode_t object.
+**/
+typedef struct {
+ const LZ4_byte *externalDict;
+ const LZ4_byte *prefixEnd;
+ size_t extDictSize;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+#define LZ4_STREAMDECODE_MINSIZE 32
+union LZ4_streamDecode_u {
+ char minStateSize[LZ4_STREAMDECODE_MINSIZE];
+ LZ4_streamDecode_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamDecode_t */
+
+/*-************************************
+* Obsolete Functions
+**************************************/
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
+ */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+#if defined(__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#define LZ4_DEPRECATED(message) [[deprecated(message)]]
+#elif defined(_MSC_VER)
+#define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+#elif defined(__clang__) || \
+ (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+#define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+#elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+#define LZ4_DEPRECATED(message) __attribute__((deprecated))
+#else
+#pragma message( \
+ "WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+#define LZ4_DEPRECATED(message) /* disabled */
+#endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
+LZ4_DEPRECATED("use LZ4_compress_default() instead")
+LZ4LIB_API int LZ4_compress(const char *src, char *dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead")
+LZ4LIB_API int LZ4_compress_limitedOutput(const char *src, char *dest,
+ int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead")
+LZ4LIB_API int LZ4_compress_withState(void *state, const char *source,
+ char *dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead")
+LZ4LIB_API int LZ4_compress_limitedOutput_withState(void *state,
+ const char *source,
+ char *dest, int inputSize,
+ int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead")
+LZ4LIB_API int LZ4_compress_continue(LZ4_stream_t *LZ4_streamPtr,
+ const char *source, char *dest,
+ int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead")
+LZ4LIB_API int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_streamPtr,
+ const char *source,
+ char *dest, int inputSize,
+ int maxOutputSize);
+
+/*! Obsolete decompression functions (since v1.8.0) */
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead")
+LZ4LIB_API int LZ4_uncompress(const char *source, char *dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
+ int isize, int maxOutputSize);
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead")
+LZ4LIB_API void *LZ4_create(char *inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead")
+LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead")
+LZ4LIB_API int LZ4_resetStreamState(void *state, char *inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead")
+LZ4LIB_API char *LZ4_slideInputBuffer(void *state);
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_safe_withPrefix64k(const char *src, char *dst,
+ int compressedSize,
+ int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_withPrefix64k(const char *src, char *dst,
+ int originalSize);
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+ * These functions used to be faster than LZ4_decompress_safe(),
+ * but this is no longer the case. They are now slower.
+ * This is because LZ4_decompress_fast() doesn't know the input size,
+ * and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
+ * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
+ *
+ * The last remaining LZ4_decompress_fast() specificity is that
+ * it can decompress a block without knowing its compressed size.
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
+ *
+ * Parameters:
+ * originalSize : is the uncompressed size to regenerate.
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ * The function expects to finish at block's end exactly.
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too.
+ * These issues never happen if input (compressed) data is correct.
+ * But they may happen if input data is invalid (error or intentional tampering).
+ * As a consequence, use these functions in trusted environments with trusted data **only**.
+ */
+LZ4_DEPRECATED(
+ "This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead")
+LZ4LIB_API int LZ4_decompress_fast(const char *src, char *dst,
+ int originalSize);
+LZ4_DEPRECATED(
+ "This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. "
+ "Note that the contract will change (requires block's compressed size, instead of decompressed size)")
+LZ4LIB_API int
+LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *src, char *dst, int originalSize);
+LZ4_DEPRECATED(
+ "This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict(const char *src, char *dst,
+ int originalSize,
+ const char *dictStart,
+ int dictSize);
+
+/*! LZ4_resetStream() :
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is done with LZ4_initStream(), or LZ4_resetStream().
+ * Consider switching to LZ4_initStream(),
+ * invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream(LZ4_stream_t *streamPtr);
+
+#endif /* LZ4_H_98237428734687 */
+
+#if defined(__cplusplus)
+}
+#endif
Index: fs/f2fs/lz4armv8/lz4accel.c
===================================================================
diff --git a/fs/f2fs/lz4armv8/lz4accel.c b/lib/lz4/lz4armv8/lz4accel.c
rename from fs/f2fs/lz4armv8/lz4accel.c
rename to lib/lz4/lz4armv8/lz4accel.c
--- a/fs/f2fs/lz4armv8/lz4accel.c (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ b/lib/lz4/lz4armv8/lz4accel.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -46,4 +46,3 @@
__read_mostly = {
[0 ... NR_CPUS-1] = lz4_decompress_asm_select,
};
-
Index: lib/lz4/lz4hc.c
===================================================================
diff --git a/lib/lz4/lz4hc.c b/lib/lz4/lz4hc.c
new file mode 100644
--- /dev/null (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/lib/lz4/lz4hc.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -0,0 +1,2805 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+
+/* *************************************
+* Tuning Parameter
+***************************************/
+
+#include <linux/slab.h>
+#define ALLOC(size) kzalloc(size, GFP_KERNEL)
+#define FREEMEM(ptr) kfree(ptr)
+
+/*=== Dependency ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+
+/*! HEAPMODE :
+ * Select how stateless HC compression functions like `LZ4_compress_HC()`
+ * allocate memory for their workspace:
+ * in stack (0:fastest), or in heap (1:default, requires malloc()).
+ * Since workspace is rather large, heap mode is recommended.
+**/
+#ifndef LZ4HC_HEAPMODE
+#define LZ4HC_HEAPMODE 1
+#endif
+
+/*=== Shared lz4.c code ===*/
+#ifndef LZ4_SRC_INCLUDED
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wunused-function"
+#endif
+#define LZ4_COMMONDEFS_ONLY
+#include "lz4.c" /* LZ4_count, constants, mem */
+#endif
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+/*=== Constants ===*/
+#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
+#define LZ4_OPT_NUM (1 << 12)
+
+/*=== Macros ===*/
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/*=== Levels definition ===*/
+typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
+typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+} cParams_t;
+static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX + 1] = {
+ { lz4mid, 2, 16 }, /* 0, unused */
+ { lz4mid, 2, 16 }, /* 1, unused */
+ { lz4mid, 2, 16 }, /* 2 */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512, 128 }, /*11 */
+ { lz4opt, 16384, LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+};
+
+static cParams_t LZ4HC_getCLevelParams(int cLevel)
+{
+ /* note : clevel convention is a bit different from lz4frame,
+ * possibly something worth revisiting for consistency */
+ if (cLevel < 1)
+ cLevel = LZ4HC_CLEVEL_DEFAULT;
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ return k_clTable[cLevel];
+}
+
+/*=== Hashing ===*/
+#define LZ4HC_HASHSIZE 4
+#define HASH_FUNCTION(i) \
+ (((i) * 2654435761U) >> ((MINMATCH * 8) - LZ4HC_HASH_LOG))
+static U32 LZ4HC_hashPtr(const void *ptr)
+{
+ return HASH_FUNCTION(LZ4_read32(ptr));
+}
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
+/* lie to the compiler about data alignment; use with caution */
+static U64 LZ4_read64(const void *memPtr)
+{
+ return *(const U64 *)memPtr;
+}
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)
+/* __pack instructions are safer, but compiler specific */
+LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
+static U64 LZ4_read64(const void *ptr)
+{
+ return ((const LZ4_unalign64 *)ptr)->u64;
+}
+
+#else /* safe and portable access using memcpy() */
+static U64 LZ4_read64(const void *memPtr)
+{
+ U64 val;
+ LZ4_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+#define LZ4MID_HASHSIZE 8
+#define LZ4MID_HASHLOG (LZ4HC_HASH_LOG - 1)
+#define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)
+
+static U32 LZ4MID_hash4(U32 v)
+{
+ return (v * 2654435761U) >> (32 - LZ4MID_HASHLOG);
+}
+static U32 LZ4MID_hash4Ptr(const void *ptr)
+{
+ return LZ4MID_hash4(LZ4_read32(ptr));
+}
+/* note: hash7 hashes the lower 56-bits.
+ * It presumes input was read using little endian.*/
+static U32 LZ4MID_hash7(U64 v)
+{
+ return (U32)(((v << (64 - 56)) * 58295818150454627ULL) >>
+ (64 - LZ4MID_HASHLOG));
+}
+static U64 LZ4_readLE64(const void *memPtr);
+static U32 LZ4MID_hash8Ptr(const void *ptr)
+{
+ return LZ4MID_hash7(LZ4_readLE64(ptr));
+}
+
+static U64 LZ4_readLE64(const void *memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read64(memPtr);
+ } else {
+ const BYTE *p = (const BYTE *)memPtr;
+ /* note: relies on the compiler to simplify this expression */
+ return (U64)p[0] | ((U64)p[1] << 8) | ((U64)p[2] << 16) |
+ ((U64)p[3] << 24) | ((U64)p[4] << 32) |
+ ((U64)p[5] << 40) | ((U64)p[6] << 48) |
+ ((U64)p[7] << 56);
+ }
+}
+
+/*=== Count match length ===*/
+LZ4_FORCE_INLINE
+unsigned LZ4HC_NbCommonBytes32(U32 val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanReverse(&r, val);
+ return (unsigned)((31 - r) >> 3);
+#elif (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz(val) >> 3;
+#else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >>
+ 24;
+ return (unsigned)val ^ 3;
+#endif
+ } else {
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, val);
+ return (unsigned)(r >> 3);
+#elif (defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz(val) >> 3;
+#else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+#endif
+ }
+}
+
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE *const ip, const BYTE *const match,
+ const BYTE *const iMin, const BYTE *const mMin)
+{
+ int back = 0;
+ int const min = (int)MAX(iMin - ip, mMin - match);
+ assert(min <= 0);
+ assert(ip >= iMin);
+ assert((size_t)(ip - iMin) < (1U << 31));
+ assert(match >= mMin);
+ assert((size_t)(match - mMin) < (1U << 31));
+
+ while ((back - min) > 3) {
+ U32 const v = LZ4_read32(ip + back - 4) ^
+ LZ4_read32(match + back - 4);
+ if (v) {
+ return (back - (int)LZ4HC_NbCommonBytes32(v));
+ } else
+ back -= 4; /* 4-byte step */
+ }
+ /* check remainder if any */
+ while ((back > min) && (ip[back - 1] == match[back - 1]))
+ back--;
+ return back;
+}
+
+/*=== Chain table updates ===*/
+#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
+/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
+#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
+
+/**************************************
+* Init
+**************************************/
+static void LZ4HC_clearTables(LZ4HC_CCtx_internal *hc4)
+{
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+}
+
+static void LZ4HC_init_internal(LZ4HC_CCtx_internal *hc4, const BYTE *start)
+{
+ size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
+ size_t newStartingOffset = bufferSize + hc4->dictLimit;
+ DEBUGLOG(5, "LZ4HC_init_internal");
+ assert(newStartingOffset >= bufferSize); /* check overflow */
+ if (newStartingOffset > 1 GB) {
+ LZ4HC_clearTables(hc4);
+ newStartingOffset = 0;
+ }
+ newStartingOffset += 64 KB;
+ hc4->nextToUpdate = (U32)newStartingOffset;
+ hc4->prefixStart = start;
+ hc4->end = start;
+ hc4->dictStart = start;
+ hc4->dictLimit = (U32)newStartingOffset;
+ hc4->lowLimit = (U32)newStartingOffset;
+}
+
+/**************************************
+* Encode
+**************************************/
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence(const BYTE **_ip, BYTE **_op,
+					  const BYTE **_anchor, int matchLength,
+					  int offset,
+					  limitedOutput_directive limit,
+					  BYTE *oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+	size_t length;
+	BYTE *const token = op++; /* reserve token byte; filled in below */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+	static const BYTE *start = NULL;
+	static U32 totalCost = 0;
+	U32 const pos = (start == NULL) ? 0 : (U32)(anchor - start);
+	U32 const ll = (U32)(ip - anchor);
+	U32 const llAdd = (ll >= 15) ? ((ll - 15) / 255) + 1 : 0;
+	U32 const mlAdd =
+		(matchLength >= 19) ? ((matchLength - 19) / 255) + 1 : 0;
+	U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+	if (start == NULL)
+		start = anchor; /* only works for single segment */
+	/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
+	DEBUGLOG(
+		6,
+		"pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
+		pos, (U32)(ip - anchor), matchLength, offset, cost, totalCost);
+	totalCost += cost;
+#endif
+
+	/* Encode Literal length */
+	length = (size_t)(ip - anchor);
+	LZ4_STATIC_ASSERT(notLimited == 0);
+	/* Check output limit */
+	if (limit &&
+	    ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+		DEBUGLOG(
+			6,
+			"Not enough room to write %i literals (%i bytes remaining)",
+			(int)length, (int)(oend - op));
+		return 1;
+	}
+	if (length >= RUN_MASK) { /* long run: token nibble saturates, spill into extra bytes */
+		size_t len = length - RUN_MASK;
+		*token = (RUN_MASK << ML_BITS);
+		for (; len >= 255; len -= 255)
+			*op++ = 255;
+		*op++ = (BYTE)len;
+	} else {
+		*token = (BYTE)(length << ML_BITS);
+	}
+
+	/* Copy Literals */
+	LZ4_wildCopy8(op, anchor, op + length); /* room was validated by the limit check above */
+	op += length;
+
+	/* Encode Offset */
+	assert(offset <= LZ4_DISTANCE_MAX);
+	assert(offset > 0);
+	LZ4_writeLE16(op, (U16)(offset));
+	op += 2;
+
+	/* Encode MatchLength */
+	assert(matchLength >= MINMATCH);
+	length = (size_t)matchLength - MINMATCH;
+	if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+		DEBUGLOG(6, "Not enough room to write match length");
+		return 1; /* Check output limit */
+	}
+	if (length >= ML_MASK) {
+		*token += ML_MASK;
+		length -= ML_MASK;
+		for (; length >= 510; length -= 510) { /* two 255-extension bytes per iteration */
+			*op++ = 255;
+			*op++ = 255;
+		}
+		if (length >= 255) {
+			length -= 255;
+			*op++ = 255;
+		}
+		*op++ = (BYTE)length;
+	} else {
+		*token += (BYTE)(length);
+	}
+
+	/* Prepare next loop */
+	ip += matchLength;
+	anchor = ip;
+
+	return 0;
+
+#undef ip
+#undef op
+#undef anchor
+}
+
+typedef struct {
+	int off; /* match offset == distance back from current position */
+	int len; /* match length, in bytes */
+	int back; /* negative value */
+} LZ4HC_match_t;
+
+LZ4HC_match_t LZ4HC_searchExtDict(const BYTE *ip, U32 ipIndex,
+				  const BYTE *const iLowLimit,
+				  const BYTE *const iHighLimit,
+				  const LZ4HC_CCtx_internal *dictCtx,
+				  U32 gDictEndIndex, int currentBestML,
+				  int nbAttempts)
+{
+	size_t const lDictEndIndex =
+		(size_t)(dictCtx->end - dictCtx->prefixStart) +
+		dictCtx->dictLimit;
+	U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+	U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex; /* map dict-local index into global index space */
+	int offset = 0, sBack = 0;
+	assert(lDictEndIndex <= 1 GB);
+	if (lDictMatchIndex > 0)
+		DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u",
+			 lDictEndIndex, lDictMatchIndex);
+	while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
+		const BYTE *const matchPtr = dictCtx->prefixStart -
+					     dictCtx->dictLimit +
+					     lDictMatchIndex;
+
+		if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+			int mlt;
+			int back = 0;
+			const BYTE *vLimit =
+				ip + (lDictEndIndex - lDictMatchIndex); /* cannot match past end of dict */
+			if (vLimit > iHighLimit)
+				vLimit = iHighLimit;
+			mlt = (int)LZ4_count(ip + MINMATCH, matchPtr + MINMATCH,
+					     vLimit) +
+			      MINMATCH;
+			back = (ip > iLowLimit) ?
+				       LZ4HC_countBack(ip, matchPtr, iLowLimit,
+						       dictCtx->prefixStart) :
+				       0;
+			mlt -= back; /* back is negative, so this extends the match */
+			if (mlt > currentBestML) {
+				currentBestML = mlt;
+				offset = (int)(ipIndex - matchIndex);
+				sBack = back;
+				DEBUGLOG(
+					7,
+					"found match of length %i within extDictCtx",
+					currentBestML);
+			}
+		}
+
+		{
+			U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable,
+							    lDictMatchIndex); /* walk the dict's own chain */
+			lDictMatchIndex -= nextOffset;
+			matchIndex -= nextOffset;
+		}
+	}
+
+	{
+		LZ4HC_match_t md;
+		md.len = currentBestML;
+		md.off = offset;
+		md.back = sBack;
+		return md;
+	}
+}
+
+typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(
+	const BYTE *ip, U32 ipIndex, const BYTE *const iHighLimit,
+	const LZ4HC_CCtx_internal *dictCtx, U32 gDictEndIndex); /* pluggable external-dictionary search strategy */
+
+static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE *ip, U32 ipIndex,
+					 const BYTE *const iHighLimit,
+					 const LZ4HC_CCtx_internal *dictCtx,
+					 U32 gDictEndIndex)
+{
+	return LZ4HC_searchExtDict(ip, ipIndex, ip, iHighLimit, dictCtx,
+				   gDictEndIndex, MINMATCH - 1, 2); /* HC-table dict: reuse chain search, 2 attempts max */
+}
+
+static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE *ip, U32 ipIndex,
+					  const BYTE *const iHighLimit,
+					  const LZ4HC_CCtx_internal *dictCtx,
+					  U32 gDictEndIndex)
+{
+	size_t const lDictEndIndex =
+		(size_t)(dictCtx->end - dictCtx->prefixStart) +
+		dictCtx->dictLimit;
+	const U32 *const hash4Table = dictCtx->hashTable;
+	const U32 *const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE; /* hash8 table sits right after hash4 */
+	DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex);
+
+	/* search long match first */
+	{
+		U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)];
+		U32 m8Index =
+			l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex; /* dict-local -> global index */
+		assert(lDictEndIndex <= 1 GB);
+		if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) {
+			const BYTE *const matchPtr = dictCtx->prefixStart -
+						     dictCtx->dictLimit +
+						     l8DictMatchIndex;
+			const size_t safeLen =
+				MIN(lDictEndIndex - l8DictMatchIndex,
+				    (size_t)(iHighLimit - ip)); /* stay inside both dict and input */
+			int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
+			if (mlt >= MINMATCH) {
+				LZ4HC_match_t md;
+				DEBUGLOG(7,
+					 "Found long ExtDict match of len=%u",
+					 mlt);
+				md.len = mlt;
+				md.off = (int)(ipIndex - m8Index);
+				md.back = 0;
+				return md;
+			}
+		}
+	}
+
+	/* search for short match second */
+	{
+		U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)];
+		U32 m4Index =
+			l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
+		if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) {
+			const BYTE *const matchPtr = dictCtx->prefixStart -
+						     dictCtx->dictLimit +
+						     l4DictMatchIndex;
+			const size_t safeLen =
+				MIN(lDictEndIndex - l4DictMatchIndex,
+				    (size_t)(iHighLimit - ip)); /* stay inside both dict and input */
+			int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
+			if (mlt >= MINMATCH) {
+				LZ4HC_match_t md;
+				DEBUGLOG(7,
+					 "Found short ExtDict match of len=%u",
+					 mlt);
+				md.len = mlt;
+				md.off = (int)(ipIndex - m4Index);
+				md.back = 0;
+				return md;
+			}
+		}
+	}
+
+	/* nothing found */
+	{
+		LZ4HC_match_t const md = { 0, 0, 0 };
+		return md;
+	}
+}
+
+/**************************************
+* Mid Compression (level 2)
+**************************************/
+
+LZ4_FORCE_INLINE void LZ4MID_addPosition(U32 *hTable, U32 hValue, U32 index)
+{
+	hTable[hValue] = index; /* unconditional overwrite: one entry per hash slot */
+}
+
+#define ADDPOS8(_p, _idx) \
+	LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx) /* requires local hash8Table in scope */
+#define ADDPOS4(_p, _idx) \
+	LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx) /* requires local hash4Table in scope */
+
+/* Fill hash tables with references into dictionary.
+ * The resulting table is only exploitable by LZ4MID (level 2) */
+static void LZ4MID_fillHTable(LZ4HC_CCtx_internal *cctx, const void *dict,
+			      size_t size)
+{
+	U32 *const hash4Table = cctx->hashTable;
+	U32 *const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
+	const BYTE *const prefixPtr = (const BYTE *)dict;
+	U32 const prefixIdx = cctx->dictLimit;
+	U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE;
+	U32 idx = cctx->nextToUpdate;
+	assert(dict == cctx->prefixStart);
+	DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size);
+	if (size <= LZ4MID_HASHSIZE)
+		return; /* too small to index anything */
+
+	for (; idx < target; idx += 3) { /* sparse pass: 2 entries every 3 positions */
+		ADDPOS4(prefixPtr + idx - prefixIdx, idx);
+		ADDPOS8(prefixPtr + idx + 1 - prefixIdx, idx + 1);
+	}
+
+	idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB :
+						 cctx->nextToUpdate;
+	for (; idx < target; idx += 1) { /* dense hash8 pass over the last 32 KB */
+		ADDPOS8(prefixPtr + idx - prefixIdx, idx);
+	}
+
+	cctx->nextToUpdate = target;
+}
+
+static LZ4MID_searchIntoDict_f
+select_searchDict_function(const LZ4HC_CCtx_internal *dictCtx)
+{
+	if (dictCtx == NULL)
+		return NULL;
+	if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid)
+		return LZ4MID_searchExtDict; /* dict was indexed with LZ4MID tables */
+	return LZ4MID_searchHCDict; /* dict was indexed with HC hash/chain tables */
+}
+
+static int LZ4MID_compress(LZ4HC_CCtx_internal *const ctx,
+			   const char *const src, char *const dst,
+			   int *srcSizePtr, int const maxOutputSize,
+			   const limitedOutput_directive limit,
+			   const dictCtx_directive dict)
+{
+	U32 *const hash4Table = ctx->hashTable;
+	U32 *const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
+	const BYTE *ip = (const BYTE *)src;
+	const BYTE *anchor = ip;
+	const BYTE *const iend = ip + *srcSizePtr;
+	const BYTE *const mflimit = iend - MFLIMIT;
+	const BYTE *const matchlimit = (iend - LASTLITERALS);
+	const BYTE *const ilimit = (iend - LZ4MID_HASHSIZE); /* last position where hashes may be inserted */
+	BYTE *op = (BYTE *)dst;
+	BYTE *oend = op + maxOutputSize;
+
+	const BYTE *const prefixPtr = ctx->prefixStart;
+	const U32 prefixIdx = ctx->dictLimit;
+	const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx;
+	const BYTE *const dictStart = ctx->dictStart;
+	const U32 dictIdx = ctx->lowLimit;
+	const U32 gDictEndIndex = ctx->lowLimit;
+	const LZ4MID_searchIntoDict_f searchIntoDict =
+		(dict == usingDictCtxHc) ?
+			select_searchDict_function(ctx->dictCtx) :
+			NULL;
+	unsigned matchLength;
+	unsigned matchDistance;
+
+	/* input sanitization */
+	DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr);
+	if (dict == usingDictCtxHc)
+		DEBUGLOG(5, "usingDictCtxHc");
+	assert(*srcSizePtr >= 0);
+	if (*srcSizePtr)
+		assert(src != NULL);
+	if (maxOutputSize)
+		assert(dst != NULL);
+	if (*srcSizePtr < 0)
+		return 0; /* invalid */
+	if (maxOutputSize < 0)
+		return 0; /* invalid */
+	if (*srcSizePtr > LZ4_MAX_INPUT_SIZE) {
+		/* forbidden: no input is allowed to be that large */
+		return 0;
+	}
+	if (limit == fillOutput)
+		oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+	if (*srcSizePtr < LZ4_minLength)
+		goto _lz4mid_last_literals; /* Input too small, no compression (all literals) */
+
+	/* main loop */
+	while (ip <= mflimit) {
+		const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; /* global index of current position */
+		/* search long match */
+		{
+			U32 const h8 = LZ4MID_hash8Ptr(ip);
+			U32 const pos8 = hash8Table[h8];
+			assert(h8 < LZ4MID_HASHTABLESIZE);
+			assert(pos8 < ipIndex);
+			LZ4MID_addPosition(hash8Table, h8, ipIndex);
+			if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) { /* candidate within max offset */
+				/* match candidate found */
+				if (pos8 >= prefixIdx) {
+					const BYTE *const matchPtr =
+						prefixPtr + pos8 - prefixIdx;
+					assert(matchPtr < ip);
+					matchLength = LZ4_count(ip, matchPtr,
+								matchlimit);
+					if (matchLength >= MINMATCH) {
+						DEBUGLOG(
+							7,
+							"found long match at pos %u (len=%u)",
+							pos8, matchLength);
+						matchDistance = ipIndex - pos8;
+						goto _lz4mid_encode_sequence;
+					}
+				} else {
+					if (pos8 >= dictIdx) {
+						/* extDict match candidate */
+						const BYTE *const matchPtr =
+							dictStart +
+							(pos8 - dictIdx);
+						const size_t safeLen = MIN(
+							prefixIdx - pos8,
+							(size_t)(matchlimit -
+								 ip));
+						matchLength =
+							LZ4_count(ip, matchPtr,
+								  ip + safeLen);
+						if (matchLength >= MINMATCH) {
+							DEBUGLOG(
+								7,
+								"found long match at ExtDict pos %u (len=%u)",
+								pos8,
+								matchLength);
+							matchDistance =
+								ipIndex - pos8;
+							goto _lz4mid_encode_sequence;
+						}
+					}
+				}
+			}
+		}
+		/* search short match */
+		{
+			U32 const h4 = LZ4MID_hash4Ptr(ip);
+			U32 const pos4 = hash4Table[h4];
+			assert(h4 < LZ4MID_HASHTABLESIZE);
+			assert(pos4 < ipIndex);
+			LZ4MID_addPosition(hash4Table, h4, ipIndex);
+			if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) {
+				/* match candidate found */
+				if (pos4 >= prefixIdx) {
+					/* only search within prefix */
+					const BYTE *const matchPtr =
+						prefixPtr + (pos4 - prefixIdx);
+					assert(matchPtr < ip);
+					assert(matchPtr >= prefixPtr);
+					matchLength = LZ4_count(ip, matchPtr,
+								matchlimit);
+					if (matchLength >= MINMATCH) {
+						/* short match found, let's just check ip+1 for longer */
+						U32 const h8 =
+							LZ4MID_hash8Ptr(ip + 1);
+						U32 const pos8 = hash8Table[h8];
+						U32 const m2Distance =
+							ipIndex + 1 - pos8;
+						matchDistance = ipIndex - pos4;
+						if (m2Distance <=
+							    LZ4_DISTANCE_MAX &&
+						    pos8 >= prefixIdx /* only search within prefix */
+						    && likely(ip < mflimit)) {
+							const BYTE *const m2Ptr =
+								prefixPtr +
+								(pos8 -
+								 prefixIdx);
+							unsigned ml2 = LZ4_count(
+								ip + 1, m2Ptr,
+								matchlimit);
+							if (ml2 > matchLength) {
+								LZ4MID_addPosition(
+									hash8Table,
+									h8,
+									ipIndex +
+										1);
+								ip++;
+								matchLength =
+									ml2;
+								matchDistance =
+									m2Distance;
+							}
+						}
+						goto _lz4mid_encode_sequence;
+					}
+				} else {
+					if (pos4 >= dictIdx) {
+						/* extDict match candidate */
+						const BYTE *const matchPtr =
+							dictStart +
+							(pos4 - dictIdx);
+						const size_t safeLen = MIN(
+							prefixIdx - pos4,
+							(size_t)(matchlimit -
+								 ip));
+						matchLength =
+							LZ4_count(ip, matchPtr,
+								  ip + safeLen);
+						if (matchLength >= MINMATCH) {
+							DEBUGLOG(
+								7,
+								"found match at ExtDict pos %u (len=%u)",
+								pos4,
+								matchLength);
+							matchDistance =
+								ipIndex - pos4;
+							goto _lz4mid_encode_sequence;
+						}
+					}
+				}
+			}
+		}
+		/* no match found in prefix */
+		if ((dict == usingDictCtxHc) &&
+		    (ipIndex - gDictEndIndex < LZ4_DISTANCE_MAX - 8)) {
+			/* search a match into external dictionary */
+			LZ4HC_match_t dMatch =
+				searchIntoDict(ip, ipIndex, matchlimit,
+					       ctx->dictCtx, gDictEndIndex);
+			if (dMatch.len >= MINMATCH) {
+				DEBUGLOG(7,
+					 "found Dictionary match (offset=%i)",
+					 dMatch.off);
+				assert(dMatch.back == 0);
+				matchLength = (unsigned)dMatch.len;
+				matchDistance = (unsigned)dMatch.off;
+				goto _lz4mid_encode_sequence;
+			}
+		}
+		/* no match found */
+		ip += 1 + ((ip - anchor) >>
+			   9); /* skip faster over incompressible data */
+		continue;
+
+	_lz4mid_encode_sequence:
+		/* catch back */
+		while (((ip > anchor) &
+			((U32)(ip - prefixPtr) > matchDistance)) &&
+		       (unlikely(ip[-1] == ip[-(int)matchDistance - 1]))) {
+			ip--; /* extend match backward over pending literals */
+			matchLength++;
+		};
+
+		/* fill table with beginning of match */
+		ADDPOS8(ip + 1, ipIndex + 1);
+		ADDPOS8(ip + 2, ipIndex + 2);
+		ADDPOS4(ip + 1, ipIndex + 1);
+
+		/* encode */
+		{
+			BYTE *const saved_op = op;
+			/* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */
+			if (LZ4HC_encodeSequence(
+				    UPDATABLE(ip, op, anchor), (int)matchLength,
+				    (int)matchDistance, limit, oend)) {
+				op = saved_op; /* restore @op value before failed LZ4HC_encodeSequence */
+				goto _lz4mid_dest_overflow;
+			}
+		}
+
+		/* fill table with end of match */
+		{
+			U32 endMatchIdx = (U32)(ip - prefixPtr) + prefixIdx;
+			U32 pos_m2 = endMatchIdx - 2;
+			if (pos_m2 < ilimitIdx) { /* only insert while hashes stay readable */
+				if (likely(ip - prefixPtr > 5)) {
+					ADDPOS8(ip - 5, endMatchIdx - 5);
+				}
+				ADDPOS8(ip - 3, endMatchIdx - 3);
+				ADDPOS8(ip - 2, endMatchIdx - 2);
+				ADDPOS4(ip - 2, endMatchIdx - 2);
+				ADDPOS4(ip - 1, endMatchIdx - 1);
+			}
+		}
+	}
+
+_lz4mid_last_literals:
+	/* Encode Last Literals */
+	{
+		size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+		size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; /* extra bytes to encode run length */
+		size_t const totalSize = 1 + llAdd + lastRunSize;
+		if (limit == fillOutput)
+			oend += LASTLITERALS; /* restore correct value */
+		if (limit && (op + totalSize > oend)) {
+			if (limit == limitedOutput)
+				return 0; /* not enough space in @dst */
+			/* adapt lastRunSize to fill 'dest' */
+			lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+			llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+			lastRunSize -= llAdd;
+		}
+		DEBUGLOG(6, "Final literal run : %i literals",
+			 (int)lastRunSize);
+		ip = anchor +
+		     lastRunSize; /* can be != iend if limit==fillOutput */
+
+		if (lastRunSize >= RUN_MASK) {
+			size_t accumulator = lastRunSize - RUN_MASK;
+			*op++ = (RUN_MASK << ML_BITS);
+			for (; accumulator >= 255; accumulator -= 255)
+				*op++ = 255;
+			*op++ = (BYTE)accumulator;
+		} else {
+			*op++ = (BYTE)(lastRunSize << ML_BITS);
+		}
+		assert(lastRunSize <= (size_t)(oend - op));
+		LZ4_memcpy(op, anchor, lastRunSize);
+		op += lastRunSize;
+	}
+
+	/* End */
+	DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr,
+		 (int)((char *)op - dst));
+	assert(ip >= (const BYTE *)src);
+	assert(ip <= iend);
+	*srcSizePtr = (int)(ip - (const BYTE *)src); /* report how much input was consumed */
+	assert((char *)op >= dst);
+	assert(op <= oend);
+	assert((char *)op - dst < INT_MAX);
+	return (int)((char *)op - dst);
+
+_lz4mid_dest_overflow:
+	if (limit == fillOutput) {
+		/* Assumption : @ip, @anchor, @optr and @matchLength must be set correctly */
+		size_t const ll = (size_t)(ip - anchor);
+		size_t const ll_addbytes = (ll + 240) / 255;
+		size_t const ll_totalCost = 1 + ll_addbytes + ll;
+		BYTE *const maxLitPos =
+			oend - 3; /* 2 for offset, 1 for token */
+		DEBUGLOG(
+			6,
+			"Last sequence is overflowing : %u literals, %u remaining space",
+			(unsigned)ll, (unsigned)(oend - op));
+		if (op + ll_totalCost <= maxLitPos) {
+			/* ll validated; now adjust match length */
+			size_t const bytesLeftForMl =
+				(size_t)(maxLitPos - (op + ll_totalCost));
+			size_t const maxMlSize = MINMATCH + (ML_MASK - 1) +
+						 (bytesLeftForMl * 255);
+			assert(maxMlSize < INT_MAX);
+			if ((size_t)matchLength > maxMlSize)
+				matchLength = (unsigned)maxMlSize; /* truncate match to fit remaining space */
+			if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) -
+				    1 + matchLength >=
+			    MFLIMIT) {
+				DEBUGLOG(
+					6,
+					"Let's encode a last sequence (ll=%u, ml=%u)",
+					(unsigned)ll, matchLength);
+				LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+						     (int)matchLength,
+						     (int)matchDistance,
+						     notLimited, oend);
+			}
+		}
+		DEBUGLOG(6,
+			 "Let's finish with a run of literals (%u bytes left)",
+			 (unsigned)(oend - op));
+		goto _lz4mid_last_literals;
+	}
+	/* compression failed */
+	return 0;
+}
+
+/**************************************
+* HC Compression - Search
+**************************************/
+
+/* Update chains up to ip (excluded) */
+LZ4_FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4, const BYTE *ip)
+{
+	U16 *const chainTable = hc4->chainTable;
+	U32 *const hashTable = hc4->hashTable;
+	const BYTE *const prefixPtr = hc4->prefixStart;
+	U32 const prefixIdx = hc4->dictLimit;
+	U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
+	U32 idx = hc4->nextToUpdate;
+	assert(ip >= prefixPtr);
+	assert(target >= prefixIdx);
+
+	while (idx < target) {
+		U32 const h = LZ4HC_hashPtr(prefixPtr + idx - prefixIdx);
+		size_t delta = idx - hashTable[h]; /* distance to previous position with same hash */
+		if (delta > LZ4_DISTANCE_MAX)
+			delta = LZ4_DISTANCE_MAX; /* clamp: anything farther is unreachable anyway */
+		DELTANEXTU16(chainTable, idx) = (U16)delta;
+		hashTable[h] = idx;
+		idx++;
+	}
+
+	hc4->nextToUpdate = target;
+}
+
+#if defined(_MSC_VER)
+#define LZ4HC_rotl32(x, r) _rotl(x, r) /* MSVC intrinsic */
+#else
+#define LZ4HC_rotl32(x, r) ((x << r) | (x >> (32 - r))) /* portable rotate-left; r must be 1..31 here */
+#endif
+
+static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
+{
+	size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3; /* byte rotation -> bit count */
+	if (bitsToRotate == 0)
+		return pattern; /* whole-word rotation leaves pattern unchanged */
+	return LZ4HC_rotl32(pattern, (int)bitsToRotate);
+}
+
+/* LZ4HC_countPattern() :
+ * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
+static unsigned LZ4HC_countPattern(const BYTE *ip, const BYTE *const iEnd,
+				   U32 const pattern32)
+{
+	const BYTE *const iStart = ip;
+	reg_t const pattern =
+		(sizeof(pattern) == 8) ?
+			(reg_t)pattern32 +
+				(((reg_t)pattern32) << (sizeof(pattern) * 4)) : /* replicate 32-bit pattern into 64 bits */
+			pattern32;
+
+	while (likely(ip < iEnd - (sizeof(pattern) - 1))) {
+		reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
+		if (!diff) {
+			ip += sizeof(pattern);
+			continue;
+		}
+		ip += LZ4_NbCommonBytes(diff); /* count matching leading bytes of the word */
+		return (unsigned)(ip - iStart);
+	}
+
+	if (LZ4_isLittleEndian()) {
+		reg_t patternByte = pattern;
+		while ((ip < iEnd) && (*ip == (BYTE)patternByte)) {
+			ip++;
+			patternByte >>= 8;
+		}
+	} else { /* big endian */
+		U32 bitOffset = (sizeof(pattern) * 8) - 8;
+		while (ip < iEnd) {
+			BYTE const byte = (BYTE)(pattern >> bitOffset);
+			if (*ip != byte)
+				break;
+			ip++;
+			bitOffset -= 8;
+		}
+	}
+
+	return (unsigned)(ip - iStart);
+}
+
+/* LZ4HC_reverseCountPattern() :
+ * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
+ * read using natural platform endianness */
+static unsigned LZ4HC_reverseCountPattern(const BYTE *ip,
+					  const BYTE *const iLow, U32 pattern)
+{
+	const BYTE *const iStart = ip;
+
+	while (likely(ip >= iLow + 4)) { /* fast path: compare 4 bytes at a time */
+		if (LZ4_read32(ip - 4) != pattern)
+			break;
+		ip -= 4;
+	}
+	{
+		const BYTE *bytePtr = (const BYTE *)(&pattern) +
+				      3; /* works for any endianness */
+		while (likely(ip > iLow)) {
+			if (ip[-1] != *bytePtr)
+				break;
+			ip--;
+			bytePtr--;
+		}
+	}
+	return (unsigned)(iStart - ip); /* number of pattern bytes preceding ip */
+}
+
+/* LZ4HC_protectDictEnd() :
+ * Checks if the match is in the last 3 bytes of the dictionary, so reading the
+ * 4 byte MINMATCH would overflow.
+ * @returns true if the match index is okay.
+ */
+static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
+{
+	return ((U32)((dictLimit - 1) - matchIndex) >= 3); /* wraps for matchIndex >= dictLimit, which passes (prefix side is safe) */
+}
+
+typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; /* pattern-analysis state, lazily evaluated */
+typedef enum { favorCompressionRatio = 0, favorDecompressionSpeed } HCfavor_e; /* favorDecompressionSpeed skips short-offset matches */
+
+LZ4_FORCE_INLINE LZ4HC_match_t LZ4HC_InsertAndGetWiderMatch(
+	LZ4HC_CCtx_internal *const hc4, const BYTE *const ip,
+	const BYTE *const iLowLimit, const BYTE *const iHighLimit, int longest,
+	const int maxNbAttempts, const int patternAnalysis, const int chainSwap,
+	const dictCtx_directive dict, const HCfavor_e favorDecSpeed)
+{
+	U16 *const chainTable = hc4->chainTable;
+	U32 *const hashTable = hc4->hashTable;
+	const LZ4HC_CCtx_internal *const dictCtx = hc4->dictCtx;
+	const BYTE *const prefixPtr = hc4->prefixStart;
+	const U32 prefixIdx = hc4->dictLimit;
+	const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; /* global index of current position */
+	const int withinStartDistance =
+		(hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex); /* window start is within reach */
+	const U32 lowestMatchIndex = (withinStartDistance) ?
+					     hc4->lowLimit :
+					     ipIndex - LZ4_DISTANCE_MAX;
+	const BYTE *const dictStart = hc4->dictStart;
+	const U32 dictIdx = hc4->lowLimit;
+	const BYTE *const dictEnd = dictStart + prefixIdx - dictIdx;
+	int const lookBackLength = (int)(ip - iLowLimit);
+	int nbAttempts = maxNbAttempts;
+	U32 matchChainPos = 0;
+	U32 const pattern = LZ4_read32(ip); /* 4 bytes at ip, compared against every candidate */
+	U32 matchIndex;
+	repeat_state_e repeat = rep_untested;
+	size_t srcPatternLength = 0;
+	int offset = 0, sBack = 0;
+
+	DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
+	/* First Match */
+	LZ4HC_Insert(hc4,
+		     ip); /* insert all prior positions up to ip (excluded) */
+	matchIndex = hashTable[LZ4HC_hashPtr(ip)];
+	DEBUGLOG(
+		7,
+		"First candidate match for pos %u found at index %u / %u (lowestMatchIndex)",
+		ipIndex, matchIndex, lowestMatchIndex);
+
+	while ((matchIndex >= lowestMatchIndex) && (nbAttempts > 0)) {
+		int matchLength = 0;
+		nbAttempts--;
+		assert(matchIndex < ipIndex);
+		if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
+			/* do nothing:
+			 * favorDecSpeed intentionally skips matches with offset < 8 */
+		} else if (matchIndex >=
+			   prefixIdx) { /* within current Prefix */
+			const BYTE *const matchPtr =
+				prefixPtr + (matchIndex - prefixIdx);
+			assert(matchPtr < ip);
+			assert(longest >= 1);
+			if (LZ4_read16(iLowLimit + longest - 1) ==
+			    LZ4_read16(matchPtr - lookBackLength + longest -
+				       1)) { /* quick test: can this candidate beat current best? */
+				if (LZ4_read32(matchPtr) == pattern) {
+					int const back =
+						lookBackLength ?
+							LZ4HC_countBack(
+								ip, matchPtr,
+								iLowLimit,
+								prefixPtr) :
+							0;
+					matchLength =
+						MINMATCH +
+						(int)LZ4_count(ip + MINMATCH,
+							       matchPtr +
+								       MINMATCH,
+							       iHighLimit);
+					matchLength -= back; /* back is negative: extends the match */
+					if (matchLength > longest) {
+						longest = matchLength;
+						offset = (int)(ipIndex -
+							       matchIndex);
+						sBack = back;
+						DEBUGLOG(
+							7,
+							"Found match of len=%i within prefix, offset=%i, back=%i",
+							longest, offset, -back);
+					}
+				}
+			}
+		} else { /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */
+			const BYTE *const matchPtr =
+				dictStart + (matchIndex - dictIdx);
+			assert(matchIndex >= dictIdx);
+			if (likely(matchIndex <= prefixIdx - 4) &&
+			    (LZ4_read32(matchPtr) == pattern)) {
+				int back = 0;
+				const BYTE *vLimit =
+					ip + (prefixIdx - matchIndex);
+				if (vLimit > iHighLimit)
+					vLimit = iHighLimit;
+				matchLength =
+					(int)LZ4_count(ip + MINMATCH,
+						       matchPtr + MINMATCH,
+						       vLimit) +
+					MINMATCH;
+				if ((ip + matchLength == vLimit) &&
+				    (vLimit < iHighLimit)) /* match may continue across dict/prefix boundary */
+					matchLength +=
+						LZ4_count(ip + matchLength,
+							  prefixPtr,
+							  iHighLimit);
+				back = lookBackLength ?
+					       LZ4HC_countBack(ip, matchPtr,
+							       iLowLimit,
+							       dictStart) :
+					       0;
+				matchLength -= back;
+				if (matchLength > longest) {
+					longest = matchLength;
+					offset = (int)(ipIndex - matchIndex);
+					sBack = back;
+					DEBUGLOG(
+						7,
+						"Found match of len=%i within dict, offset=%i, back=%i",
+						longest, offset, -back);
+				}
+			}
+		}
+
+		if (chainSwap &&
+		    matchLength ==
+			    longest) { /* better match => select a better chain */
+			assert(lookBackLength == 0); /* search forward only */
+			if (matchIndex + (U32)longest <= ipIndex) {
+				int const kTrigger = 4;
+				U32 distanceToNextMatch = 1;
+				int const end = longest - MINMATCH + 1;
+				int step = 1;
+				int accel = 1 << kTrigger;
+				int pos;
+				for (pos = 0; pos < end; pos += step) { /* pick the in-match position with the largest chain jump */
+					U32 const candidateDist = DELTANEXTU16(
+						chainTable,
+						matchIndex + (U32)pos);
+					step = (accel++ >> kTrigger);
+					if (candidateDist >
+					    distanceToNextMatch) {
+						distanceToNextMatch =
+							candidateDist;
+						matchChainPos = (U32)pos;
+						accel = 1 << kTrigger;
+					}
+				}
+				if (distanceToNextMatch > 1) {
+					if (distanceToNextMatch > matchIndex)
+						break; /* avoid overflow */
+					matchIndex -= distanceToNextMatch;
+					continue;
+				}
+			}
+		}
+
+		{
+			U32 const distNextMatch =
+				DELTANEXTU16(chainTable, matchIndex);
+			if (patternAnalysis && distNextMatch == 1 &&
+			    matchChainPos == 0) {
+				U32 const matchCandidateIdx = matchIndex - 1;
+				/* may be a repeated pattern */
+				if (repeat == rep_untested) {
+					if (((pattern & 0xFFFF) ==
+					     (pattern >> 16)) &
+					    ((pattern & 0xFF) ==
+					     (pattern >> 24))) { /* all 4 bytes identical */
+						DEBUGLOG(
+							7,
+							"Repeat pattern detected, char %02X",
+							pattern >> 24);
+						repeat = rep_confirmed;
+						srcPatternLength =
+							LZ4HC_countPattern(
+								ip + sizeof(pattern),
+								iHighLimit,
+								pattern) +
+							sizeof(pattern);
+					} else {
+						repeat = rep_not;
+					}
+				}
+				if ((repeat == rep_confirmed) &&
+				    (matchCandidateIdx >= lowestMatchIndex) &&
+				    LZ4HC_protectDictEnd(prefixIdx,
+							 matchCandidateIdx)) {
+					const int extDict =
+						matchCandidateIdx < prefixIdx;
+					const BYTE *const matchPtr =
+						extDict ?
+							dictStart +
+								(matchCandidateIdx -
+								 dictIdx) :
+							prefixPtr +
+								(matchCandidateIdx -
+								 prefixIdx);
+					if (LZ4_read32(matchPtr) ==
+					    pattern) { /* good candidate */
+						const BYTE *const iLimit =
+							extDict ? dictEnd :
+								  iHighLimit;
+						size_t forwardPatternLength =
+							LZ4HC_countPattern(
+								matchPtr +
+									sizeof(pattern),
+								iLimit,
+								pattern) +
+							sizeof(pattern);
+						if (extDict &&
+						    matchPtr + forwardPatternLength ==
+							    iLimit) { /* pattern continues into the prefix */
+							U32 const rotatedPattern =
+								LZ4HC_rotatePattern(
+									forwardPatternLength,
+									pattern);
+							forwardPatternLength +=
+								LZ4HC_countPattern(
+									prefixPtr,
+									iHighLimit,
+									rotatedPattern);
+						}
+						{
+							const BYTE *const lowestMatchPtr =
+								extDict ?
+									dictStart :
+									prefixPtr;
+							size_t backLength =
+								LZ4HC_reverseCountPattern(
+									matchPtr,
+									lowestMatchPtr,
+									pattern);
+							size_t currentSegmentLength;
+							if (!extDict &&
+							    matchPtr - backLength ==
+								    prefixPtr &&
+							    dictIdx <
+								    prefixIdx) { /* pattern extends backward into the dict */
+								U32 const rotatedPattern = LZ4HC_rotatePattern(
+									(U32)(-(int)backLength),
+									pattern);
+								backLength += LZ4HC_reverseCountPattern(
+									dictEnd,
+									dictStart,
+									rotatedPattern);
+							}
+							/* Limit backLength not go further than lowestMatchIndex */
+							backLength =
+								matchCandidateIdx -
+								MAX(matchCandidateIdx -
+									    (U32)backLength,
+								    lowestMatchIndex);
+							assert(matchCandidateIdx -
+								       backLength >=
+							       lowestMatchIndex);
+							currentSegmentLength =
+								backLength +
+								forwardPatternLength;
+							/* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
+							if ((currentSegmentLength >=
+							     srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
+							    &&
+							    (forwardPatternLength <=
+							     srcPatternLength)) { /* haven't reached this position yet */
+								U32 const newMatchIndex =
+									matchCandidateIdx +
+									(U32)forwardPatternLength -
+									(U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
+								if (LZ4HC_protectDictEnd(
+									    prefixIdx,
+									    newMatchIndex))
+									matchIndex =
+										newMatchIndex;
+								else {
+									/* Can only happen if started in the prefix */
+									assert(newMatchIndex >=
+										       prefixIdx -
+											       3 &&
+									       newMatchIndex <
+										       prefixIdx &&
+									       !extDict);
+									matchIndex =
+										prefixIdx;
+								}
+							} else {
+								U32 const newMatchIndex =
+									matchCandidateIdx -
+									(U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
+								if (!LZ4HC_protectDictEnd(
+									    prefixIdx,
+									    newMatchIndex)) {
+									assert(newMatchIndex >=
+										       prefixIdx -
+											       3 &&
+									       newMatchIndex <
+										       prefixIdx &&
+									       !extDict);
+									matchIndex =
+										prefixIdx;
+								} else {
+									matchIndex =
+										newMatchIndex;
+									if (lookBackLength ==
+									    0) { /* no back possible */
+										size_t const maxML =
+											MIN(currentSegmentLength,
+											    srcPatternLength);
+										if ((size_t)longest <
+										    maxML) {
+											assert(prefixPtr -
+												       prefixIdx +
+												       matchIndex !=
+											       ip);
+											if ((size_t)(ip -
+												     prefixPtr) +
+												    prefixIdx -
+												    matchIndex >
+											    LZ4_DISTANCE_MAX)
+												break;
+											assert(maxML <
+											       2 GB);
+											longest = (int)
+												maxML;
+											offset =
+												(int)(ipIndex -
+												      matchIndex);
+											assert(sBack ==
+											       0);
+											DEBUGLOG(
+												7,
+												"Found repeat pattern match of len=%i, offset=%i",
+												longest,
+												offset);
+										}
+										{
+											U32 const distToNextPattern =
+												DELTANEXTU16(
+													chainTable,
+													matchIndex);
+											if (distToNextPattern >
+											    matchIndex)
+												break; /* avoid overflow */
+											matchIndex -=
+												distToNextPattern;
+										}
+									}
+								}
+							}
+						}
+						continue;
+					}
+				}
+			}
+		} /* PA optimization */
+
+		/* follow current chain */
+		matchIndex -=
+			DELTANEXTU16(chainTable, matchIndex + matchChainPos);
+
+	} /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
+
+	if (dict == usingDictCtxHc && nbAttempts > 0 && withinStartDistance) { /* continue search inside attached dictionary */
+		size_t const dictEndOffset =
+			(size_t)(dictCtx->end - dictCtx->prefixStart) +
+			dictCtx->dictLimit;
+		U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+		assert(dictEndOffset <= 1 GB);
+		matchIndex =
+			dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
+		if (dictMatchIndex > 0)
+			DEBUGLOG(
+				7,
+				"dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i",
+				dictEndOffset, dictMatchIndex,
+				(int)dictMatchIndex - (int)dictEndOffset);
+		while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX &&
+		       nbAttempts--) {
+			const BYTE *const matchPtr = dictCtx->prefixStart -
+						     dictCtx->dictLimit +
+						     dictMatchIndex;
+
+			if (LZ4_read32(matchPtr) == pattern) {
+				int mlt;
+				int back = 0;
+				const BYTE *vLimit =
+					ip + (dictEndOffset - dictMatchIndex);
+				if (vLimit > iHighLimit)
+					vLimit = iHighLimit;
+				mlt = (int)LZ4_count(ip + MINMATCH,
+						     matchPtr + MINMATCH,
+						     vLimit) +
+				      MINMATCH;
+				back = lookBackLength ?
+					       LZ4HC_countBack(
+						       ip, matchPtr, iLowLimit,
+						       dictCtx->prefixStart) :
+					       0;
+				mlt -= back;
+				if (mlt > longest) {
+					longest = mlt;
+					offset = (int)(ipIndex - matchIndex);
+					sBack = back;
+					DEBUGLOG(
+						7,
+						"found match of length %i within extDictCtx",
+						longest);
+				}
+			}
+
+			{
+				U32 const nextOffset = DELTANEXTU16(
+					dictCtx->chainTable, dictMatchIndex);
+				dictMatchIndex -= nextOffset;
+				matchIndex -= nextOffset;
+			}
+		}
+	}
+
+	{
+		LZ4HC_match_t md;
+		assert(longest >= 0);
+		md.len = longest;
+		md.off = offset;
+		md.back = sBack;
+		return md;
+	}
+}
+
+LZ4_FORCE_INLINE LZ4HC_match_t LZ4HC_InsertAndFindBestMatch(
+	LZ4HC_CCtx_internal *const hc4, /* Index table will be updated */
+	const BYTE *const ip, const BYTE *const iLimit, const int maxNbAttempts,
+	const int patternAnalysis, const dictCtx_directive dict)
+{
+	DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch");
+	/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+	 * but this won't be the case here, as we define iLowLimit==ip,
+	 * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+	return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH - 1,
+					    maxNbAttempts, patternAnalysis,
+					    0 /*chainSwap*/, dict,
+					    favorCompressionRatio); /* MINMATCH-1: any MINMATCH-long match qualifies */
+}
+
+LZ4_FORCE_INLINE int
+LZ4HC_compress_hashChain(LZ4HC_CCtx_internal *const ctx,
+ const char *const source, char *const dest,
+ int *srcSizePtr, int const maxOutputSize,
+ int maxNbAttempts, const limitedOutput_directive limit,
+ const dictCtx_directive dict)
+{
+ const int inputSize = *srcSizePtr;
+ const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
+
+ const BYTE *ip = (const BYTE *)source;
+ const BYTE *anchor = ip;
+ const BYTE *const iend = ip + inputSize;
+ const BYTE *const mflimit = iend - MFLIMIT;
+ const BYTE *const matchlimit = (iend - LASTLITERALS);
+
+ BYTE *optr = (BYTE *)dest;
+ BYTE *op = (BYTE *)dest;
+ BYTE *oend = op + maxOutputSize;
+
+ const BYTE *start0;
+ const BYTE *start2 = NULL;
+ const BYTE *start3 = NULL;
+ LZ4HC_match_t m0, m1, m2, m3;
+ const LZ4HC_match_t nomatch = { 0, 0, 0 };
+
+ /* init */
+ DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict);
+ *srcSizePtr = 0;
+ if (limit == fillOutput)
+ oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (inputSize < LZ4_minLength)
+ goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit,
+ maxNbAttempts,
+ patternAnalysis, dict);
+ if (m1.len < MINMATCH) {
+ ip++;
+ continue;
+ }
+
+ /* saved, in case we would skip too much */
+ start0 = ip;
+ m0 = m1;
+
+ _Search2:
+ DEBUGLOG(7, "_Search2 (currently found match of size %i)",
+ m1.len);
+ if (ip + m1.len <= mflimit) {
+ start2 = ip + m1.len - 2;
+ m2 = LZ4HC_InsertAndGetWiderMatch(
+ ctx, start2, ip + 0, matchlimit, m1.len,
+ maxNbAttempts, patternAnalysis, 0, dict,
+ favorCompressionRatio);
+ start2 += m2.back;
+ } else {
+ m2 = nomatch; /* do not search further */
+ }
+
+ if (m2.len <=
+ m1.len) { /* No better match => encode ML1 immediately */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off, limit, oend))
+ goto _dest_overflow;
+ continue;
+ }
+
+ if (start0 < ip) { /* first match was skipped at least once */
+ if (start2 <
+ ip + m0.len) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0;
+ m1 = m0; /* restore initial Match1 */
+ }
+ }
+
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) { /* First Match too small : removed */
+ ip = start2;
+ m1 = m2;
+ goto _Search2;
+ }
+
+ _Search3:
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = m1.len;
+ if (new_ml > OPTIMAL_ML)
+ new_ml = OPTIMAL_ML;
+ if (ip + new_ml > start2 + m2.len - MINMATCH)
+ new_ml = (int)(start2 - ip) + m2.len - MINMATCH;
+ correction = new_ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ m2.len -= correction;
+ }
+ }
+
+ if (start2 + m2.len <= mflimit) {
+ start3 = start2 + m2.len - 3;
+ m3 = LZ4HC_InsertAndGetWiderMatch(
+ ctx, start3, start2, matchlimit, m2.len,
+ maxNbAttempts, patternAnalysis, 0, dict,
+ favorCompressionRatio);
+ start3 += m3.back;
+ } else {
+ m3 = nomatch; /* do not search further */
+ }
+
+ if (m3.len <=
+ m2.len) { /* No better match => encode ML1 and ML2 */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip + m1.len)
+ m1.len = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off, limit, oend))
+ goto _dest_overflow;
+ ip = start2;
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m2.len, m2.off, limit, oend)) {
+ m1 = m2;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ if (start3 <
+ ip + m1.len +
+ 3) { /* Not enough space for match 2 : remove it */
+ if (start3 >=
+ (ip +
+ m1.len)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip + m1.len) {
+ int correction =
+ (int)(ip + m1.len - start2);
+ start2 += correction;
+ m2.len -= correction;
+ if (m2.len < MINMATCH) {
+ start2 = start3;
+ m2 = m3;
+ }
+ }
+
+ optr = op;
+ if (LZ4HC_encodeSequence(
+ UPDATABLE(ip, op, anchor), m1.len,
+ m1.off, limit, oend))
+ goto _dest_overflow;
+ ip = start3;
+ m1 = m3;
+
+ start0 = start2;
+ m0 = m2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ m2 = m3;
+ goto _Search3;
+ }
+
+ /*
+ * OK, now we have 3 ascending matches;
+ * let's write the first one ML1.
+ * ip & ref are known; Now decide ml.
+ */
+ if (start2 < ip + m1.len) {
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ if (m1.len > OPTIMAL_ML)
+ m1.len = OPTIMAL_ML;
+ if (ip + m1.len > start2 + m2.len - MINMATCH)
+ m1.len = (int)(start2 - ip) + m2.len -
+ MINMATCH;
+ correction = m1.len - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ m2.len -= correction;
+ }
+ } else {
+ m1.len = (int)(start2 - ip);
+ }
+ }
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len,
+ m1.off, limit, oend))
+ goto _dest_overflow;
+
+ /* ML2 becomes ML1 */
+ ip = start2;
+ m1 = m2;
+
+ /* ML3 becomes ML2 */
+ start2 = start3;
+ m2 = m3;
+
+ /* let's find a new ML3 */
+ goto _Search3;
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput)
+ oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput)
+ return 0;
+ /* adapt lastRunSize to fill 'dest' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals",
+ (int)lastRunSize);
+ ip = anchor +
+ lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int)(((const char *)ip) - source);
+ return (int)(((char *)op) - dest);
+
+_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE *const maxLitPos =
+ oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
+ op = optr; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl =
+ (size_t)(maxLitPos - (op + ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK - 1) +
+ (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX);
+ assert(m1.len >= 0);
+ if ((size_t)m1.len > maxMlSize)
+ m1.len = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) -
+ 1 + m1.len >=
+ MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off, notLimited,
+ oend);
+ }
+ }
+ goto _last_literals;
+ }
+ /* compression failed */
+ return 0;
+}
+
+static int LZ4HC_compress_optimal(LZ4HC_CCtx_internal *ctx,
+ const char *const source, char *dst,
+ int *srcSizePtr, int dstCapacity,
+ int const nbSearches, size_t sufficient_len,
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed);
+
+LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal(
+ LZ4HC_CCtx_internal *const ctx, const char *const src, char *const dst,
+ int *const srcSizePtr, int const dstCapacity, int cLevel,
+ const limitedOutput_directive limit, const dictCtx_directive dict)
+{
+ DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d)", src,
+ *srcSizePtr);
+
+ if (limit == fillOutput && dstCapacity < 1)
+ return 0; /* Impossible to store anything */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
+ return 0; /* Unsupported input size (too large or negative) */
+
+ ctx->end += *srcSizePtr;
+ {
+ cParams_t const cParam = LZ4HC_getCLevelParams(cLevel);
+ HCfavor_e const favor = ctx->favorDecSpeed ?
+ favorDecompressionSpeed :
+ favorCompressionRatio;
+ int result;
+
+ if (cParam.strat == lz4mid) {
+ result = LZ4MID_compress(ctx, src, dst, srcSizePtr,
+ dstCapacity, limit, dict);
+ } else if (cParam.strat == lz4hc) {
+ result = LZ4HC_compress_hashChain(
+ ctx, src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, limit, dict);
+ } else {
+ assert(cParam.strat == lz4opt);
+ result = LZ4HC_compress_optimal(
+ ctx, src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, cParam.targetLength, limit,
+ cLevel >= LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor);
+ }
+ if (result <= 0)
+ ctx->dirty = 1;
+ return result;
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock);
+
+static int LZ4HC_compress_generic_noDictCtx(LZ4HC_CCtx_internal *const ctx,
+ const char *const src,
+ char *const dst,
+ int *const srcSizePtr,
+ int const dstCapacity, int cLevel,
+ limitedOutput_directive limit)
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr,
+ dstCapacity, cLevel, limit,
+ noDictCtx);
+}
+
+static int isStateCompatible(const LZ4HC_CCtx_internal *ctx1,
+ const LZ4HC_CCtx_internal *ctx2)
+{
+ int const isMid1 =
+ LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid;
+ int const isMid2 =
+ LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid;
+ return !(isMid1 ^ isMid2);
+}
+
+static int LZ4HC_compress_generic_dictCtx(LZ4HC_CCtx_internal *const ctx,
+ const char *const src,
+ char *const dst,
+ int *const srcSizePtr,
+ int const dstCapacity, int cLevel,
+ limitedOutput_directive limit)
+{
+ const size_t position = (size_t)(ctx->end - ctx->prefixStart) +
+ (ctx->dictLimit - ctx->lowLimit);
+ assert(ctx->dictCtx != NULL);
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(
+ ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB &&
+ isStateCompatible(ctx, ctx->dictCtx)) {
+ LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(
+ ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst,
+ srcSizePtr, dstCapacity,
+ cLevel, limit,
+ usingDictCtxHc);
+ }
+}
+
+static int LZ4HC_compress_generic(LZ4HC_CCtx_internal *const ctx,
+ const char *const src, char *const dst,
+ int *const srcSizePtr, int const dstCapacity,
+ int cLevel, limitedOutput_directive limit)
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(
+ ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(
+ ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ }
+}
+
+int LZ4_sizeofStateHC(void)
+{
+ return (int)sizeof(LZ4_streamHC_t);
+}
+
+static size_t LZ4_streamHC_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct {
+ char c;
+ LZ4_streamHC_t t;
+ } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+/* state is presumed correctly initialized,
+ * in which case its size and alignment have already been validated */
+int LZ4_compress_HC_extStateHC_fastReset(void *state, const char *src,
+ char *dst, int srcSize,
+ int dstCapacity, int compressionLevel)
+{
+ LZ4HC_CCtx_internal *const ctx =
+ &((LZ4_streamHC_t *)state)->internal_donotuse;
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment()))
+ return 0;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t *)state, compressionLevel);
+ LZ4HC_init_internal(ctx, (const BYTE *)src);
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic(ctx, src, dst, &srcSize,
+ dstCapacity, compressionLevel,
+ limitedOutput);
+ else
+ return LZ4HC_compress_generic(ctx, src, dst, &srcSize,
+ dstCapacity, compressionLevel,
+ notLimited);
+}
+
+int LZ4_compress_HC_extStateHC(void *state, const char *src, char *dst,
+ int srcSize, int dstCapacity,
+ int compressionLevel)
+{
+ LZ4_streamHC_t *const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx == NULL)
+ return 0; /* init failure */
+ return LZ4_compress_HC_extStateHC_fastReset(
+ state, src, dst, srcSize, dstCapacity, compressionLevel);
+}
+
+int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
+ int compressionLevel, void *wrkmem)
+{
+ DEBUGLOG(5, "LZ4_compress_HC")
+ return LZ4_compress_HC_extStateHC(wrkmem, src, dst, srcSize,
+ dstCapacity, compressionLevel);
+}
+EXPORT_SYMBOL(LZ4_compress_HC);
+
+/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
+int LZ4_compress_HC_destSize(void *state, const char *source, char *dest,
+ int *sourceSizePtr, int targetDestSize, int cLevel)
+{
+ LZ4_streamHC_t *const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx == NULL)
+ return 0; /* init failure */
+ LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE *)source);
+ LZ4_setCompressionLevel(ctx, cLevel);
+ return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest,
+ sourceSizePtr, targetDestSize, cLevel,
+ fillOutput);
+}
+
+/**************************************
+* Streaming Functions
+**************************************/
+/* allocation */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_streamHC_t *LZ4_createStreamHC(void)
+{
+ LZ4_streamHC_t *const state =
+ (LZ4_streamHC_t *)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL)
+ return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
+}
+
+int LZ4_freeStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr)
+{
+ DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
+ if (!LZ4_streamHCPtr)
+ return 0; /* support free on NULL */
+ FREEMEM(LZ4_streamHCPtr);
+ return 0;
+}
+#endif
+
+LZ4_streamHC_t *LZ4_initStreamHC(void *buffer, size_t size)
+{
+ LZ4_streamHC_t *const LZ4_streamHCPtr = (LZ4_streamHC_t *)buffer;
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
+ if (buffer == NULL)
+ return NULL;
+ if (size < sizeof(LZ4_streamHC_t))
+ return NULL;
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment()))
+ return NULL;
+ /* init */
+ {
+ LZ4HC_CCtx_internal *const hcstate =
+ &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate));
+ }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
+ return LZ4_streamHCPtr;
+}
+
+/* just a stub */
+void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_resetStreamHC_fast(LZ4_streamHC_t *LZ4_streamHCPtr,
+ int compressionLevel)
+{
+ LZ4HC_CCtx_internal *const s = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr,
+ compressionLevel);
+ if (s->dirty) {
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ } else {
+ assert(s->end >= s->prefixStart);
+ s->dictLimit += (U32)(s->end - s->prefixStart);
+ s->prefixStart = NULL;
+ s->end = NULL;
+ s->dictCtx = NULL;
+ }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_setCompressionLevel(LZ4_streamHC_t *LZ4_streamHCPtr,
+ int compressionLevel)
+{
+ DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr,
+ compressionLevel);
+ if (compressionLevel < 1)
+ compressionLevel = LZ4HC_CLEVEL_DEFAULT;
+ if (compressionLevel > LZ4HC_CLEVEL_MAX)
+ compressionLevel = LZ4HC_CLEVEL_MAX;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel =
+ (short)compressionLevel;
+}
+
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t *LZ4_streamHCPtr, int favor)
+{
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor != 0);
+}
+
+/* LZ4_loadDictHC() :
+ * LZ4_streamHCPtr is presumed properly initialized */
+int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr, const char *dictionary,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ cParams_t cp;
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)",
+ LZ4_streamHCPtr, dictionary, dictSize,
+ ctxPtr->compressionLevel);
+ assert(dictSize >= 0);
+ assert(LZ4_streamHCPtr != NULL);
+ if (dictSize > 64 KB) {
+ dictionary += (size_t)dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ /* need a full initialization, there are bad side-effects when using resetFast() */
+ {
+ int const cLevel = ctxPtr->compressionLevel;
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
+ cp = LZ4HC_getCLevelParams(cLevel);
+ }
+ LZ4HC_init_internal(ctxPtr, (const BYTE *)dictionary);
+ ctxPtr->end = (const BYTE *)dictionary + dictSize;
+ if (cp.strat == lz4mid) {
+ LZ4MID_fillHTable(ctxPtr, dictionary, (size_t)dictSize);
+ } else {
+ if (dictSize >= LZ4HC_HASHSIZE)
+ LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
+ }
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDictHC);
+
+void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream,
+ const LZ4_streamHC_t *dictionary_stream)
+{
+ working_stream->internal_donotuse.dictCtx =
+ dictionary_stream != NULL ?
+ &(dictionary_stream->internal_donotuse) :
+ NULL;
+}
+
+/* compression */
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock)
+{
+ DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
+ if ((ctxPtr->end >= ctxPtr->prefixStart + 4) &&
+ (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid)) {
+ LZ4HC_Insert(
+ ctxPtr,
+ ctxPtr->end -
+ 3); /* Referencing remaining dictionary content */
+ }
+
+ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
+ ctxPtr->prefixStart = newBlock;
+ ctxPtr->end = newBlock;
+ ctxPtr->nextToUpdate =
+ ctxPtr->dictLimit; /* match referencing will resume from there */
+
+ /* cannot reference an extDict and a dictCtx at the same time */
+ ctxPtr->dictCtx = NULL;
+}
+
+static int LZ4_compressHC_continue_generic(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *src, char *dst,
+ int *srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal *const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(
+ 5,
+ "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
+ assert(ctxPtr != NULL);
+ /* auto-init if forgotten */
+ if (ctxPtr->prefixStart == NULL)
+ LZ4HC_init_internal(ctxPtr, (const BYTE *)src);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit >
+ 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
+ if (dictSize > 64 KB)
+ dictSize = 64 KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr,
+ (const char *)(ctxPtr->end) - dictSize,
+ (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE *)src != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE *)src);
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *)src + *srcSizePtr;
+ const BYTE *const dictBegin = ctxPtr->dictStart;
+ const BYTE *const dictEnd =
+ ctxPtr->dictStart +
+ (ctxPtr->dictLimit - ctxPtr->lowLimit);
+ if ((sourceEnd > dictBegin) && ((const BYTE *)src < dictEnd)) {
+ if (sourceEnd > dictEnd)
+ sourceEnd = dictEnd;
+ ctxPtr->lowLimit +=
+ (U32)(sourceEnd - ctxPtr->dictStart);
+ ctxPtr->dictStart +=
+ (U32)(sourceEnd - ctxPtr->dictStart);
+			/* invalidate dictionary if it's too small */
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit <
+ LZ4HC_HASHSIZE) {
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictStart = ctxPtr->prefixStart;
+ }
+ }
+ }
+
+ return LZ4HC_compress_generic(ctxPtr, src, dst, srcSizePtr, dstCapacity,
+ ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue(LZ4_streamHC_t *LZ4_streamHCPtr, const char *src,
+ char *dst, int srcSize, int dstCapacity)
+{
+ DEBUGLOG(5, "LZ4_compress_HC_continue");
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src,
+ dst, &srcSize,
+ dstCapacity,
+ limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src,
+ dst, &srcSize,
+ dstCapacity, notLimited);
+}
+EXPORT_SYMBOL(LZ4_compress_HC_continue);
+
+int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *src, char *dst,
+ int *srcSizePtr, int targetDestSize)
+{
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst,
+ srcSizePtr, targetDestSize,
+ fillOutput);
+}
+
+/* LZ4_saveDictHC :
+ * save history content
+ * into a user-provided buffer
+ * which is then used to continue compression
+ */
+int LZ4_saveDictHC(LZ4_streamHC_t *LZ4_streamHCPtr, char *safeBuffer,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const streamPtr =
+ &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer,
+ dictSize);
+ assert(prefixSize >= 0);
+ if (dictSize > 64 KB)
+ dictSize = 64 KB;
+ if (dictSize < 4)
+ dictSize = 0;
+ if (dictSize > prefixSize)
+ dictSize = prefixSize;
+ if (safeBuffer == NULL)
+ assert(dictSize == 0);
+ if (dictSize > 0)
+ LZ4_memmove(safeBuffer, streamPtr->end - dictSize,
+ (size_t)dictSize);
+ {
+ U32 const endIndex =
+ (U32)(streamPtr->end - streamPtr->prefixStart) +
+ streamPtr->dictLimit;
+ streamPtr->end = (safeBuffer == NULL) ?
+ NULL :
+ (const BYTE *)safeBuffer + dictSize;
+ streamPtr->prefixStart = (const BYTE *)safeBuffer;
+ streamPtr->dictLimit = endIndex - (U32)dictSize;
+ streamPtr->lowLimit = endIndex - (U32)dictSize;
+ streamPtr->dictStart = streamPtr->prefixStart;
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDictHC);
+
+/* ================================================
+ * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
+ * ===============================================*/
+typedef struct {
+ int price;
+ int off;
+ int mlen;
+ int litlen;
+} LZ4HC_optimal_t;
+
+/* price in bytes */
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
+{
+ int price = litlen;
+ assert(litlen >= 0);
+ if (litlen >= (int)RUN_MASK)
+ price += 1 + ((litlen - (int)RUN_MASK) / 255);
+ return price;
+}
+
+/* requires mlen >= MINMATCH */
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
+{
+ int price = 1 + 2; /* token + 16-bit offset */
+ assert(litlen >= 0);
+ assert(mlen >= MINMATCH);
+
+ price += LZ4HC_literalsPrice(litlen);
+
+ if (mlen >= (int)(ML_MASK + MINMATCH))
+ price += 1 + ((mlen - (int)(ML_MASK + MINMATCH)) / 255);
+
+ return price;
+}
+
+LZ4_FORCE_INLINE LZ4HC_match_t LZ4HC_FindLongerMatch(
+ LZ4HC_CCtx_internal *const ctx, const BYTE *ip,
+ const BYTE *const iHighLimit, int minLen, int nbSearches,
+ const dictCtx_directive dict, const HCfavor_e favorDecSpeed)
+{
+ LZ4HC_match_t const match0 = { 0, 0, 0 };
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(
+ ctx, ip, ip, iHighLimit, minLen, nbSearches,
+ 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+ assert(md.back == 0);
+ if (md.len <= minLen)
+ return match0;
+ if (favorDecSpeed) {
+ if ((md.len > 18) & (md.len <= 36))
+ md.len = 18; /* favor dec.speed (shortcut) */
+ }
+ return md;
+}
+
+static int LZ4HC_compress_optimal(LZ4HC_CCtx_internal *ctx,
+ const char *const source, char *dst,
+ int *srcSizePtr, int dstCapacity,
+ int const nbSearches, size_t sufficient_len,
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ int retval = 0;
+#define TRAILING_LITERALS 3
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE == 1
+ LZ4HC_optimal_t *const opt = (LZ4HC_optimal_t *)ALLOC(
+ sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
+ LZ4HC_optimal_t
+ opt[LZ4_OPT_NUM +
+ TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
+#endif
+
+ const BYTE *ip = (const BYTE *)source;
+ const BYTE *anchor = ip;
+ const BYTE *const iend = ip + *srcSizePtr;
+ const BYTE *const mflimit = iend - MFLIMIT;
+ const BYTE *const matchlimit = iend - LASTLITERALS;
+ BYTE *op = (BYTE *)dst;
+ BYTE *opSaved = (BYTE *)dst;
+ BYTE *oend = op + dstCapacity;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ int ovoff = 0;
+
+ /* init */
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE == 1
+ if (opt == NULL)
+ goto _return_label;
+#endif
+ DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst,
+ (unsigned)dstCapacity);
+ *srcSizePtr = 0;
+ if (limit == fillOutput)
+ oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (sufficient_len >= LZ4_OPT_NUM)
+ sufficient_len = LZ4_OPT_NUM - 1;
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ int const llen = (int)(ip - anchor);
+ int best_mlen, best_off;
+ int cur, last_match_pos = 0;
+
+ LZ4HC_match_t const firstMatch =
+ LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH - 1,
+ nbSearches, dict, favorDecSpeed);
+ if (firstMatch.len == 0) {
+ ip++;
+ continue;
+ }
+
+ if ((size_t)firstMatch.len > sufficient_len) {
+ /* good enough solution : immediate encoding */
+ int const firstML = firstMatch.len;
+ opSaved = op;
+ if (LZ4HC_encodeSequence(
+ UPDATABLE(ip, op, anchor), firstML,
+ firstMatch.off, limit,
+ oend)) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovoff = firstMatch.off;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ /* set prices for first positions (literals) */
+ {
+ int rPos;
+ for (rPos = 0; rPos < MINMATCH; rPos++) {
+ int const cost =
+ LZ4HC_literalsPrice(llen + rPos);
+ opt[rPos].mlen = 1;
+ opt[rPos].off = 0;
+ opt[rPos].litlen = llen + rPos;
+ opt[rPos].price = cost;
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ rPos, cost, opt[rPos].litlen);
+ }
+ }
+ /* set prices using initial match */
+ {
+ int const matchML =
+ firstMatch
+ .len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
+ int const offset = firstMatch.off;
+ int mlen;
+ assert(matchML < LZ4_OPT_NUM);
+ for (mlen = MINMATCH; mlen <= matchML; mlen++) {
+ int const cost =
+ LZ4HC_sequencePrice(llen, mlen);
+ opt[mlen].mlen = mlen;
+ opt[mlen].off = offset;
+ opt[mlen].litlen = llen;
+ opt[mlen].price = cost;
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+ mlen, cost, mlen);
+ }
+ }
+ last_match_pos = firstMatch.len;
+ {
+ int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS;
+ addLit++) {
+ opt[last_match_pos + addLit].mlen =
+ 1; /* literal */
+ opt[last_match_pos + addLit].off = 0;
+ opt[last_match_pos + addLit].litlen = addLit;
+ opt[last_match_pos + addLit].price =
+ opt[last_match_pos].price +
+ LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ last_match_pos + addLit,
+ opt[last_match_pos + addLit].price,
+ addLit);
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur < last_match_pos; cur++) {
+ const BYTE *const curPtr = ip + cur;
+ LZ4HC_match_t newMatch;
+
+ if (curPtr > mflimit)
+ break;
+ DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", cur,
+ opt[cur].price, opt[cur + 1].price, cur + 1);
+ if (fullUpdate) {
+ /* not useful to search here if next position has same (or lower) cost */
+ if ((opt[cur + 1].price <= opt[cur].price)
+ /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+ && (opt[cur + MINMATCH].price <
+ opt[cur].price + 3 /*min seq price*/))
+ continue;
+ } else {
+ /* not useful to search here if next position has same (or lower) cost */
+ if (opt[cur + 1].price <= opt[cur].price)
+ continue;
+ }
+
+ DEBUGLOG(7, "search at rPos:%u", cur);
+ if (fullUpdate)
+ newMatch = LZ4HC_FindLongerMatch(
+ ctx, curPtr, matchlimit, MINMATCH - 1,
+ nbSearches, dict, favorDecSpeed);
+ else
+ /* only test matches of minimum length; slightly faster, but misses a few bytes */
+ newMatch = LZ4HC_FindLongerMatch(
+ ctx, curPtr, matchlimit,
+ last_match_pos - cur, nbSearches, dict,
+ favorDecSpeed);
+ if (!newMatch.len)
+ continue;
+
+ if (((size_t)newMatch.len > sufficient_len) ||
+ (newMatch.len + cur >= LZ4_OPT_NUM)) {
+ /* immediate encoding */
+ best_mlen = newMatch.len;
+ best_off = newMatch.off;
+ last_match_pos = cur + 1;
+ goto encode;
+ }
+
+ /* before match : set price with literals at beginning */
+ {
+ int const baseLitlen = opt[cur].litlen;
+ int litlen;
+ for (litlen = 1; litlen < MINMATCH; litlen++) {
+ int const price =
+ opt[cur].price -
+ LZ4HC_literalsPrice(
+ baseLitlen) +
+ LZ4HC_literalsPrice(baseLitlen +
+ litlen);
+ int const pos = cur + litlen;
+ if (price < opt[pos].price) {
+ opt[pos].mlen = 1; /* literal */
+ opt[pos].off = 0;
+ opt[pos].litlen =
+ baseLitlen + litlen;
+ opt[pos].price = price;
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (litlen=%i)",
+ pos, price,
+ opt[pos].litlen);
+ }
+ }
+ }
+
+ /* set prices using match at position = cur */
+ {
+ int const matchML = newMatch.len;
+ int ml = MINMATCH;
+
+ assert(cur + newMatch.len < LZ4_OPT_NUM);
+ for (; ml <= matchML; ml++) {
+ int const pos = cur + ml;
+ int const offset = newMatch.off;
+ int price;
+ int ll;
+ DEBUGLOG(
+ 7,
+ "testing price rPos %i (last_match_pos=%i)",
+ pos, last_match_pos);
+ if (opt[cur].mlen == 1) {
+ ll = opt[cur].litlen;
+ price = ((cur > ll) ?
+ opt[cur - ll]
+ .price :
+ 0) +
+ LZ4HC_sequencePrice(ll,
+ ml);
+ } else {
+ ll = 0;
+ price = opt[cur].price +
+ LZ4HC_sequencePrice(0,
+ ml);
+ }
+
+ assert((U32)favorDecSpeed <= 1);
+ if (pos > last_match_pos +
+ TRAILING_LITERALS ||
+ price <=
+ opt[pos].price -
+ (int)favorDecSpeed) {
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (matchlen=%i)",
+ pos, price, ml);
+ assert(pos < LZ4_OPT_NUM);
+ if ((ml ==
+ matchML) /* last pos of last match */
+ && (last_match_pos < pos))
+ last_match_pos = pos;
+ opt[pos].mlen = ml;
+ opt[pos].off = offset;
+ opt[pos].litlen = ll;
+ opt[pos].price = price;
+ }
+ }
+ }
+ /* complete following positions with literals */
+ {
+ int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS;
+ addLit++) {
+ opt[last_match_pos + addLit].mlen =
+ 1; /* literal */
+ opt[last_match_pos + addLit].off = 0;
+ opt[last_match_pos + addLit].litlen =
+ addLit;
+ opt[last_match_pos + addLit].price =
+ opt[last_match_pos].price +
+ LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(
+ 7,
+ "rPos:%3i => price:%3i (litlen=%i)",
+ last_match_pos + addLit,
+ opt[last_match_pos + addLit]
+ .price,
+ addLit);
+ }
+ }
+ } /* for (cur = 1; cur <= last_match_pos; cur++) */
+
+ assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
+ best_mlen = opt[last_match_pos].mlen;
+ best_off = opt[last_match_pos].off;
+ cur = last_match_pos - best_mlen;
+
+ encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+ assert(cur < LZ4_OPT_NUM);
+ assert(last_match_pos >= 1); /* == 1 when only one candidate */
+ DEBUGLOG(
+ 6,
+ "reverse traversal, looking for shortest path (last_match_pos=%i)",
+ last_match_pos);
+ {
+ int candidate_pos = cur;
+ int selected_matchLength = best_mlen;
+ int selected_offset = best_off;
+ while (1) { /* from end to beginning */
+ int const next_matchLength =
+ opt[candidate_pos]
+ .mlen; /* can be 1, means literal */
+ int const next_offset = opt[candidate_pos].off;
+ DEBUGLOG(7, "pos %i: sequence length %i",
+ candidate_pos, selected_matchLength);
+ opt[candidate_pos].mlen = selected_matchLength;
+ opt[candidate_pos].off = selected_offset;
+ selected_matchLength = next_matchLength;
+ selected_offset = next_offset;
+ if (next_matchLength > candidate_pos)
+ break; /* last match elected, first match to encode */
+ assert(next_matchLength >
+ 0); /* can be 1, means literal */
+ candidate_pos -= next_matchLength;
+ }
+ }
+
+ /* encode all recorded sequences in order */
+ {
+ int rPos = 0; /* relative position (to ip) */
+ while (rPos < last_match_pos) {
+ int const ml = opt[rPos].mlen;
+ int const offset = opt[rPos].off;
+ if (ml == 1) {
+ ip++;
+ rPos++;
+ continue;
+ } /* literal; note: can end up with several literals, in which case, skip them */
+ rPos += ml;
+ assert(ml >= MINMATCH);
+ assert((offset >= 1) &&
+ (offset <= LZ4_DISTANCE_MAX));
+ opSaved = op;
+ if (LZ4HC_encodeSequence(
+ UPDATABLE(ip, op, anchor), ml,
+ offset, limit,
+ oend)) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovoff = offset;
+ goto _dest_overflow;
+ }
+ }
+ }
+ } /* while (ip <= mflimit) */
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput)
+ oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals",
+ (int)lastRunSize);
+ ip = anchor +
+ lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int)(((const char *)ip) - source);
+ retval = (int)((char *)op - dst);
+ goto _return_label;
+
+_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE *const maxLitPos =
+ oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6,
+ "Last sequence overflowing (only %i bytes remaining)",
+ (int)(oend - 1 - opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl =
+ (size_t)(maxLitPos - (op + ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK - 1) +
+ (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX);
+ assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize)
+ ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) -
+ 1 + ovml >=
+ MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)",
+ (int)((oend + LASTLITERALS) -
+ (op + ll_totalCost + 2) - 1),
+ ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip,
+ anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ ovml, ovoff, notLimited,
+ oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip,
+ anchor);
+ }
+ }
+ goto _last_literals;
+ }
+_return_label:
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE == 1
+ if (opt)
+ FREEMEM(opt);
+#endif
+ return retval;
+}
+
+/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
+ * @return : 0 on success, !=0 if error */
+int LZ4_resetStreamStateHC(void *state, char *inputBuffer)
+{
+ LZ4_streamHC_t *const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
+ if (hc4 == NULL)
+ return 1; /* init failed */
+ LZ4HC_init_internal(&hc4->internal_donotuse, (const BYTE *)inputBuffer);
+ return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void *LZ4_createHC(const char *inputBuffer)
+{
+ LZ4_streamHC_t *const hc4 = LZ4_createStreamHC();
+ if (hc4 == NULL)
+ return NULL; /* not enough memory */
+ LZ4HC_init_internal(&hc4->internal_donotuse, (const BYTE *)inputBuffer);
+ return hc4;
+}
+
+int LZ4_freeHC(void *LZ4HC_Data)
+{
+ if (!LZ4HC_Data)
+ return 0; /* support free on NULL */
+ FREEMEM(LZ4HC_Data);
+ return 0;
+}
+#endif
+
+int LZ4_compressHC2_continue(void *LZ4HC_Data, const char *src, char *dst,
+ int srcSize, int cLevel)
+{
+ return LZ4HC_compress_generic(
+ &((LZ4_streamHC_t *)LZ4HC_Data)->internal_donotuse, src, dst,
+ &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue(void *LZ4HC_Data, const char *src,
+ char *dst, int srcSize,
+ int dstCapacity, int cLevel)
+{
+ return LZ4HC_compress_generic(
+ &((LZ4_streamHC_t *)LZ4HC_Data)->internal_donotuse, src, dst,
+ &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char *LZ4_slideInputBufferHC(void *LZ4HC_Data)
+{
+ LZ4HC_CCtx_internal *const s =
+ &((LZ4_streamHC_t *)LZ4HC_Data)->internal_donotuse;
+ const BYTE *const bufferStart =
+ s->prefixStart - s->dictLimit + s->lowLimit;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t *)LZ4HC_Data,
+ s->compressionLevel);
+ /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */
+ return (char *)(uptrval)bufferStart;
+}
Index: lib/lz4/lz4hc.h
===================================================================
diff --git a/lib/lz4/lz4hc.h b/lib/lz4/lz4hc.h
new file mode 100644
--- /dev/null (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/lib/lz4/lz4hc.h (revision b2497e4243461a835c25469028cd355bfc2e993f)
@@ -0,0 +1,451 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Header File
+ Copyright (C) 2011-2020, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#ifndef LZ4_HC_H_19834876238432
+#define LZ4_HC_H_19834876238432
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+/* note : lz4hc requires lz4.h/lz4.c for compilation */
+#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
+
+/* --- Useful constants --- */
+#define LZ4HC_CLEVEL_MIN 2
+#define LZ4HC_CLEVEL_DEFAULT 9
+#define LZ4HC_CLEVEL_OPT_MIN 10
+#define LZ4HC_CLEVEL_MAX 12
+
+/*-************************************
+ * Block Compression
+ **************************************/
+/*! LZ4_compress_HC() :
+ * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
+ * `dst` must be already allocated.
+ * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
+ * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
+ * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
+ * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
+ * @return : the number of bytes written into 'dst'
+ * or 0 if compression fails.
+ */
+LZ4LIB_API int LZ4_compress_HC(const char *src, char *dst, int srcSize,
+ int dstCapacity, int compressionLevel,
+ void *wrkmem);
+
+/* Note :
+ * Decompression functions are provided within "lz4.h" (BSD license)
+ */
+
+/*! LZ4_compress_HC_extStateHC() :
+ * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
+ * `state` size is provided by LZ4_sizeofStateHC().
+ * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
+ */
+LZ4LIB_API int LZ4_sizeofStateHC(void);
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void *stateHC, const char *src,
+ char *dst, int srcSize,
+ int maxDstSize, int compressionLevel);
+
+/*! LZ4_compress_HC_destSize() : v1.9.0+
+ * Will compress as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void *stateHC, const char *src,
+ char *dst, int *srcSizePtr,
+ int targetDstSize,
+ int compressionLevel);
+
+/*-************************************
+ * Streaming Compression
+ * Bufferless synchronous API
+ **************************************/
+typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
+
+/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
+ * These functions create and release memory for LZ4 HC streaming state.
+ * Newly created states are automatically initialized.
+ * A same state can be used multiple times consecutively,
+ * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
+ */
+LZ4LIB_API LZ4_streamHC_t *LZ4_createStreamHC(void);
+LZ4LIB_API int LZ4_freeStreamHC(LZ4_streamHC_t *streamHCPtr);
+
+/*
+ These functions compress data in successive blocks of any size,
+ using previous blocks as dictionary, to improve compression ratio.
+ One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
+ There is an exception for ring buffers, which can be smaller than 64 KB.
+ Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+ Before starting compression, state must be allocated and properly initialized.
+ LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+ Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+ or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+ LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+ which is automatically the case when state is created using LZ4_createStreamHC().
+
+ After reset, a first "fictional block" can be designated as initial dictionary,
+ using LZ4_loadDictHC() (Optional).
+ Note: In order for LZ4_loadDictHC() to create the correct data structure,
+ it is essential to set the compression level _before_ loading the dictionary.
+
+ Invoke LZ4_compress_HC_continue() to compress each successive block.
+ The number of blocks is unlimited.
+ Previous input blocks, including initial dictionary when present,
+ must remain accessible and unmodified during compression.
+
+ It's allowed to update compression level anytime between blocks,
+ using LZ4_setCompressionLevel() (experimental).
+
+ @dst buffer should be sized to handle worst case scenarios
+ (see LZ4_compressBound(), it ensures compression success).
+ In case of failure, the API does not guarantee recovery,
+ so the state _must_ be reset.
+ To ensure compression success
+ whenever @dst buffer size cannot be made >= LZ4_compressBound(),
+ consider using LZ4_compress_HC_continue_destSize().
+
+ Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+ it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+ Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB)
+
+ After completing a streaming compression,
+ it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+ just by resetting it, using LZ4_resetStreamHC_fast().
+*/
+
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t *streamHCPtr,
+ int compressionLevel); /* v1.9.0+ */
+LZ4LIB_API int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr,
+ const char *dictionary, int dictSize);
+
+LZ4LIB_API int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr,
+ const char *src, char *dst, int srcSize,
+ int maxDstSize);
+
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ * Similar to LZ4_compress_HC_continue(),
+ * but will read as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided into 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`.
+ * Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int
+LZ4_compress_HC_continue_destSize(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *src, char *dst, int *srcSizePtr,
+ int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
+ int maxDictSize);
+
+/*! LZ4_attach_HC_dictionary() : stable since v1.10.0
+ * This API allows for the efficient re-use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_API void
+LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream,
+ const LZ4_streamHC_t *dictionary_stream);
+
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
+
+/*-******************************************************************
+ * PRIVATE DEFINITIONS :
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ * Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ * Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1 << LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+
+#define LZ4HC_HASH_LOG 15
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+/* Never ever use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+**/
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal {
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte *end; /* next block here to continue on current prefix */
+ const LZ4_byte *prefixStart; /* Indexes relative to this position */
+ const LZ4_byte *dictStart; /* alternate reference for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more history */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+ const LZ4HC_CCtx_internal *dictCtx;
+};
+
+#define LZ4_STREAMHC_MINSIZE \
+ 262200 /* static size, for inter-version compatibility */
+union LZ4_streamHC_u {
+ char minStateSize[LZ4_STREAMHC_MINSIZE];
+ LZ4HC_CCtx_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically on stack, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
+
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t *LZ4_initStreamHC(void *buffer, size_t size);
+
+/*-************************************
+* Deprecated Functions
+**************************************/
+/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
+
+/* deprecated compression functions */
+LZ4_DEPRECATED("use LZ4_compress_HC() instead")
+LZ4LIB_API int LZ4_compressHC(const char *source, char *dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead")
+LZ4LIB_API int LZ4_compressHC_limitedOutput(const char *source, char *dest,
+ int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead")
+LZ4LIB_API int LZ4_compressHC2(const char *source, char *dest, int inputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead")
+LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char *source, char *dest,
+ int inputSize, int maxOutputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead")
+LZ4LIB_API int LZ4_compressHC_withStateHC(void *state, const char *source,
+ char *dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead")
+LZ4LIB_API
+int LZ4_compressHC_limitedOutput_withStateHC(void *state, const char *source,
+ char *dest, int inputSize,
+ int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead")
+LZ4LIB_API int LZ4_compressHC2_withStateHC(void *state, const char *source,
+ char *dest, int inputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead")
+LZ4LIB_API
+int LZ4_compressHC2_limitedOutput_withStateHC(void *state, const char *source,
+ char *dest, int inputSize,
+ int maxOutputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead")
+LZ4LIB_API int LZ4_compressHC_continue(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source, char *dest,
+ int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead")
+LZ4LIB_API int
+LZ4_compressHC_limitedOutput_continue(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source, char *dest,
+ int inputSize, int maxOutputSize);
+
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, use of
+ * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
+ * than preserve a window-sized chunk of history.
+ */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead")
+LZ4LIB_API void *LZ4_createHC(const char *inputBuffer);
+LZ4_DEPRECATED("use LZ4_freeStreamHC() instead")
+LZ4LIB_API int LZ4_freeHC(void *LZ4HC_Data);
+#endif
+LZ4_DEPRECATED("use LZ4_saveDictHC() instead")
+LZ4LIB_API char *LZ4_slideInputBufferHC(void *LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead")
+LZ4LIB_API int LZ4_compressHC2_continue(void *LZ4HC_Data, const char *source,
+ char *dest, int inputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead")
+LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue(void *LZ4HC_Data,
+ const char *source,
+ char *dest, int inputSize,
+ int maxOutputSize,
+ int compressionLevel);
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead")
+LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
+LZ4_DEPRECATED("use LZ4_initStreamHC() instead")
+LZ4LIB_API int LZ4_resetStreamStateHC(void *state, char *inputBuffer);
+
+/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
+ * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
+ * which is now the recommended function to start a new stream of blocks,
+ * but cannot be used to initialize a memory segment containing arbitrary garbage data.
+ *
+ * It is recommended to switch to LZ4_initStreamHC().
+ * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
+ */
+LZ4LIB_API void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr,
+ int compressionLevel);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_H_19834876238432 */
+
+/*-**************************************************
+ * !!!!! STATIC LINKING ONLY !!!!!
+ * Following definitions are considered experimental.
+ * They should not be linked from DLL,
+ * as there is no guarantee of API stability yet.
+ * Prototypes will be promoted to "stable" status
+ * after successful usage in real-life scenarios.
+ ***************************************************/
+#ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
+#ifndef LZ4_HC_SLO_098092834
+#define LZ4_HC_SLO_098092834
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
+#include "lz4.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
+ * It's possible to change compression level
+ * between successive invocations of LZ4_compress_HC_continue*()
+ * for dynamic adaptation.
+ */
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(LZ4_streamHC_t *LZ4_streamHCPtr,
+ int compressionLevel);
+
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ * Opt. Parser will favor decompression speed over compression ratio.
+ * Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
+ */
+LZ4LIB_STATIC_API void
+LZ4_favorDecompressionSpeed(LZ4_streamHC_t *LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ * When an LZ4_streamHC_t is known to be in a internally coherent state,
+ * it can often be prepared for a new compression with almost no work, only
+ * sometimes falling back to the full, expensive reset that is always required
+ * when the stream is in an indeterminate state (i.e., the reset performed by
+ * LZ4_resetStreamHC()).
+ *
+ * LZ4_streamHCs are guaranteed to be in a valid state when:
+ * - returned from LZ4_createStreamHC()
+ * - reset by LZ4_resetStreamHC()
+ * - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ * - the stream was in a valid state and was then used in any compression call
+ * that returned success
+ * - the stream was in an indeterminate state and was used in a compression
+ * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ * returned success
+ *
+ * Note:
+ * A stream that was last used in a compression call that returned an error
+ * may be passed to this function. However, it will be fully reset, which will
+ * clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t *LZ4_streamHCPtr,
+ int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ * A variant of LZ4_compress_HC_extStateHC().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ * "correctly initialized"). From a high level, the difference is that this
+ * function initializes the provided state with a call to
+ * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ * call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int
+LZ4_compress_HC_extStateHC_fastReset(void *state, const char *src, char *dst,
+ int srcSize, int dstCapacity,
+ int compressionLevel);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_SLO_098092834 */
+#endif /* LZ4_HC_STATIC_LINKING_ONLY */
Index: crypto/lz4.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/crypto/lz4.c b/crypto/lz4.c
--- a/crypto/lz4.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/crypto/lz4.c (revision fc5a9e7d3276f214f39df6195d290478513d39d1)
@@ -81,7 +81,13 @@
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
+ int out_len;
+
+#if defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON)
+ out_len = LZ4_arm64_decompress_safe(src, dst, slen, *dlen, false);
+#else
+ out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
+#endif
if (out_len < 0)
return -EINVAL;
Index: crypto/lz4hc.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
--- a/crypto/lz4hc.c (revision b2497e4243461a835c25469028cd355bfc2e993f)
+++ b/crypto/lz4hc.c (revision fc5a9e7d3276f214f39df6195d290478513d39d1)
@@ -82,7 +82,13 @@
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
+ int out_len;
+
+#if defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON)
+ out_len = LZ4_arm64_decompress_safe(src, dst, slen, *dlen, false);
+#else
+ out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
+#endif
if (out_len < 0)
return -EINVAL;
Index: fs/incfs/data_mgmt.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c
--- a/fs/incfs/data_mgmt.c (revision fc5a9e7d3276f214f39df6195d290478513d39d1)
+++ b/fs/incfs/data_mgmt.c (revision 40e8e52660dbefefdfa8e57cebab5dbc03796bf5)
@@ -472,8 +472,11 @@
switch (alg) {
case INCFS_BLOCK_COMPRESSED_LZ4:
- result = LZ4_decompress_safe(src.data, dst.data, src.len,
- dst.len);
+#if defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON)
+ result = LZ4_arm64_decompress_safe(src.data, dst.data, src.len, dst.len, false);
+#else
+ result = LZ4_decompress_safe(src.data, dst.data, src.len, dst.len);
+#endif
if (result < 0)
return -EBADMSG;
return result;
Index: lib/lz4/lz4_compress.c
===================================================================
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
deleted file mode 100644
--- a/lib/lz4/lz4_compress.c (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ /dev/null (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
@@ -1,940 +0,0 @@
-/*
- * LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011 - 2016, Yann Collet.
- * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- *
- * Changed for kernel usage by:
- * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
- */
-
-/*-************************************
- * Dependencies
- **************************************/
-#include
-#include "lz4defs.h"
-#include
-#include
-#include
-
-static const int LZ4_minLength = (MFLIMIT + 1);
-static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
-
-/*-******************************
- * Compression functions
- ********************************/
-static FORCE_INLINE U32 LZ4_hash4(
- U32 sequence,
- tableType_t const tableType)
-{
- if (tableType == byU16)
- return ((sequence * 2654435761U)
- >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
- else
- return ((sequence * 2654435761U)
- >> ((MINMATCH * 8) - LZ4_HASHLOG));
-}
-
-static FORCE_INLINE U32 LZ4_hash5(
- U64 sequence,
- tableType_t const tableType)
-{
- const U32 hashLog = (tableType == byU16)
- ? LZ4_HASHLOG + 1
- : LZ4_HASHLOG;
-
-#if LZ4_LITTLE_ENDIAN
- static const U64 prime5bytes = 889523592379ULL;
-
- return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
-#else
- static const U64 prime8bytes = 11400714785074694791ULL;
-
- return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
-#endif
-}
-
-static FORCE_INLINE U32 LZ4_hashPosition(
- const void *p,
- tableType_t const tableType)
-{
-#if LZ4_ARCH64
- if (tableType == byU32)
- return LZ4_hash5(LZ4_read_ARCH(p), tableType);
-#endif
-
- return LZ4_hash4(LZ4_read32(p), tableType);
-}
-
-static void LZ4_putPositionOnHash(
- const BYTE *p,
- U32 h,
- void *tableBase,
- tableType_t const tableType,
- const BYTE *srcBase)
-{
- switch (tableType) {
- case byPtr:
- {
- const BYTE **hashTable = (const BYTE **)tableBase;
-
- hashTable[h] = p;
- return;
- }
- case byU32:
- {
- U32 *hashTable = (U32 *) tableBase;
-
- hashTable[h] = (U32)(p - srcBase);
- return;
- }
- case byU16:
- {
- U16 *hashTable = (U16 *) tableBase;
-
- hashTable[h] = (U16)(p - srcBase);
- return;
- }
- }
-}
-
-static FORCE_INLINE void LZ4_putPosition(
- const BYTE *p,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
-{
- U32 const h = LZ4_hashPosition(p, tableType);
-
- LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
-}
-
-static const BYTE *LZ4_getPositionOnHash(
- U32 h,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
-{
- if (tableType == byPtr) {
- const BYTE **hashTable = (const BYTE **) tableBase;
-
- return hashTable[h];
- }
-
- if (tableType == byU32) {
- const U32 * const hashTable = (U32 *) tableBase;
-
- return hashTable[h] + srcBase;
- }
-
- {
- /* default, to ensure a return */
- const U16 * const hashTable = (U16 *) tableBase;
-
- return hashTable[h] + srcBase;
- }
-}
-
-static FORCE_INLINE const BYTE *LZ4_getPosition(
- const BYTE *p,
- void *tableBase,
- tableType_t tableType,
- const BYTE *srcBase)
-{
- U32 const h = LZ4_hashPosition(p, tableType);
-
- return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
-}
-
-
-/*
- * LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time
- */
-static FORCE_INLINE int LZ4_compress_generic(
- LZ4_stream_t_internal * const dictPtr,
- const char * const source,
- char * const dest,
- const int inputSize,
- const int maxOutputSize,
- const limitedOutput_directive outputLimited,
- const tableType_t tableType,
- const dict_directive dict,
- const dictIssue_directive dictIssue,
- const U32 acceleration)
-{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *base;
- const BYTE *lowLimit;
- const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
- const BYTE * const dictionary = dictPtr->dictionary;
- const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
- const size_t dictDelta = dictEnd - (const BYTE *)source;
- const BYTE *anchor = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
- const BYTE * const mflimit = iend - MFLIMIT;
- const BYTE * const matchlimit = iend - LASTLITERALS;
-
- BYTE *op = (BYTE *) dest;
- BYTE * const olimit = op + maxOutputSize;
-
- U32 forwardH;
- size_t refDelta = 0;
-
- /* Init conditions */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
- /* Unsupported inputSize, too large (or negative) */
- return 0;
- }
-
- switch (dict) {
- case noDict:
- default:
- base = (const BYTE *)source;
- lowLimit = (const BYTE *)source;
- break;
- case withPrefix64k:
- base = (const BYTE *)source - dictPtr->currentOffset;
- lowLimit = (const BYTE *)source - dictPtr->dictSize;
- break;
- case usingExtDict:
- base = (const BYTE *)source - dictPtr->currentOffset;
- lowLimit = (const BYTE *)source;
- break;
- }
-
- if ((tableType == byU16)
- && (inputSize >= LZ4_64Klimit)) {
- /* Size too large (not within 64K limit) */
- return 0;
- }
-
- if (inputSize < LZ4_minLength) {
- /* Input too small, no compression (all literals) */
- goto _last_literals;
- }
-
- /* First Byte */
- LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
- ip++;
- forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE *match;
- BYTE *token;
-
- /* Find a match */
- {
- const BYTE *forwardIp = ip;
- unsigned int step = 1;
- unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
-
- do {
- U32 const h = forwardH;
-
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
-
- if (unlikely(forwardIp > mflimit))
- goto _last_literals;
-
- match = LZ4_getPositionOnHash(h,
- dictPtr->hashTable,
- tableType, base);
-
- if (dict == usingExtDict) {
- if (match < (const BYTE *)source) {
- refDelta = dictDelta;
- lowLimit = dictionary;
- } else {
- refDelta = 0;
- lowLimit = (const BYTE *)source;
- } }
-
- forwardH = LZ4_hashPosition(forwardIp,
- tableType);
-
- LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
- tableType, base);
- } while (((dictIssue == dictSmall)
- ? (match < lowRefLimit)
- : 0)
- || ((tableType == byU16)
- ? 0
- : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match + refDelta)
- != LZ4_read32(ip)));
- }
-
- /* Catch up */
- while (((ip > anchor) & (match + refDelta > lowLimit))
- && (unlikely(ip[-1] == match[refDelta - 1]))) {
- ip--;
- match--;
- }
-
- /* Encode Literals */
- {
- unsigned const int litLength = (unsigned int)(ip - anchor);
-
- token = op++;
-
- if ((outputLimited) &&
- /* Check output buffer overflow */
- (unlikely(op + litLength +
- (2 + 1 + LASTLITERALS) +
- (litLength / 255) > olimit)))
- return 0;
-
- if (litLength >= RUN_MASK) {
- int len = (int)litLength - RUN_MASK;
-
- *token = (RUN_MASK << ML_BITS);
-
- for (; len >= 255; len -= 255)
- *op++ = 255;
- *op++ = (BYTE)len;
- } else
- *token = (BYTE)(litLength << ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op + litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip - match));
- op += 2;
-
- /* Encode MatchLength */
- {
- unsigned int matchCode;
-
- if ((dict == usingExtDict)
- && (lowLimit == dictionary)) {
- const BYTE *limit;
-
- match += refDelta;
- limit = ip + (dictEnd - match);
-
- if (limit > matchlimit)
- limit = matchlimit;
-
- matchCode = LZ4_count(ip + MINMATCH,
- match + MINMATCH, limit);
-
- ip += MINMATCH + matchCode;
-
- if (ip == limit) {
- unsigned const int more = LZ4_count(ip,
- (const BYTE *)source,
- matchlimit);
-
- matchCode += more;
- ip += more;
- }
- } else {
- matchCode = LZ4_count(ip + MINMATCH,
- match + MINMATCH, matchlimit);
- ip += MINMATCH + matchCode;
- }
-
- if (outputLimited &&
- /* Check output buffer overflow */
- (unlikely(op +
- (1 + LASTLITERALS) +
- (matchCode >> 8) > olimit)))
- return 0;
-
- if (matchCode >= ML_MASK) {
- *token += ML_MASK;
- matchCode -= ML_MASK;
- LZ4_write32(op, 0xFFFFFFFF);
-
- while (matchCode >= 4 * 255) {
- op += 4;
- LZ4_write32(op, 0xFFFFFFFF);
- matchCode -= 4 * 255;
- }
-
- op += matchCode / 255;
- *op++ = (BYTE)(matchCode % 255);
- } else
- *token += (BYTE)(matchCode);
- }
-
- anchor = ip;
-
- /* Test end of chunk */
- if (ip > mflimit)
- break;
-
- /* Fill table */
- LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, dictPtr->hashTable,
- tableType, base);
-
- if (dict == usingExtDict) {
- if (match < (const BYTE *)source) {
- refDelta = dictDelta;
- lowLimit = dictionary;
- } else {
- refDelta = 0;
- lowLimit = (const BYTE *)source;
- }
- }
-
- LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
-
- if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
- && (match + MAX_DISTANCE >= ip)
- && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
- token = op++;
- *token = 0;
- goto _next_match;
- }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- {
- size_t const lastRun = (size_t)(iend - anchor);
-
- if ((outputLimited) &&
- /* Check output buffer overflow */
- ((op - (BYTE *)dest) + lastRun + 1 +
- ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
- return 0;
-
- if (lastRun >= RUN_MASK) {
- size_t accumulator = lastRun - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for (; accumulator >= 255; accumulator -= 255)
- *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRun << ML_BITS);
- }
-
- LZ4_memcpy(op, anchor, lastRun);
-
- op += lastRun;
- }
-
- /* End */
- return (int) (((char *)op) - dest);
-}
-
-static int LZ4_compress_fast_extState(
- void *state,
- const char *source,
- char *dest,
- int inputSize,
- int maxOutputSize,
- int acceleration)
-{
- LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
-#if LZ4_ARCH64
- const tableType_t tableType = byU32;
-#else
- const tableType_t tableType = byPtr;
-#endif
-
- LZ4_resetStream((LZ4_stream_t *)state);
-
- if (acceleration < 1)
- acceleration = LZ4_ACCELERATION_DEFAULT;
-
- if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source,
- dest, inputSize, 0,
- noLimit, byU16, noDict,
- noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source,
- dest, inputSize, 0,
- noLimit, tableType, noDict,
- noDictIssue, acceleration);
- } else {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source,
- dest, inputSize,
- maxOutputSize, limitedOutput, byU16, noDict,
- noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source,
- dest, inputSize,
- maxOutputSize, limitedOutput, tableType, noDict,
- noDictIssue, acceleration);
- }
-}
-
-int LZ4_compress_fast(const char *source, char *dest, int inputSize,
- int maxOutputSize, int acceleration, void *wrkmem)
-{
- return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
- maxOutputSize, acceleration);
-}
-EXPORT_SYMBOL(LZ4_compress_fast);
-
-int LZ4_compress_default(const char *source, char *dest, int inputSize,
- int maxOutputSize, void *wrkmem)
-{
- return LZ4_compress_fast(source, dest, inputSize,
- maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
-}
-EXPORT_SYMBOL(LZ4_compress_default);
-
-/*-******************************
- * *_destSize() variant
- ********************************/
-static int LZ4_compress_destSize_generic(
- LZ4_stream_t_internal * const ctx,
- const char * const src,
- char * const dst,
- int * const srcSizePtr,
- const int targetDstSize,
- const tableType_t tableType)
-{
- const BYTE *ip = (const BYTE *) src;
- const BYTE *base = (const BYTE *) src;
- const BYTE *lowLimit = (const BYTE *) src;
- const BYTE *anchor = ip;
- const BYTE * const iend = ip + *srcSizePtr;
- const BYTE * const mflimit = iend - MFLIMIT;
- const BYTE * const matchlimit = iend - LASTLITERALS;
-
- BYTE *op = (BYTE *) dst;
- BYTE * const oend = op + targetDstSize;
- BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
- - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
- BYTE * const oMaxMatch = op + targetDstSize
- - (LASTLITERALS + 1 /* token */);
- BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
-
- U32 forwardH;
-
- /* Init conditions */
- /* Impossible to store anything */
- if (targetDstSize < 1)
- return 0;
- /* Unsupported input size, too large (or negative) */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
- return 0;
- /* Size too large (not within 64K limit) */
- if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
- return 0;
- /* Input too small, no compression (all literals) */
- if (*srcSizePtr < LZ4_minLength)
- goto _last_literals;
-
- /* First Byte */
- *srcSizePtr = 0;
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE *match;
- BYTE *token;
-
- /* Find a match */
- {
- const BYTE *forwardIp = ip;
- unsigned int step = 1;
- unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
-
- do {
- U32 h = forwardH;
-
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
-
- if (unlikely(forwardIp > mflimit))
- goto _last_literals;
-
- match = LZ4_getPositionOnHash(h, ctx->hashTable,
- tableType, base);
- forwardH = LZ4_hashPosition(forwardIp,
- tableType);
- LZ4_putPositionOnHash(ip, h,
- ctx->hashTable, tableType,
- base);
-
- } while (((tableType == byU16)
- ? 0
- : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match) != LZ4_read32(ip)));
- }
-
- /* Catch up */
- while ((ip > anchor)
- && (match > lowLimit)
- && (unlikely(ip[-1] == match[-1]))) {
- ip--;
- match--;
- }
-
- /* Encode Literal length */
- {
- unsigned int litLength = (unsigned int)(ip - anchor);
-
- token = op++;
- if (op + ((litLength + 240) / 255)
- + litLength > oMaxLit) {
- /* Not enough space for a last match */
- op--;
- goto _last_literals;
- }
- if (litLength >= RUN_MASK) {
- unsigned int len = litLength - RUN_MASK;
- *token = (RUN_MASK<= 255; len -= 255)
- *op++ = 255;
- *op++ = (BYTE)len;
- } else
- *token = (BYTE)(litLength << ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op + litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
-
- /* Encode MatchLength */
- {
- size_t matchLength = LZ4_count(ip + MINMATCH,
- match + MINMATCH, matchlimit);
-
- if (op + ((matchLength + 240)/255) > oMaxMatch) {
- /* Match description too long : reduce it */
- matchLength = (15 - 1) + (oMaxMatch - op) * 255;
- }
- ip += MINMATCH + matchLength;
-
- if (matchLength >= ML_MASK) {
- *token += ML_MASK;
- matchLength -= ML_MASK;
- while (matchLength >= 255) {
- matchLength -= 255;
- *op++ = 255;
- }
- *op++ = (BYTE)matchLength;
- } else
- *token += (BYTE)(matchLength);
- }
-
- anchor = ip;
-
- /* Test end of block */
- if (ip > mflimit)
- break;
- if (op > oMaxSeq)
- break;
-
- /* Fill table */
- LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
-
- if ((match + MAX_DISTANCE >= ip)
- && (LZ4_read32(match) == LZ4_read32(ip))) {
- token = op++; *token = 0;
- goto _next_match;
- }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- {
- size_t lastRunSize = (size_t)(iend - anchor);
-
- if (op + 1 /* token */
- + ((lastRunSize + 240) / 255) /* litLength */
- + lastRunSize /* literals */ > oend) {
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (oend - op) - 1;
- lastRunSize -= (lastRunSize + 240) / 255;
- }
- ip = anchor + lastRunSize;
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
-
- *op++ = RUN_MASK << ML_BITS;
- for (; accumulator >= 255; accumulator -= 255)
- *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize<= LZ4_COMPRESSBOUND(*srcSizePtr)) {
- /* compression success is guaranteed */
- return LZ4_compress_fast_extState(
- state, src, dst, *srcSizePtr,
- targetDstSize, 1);
- } else {
- if (*srcSizePtr < LZ4_64Klimit)
- return LZ4_compress_destSize_generic(
- &state->internal_donotuse,
- src, dst, srcSizePtr,
- targetDstSize, byU16);
- else
- return LZ4_compress_destSize_generic(
- &state->internal_donotuse,
- src, dst, srcSizePtr,
- targetDstSize, tableType);
- }
-}
-
-
-int LZ4_compress_destSize(
- const char *src,
- char *dst,
- int *srcSizePtr,
- int targetDstSize,
- void *wrkmem)
-{
- return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
- targetDstSize);
-}
-EXPORT_SYMBOL(LZ4_compress_destSize);
-
-/*-******************************
- * Streaming functions
- ********************************/
-void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
-{
- memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
-}
-
-int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
- const char *dictionary, int dictSize)
-{
- LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
- const BYTE *p = (const BYTE *)dictionary;
- const BYTE * const dictEnd = p + dictSize;
- const BYTE *base;
-
- if ((dict->initCheck)
- || (dict->currentOffset > 1 * GB)) {
- /* Uninitialized structure, or reuse overflow */
- LZ4_resetStream(LZ4_dict);
- }
-
- if (dictSize < (int)HASH_UNIT) {
- dict->dictionary = NULL;
- dict->dictSize = 0;
- return 0;
- }
-
- if ((dictEnd - p) > 64 * KB)
- p = dictEnd - 64 * KB;
- dict->currentOffset += 64 * KB;
- base = p - dict->currentOffset;
- dict->dictionary = p;
- dict->dictSize = (U32)(dictEnd - p);
- dict->currentOffset += dict->dictSize;
-
- while (p <= dictEnd - HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, byU32, base);
- p += 3;
- }
-
- return dict->dictSize;
-}
-EXPORT_SYMBOL(LZ4_loadDict);
-
-static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
- const BYTE *src)
-{
- if ((LZ4_dict->currentOffset > 0x80000000) ||
- ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
- /* address space overflow */
- /* rescale hash table */
- U32 const delta = LZ4_dict->currentOffset - 64 * KB;
- const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
- int i;
-
- for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
- if (LZ4_dict->hashTable[i] < delta)
- LZ4_dict->hashTable[i] = 0;
- else
- LZ4_dict->hashTable[i] -= delta;
- }
- LZ4_dict->currentOffset = 64 * KB;
- if (LZ4_dict->dictSize > 64 * KB)
- LZ4_dict->dictSize = 64 * KB;
- LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
- }
-}
-
-int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
-{
- LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
- const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
-
- if ((U32)dictSize > 64 * KB) {
- /* useless to define a dictionary > 64 * KB */
- dictSize = 64 * KB;
- }
- if ((U32)dictSize > dict->dictSize)
- dictSize = dict->dictSize;
-
- memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
-
- dict->dictionary = (const BYTE *)safeBuffer;
- dict->dictSize = (U32)dictSize;
-
- return dictSize;
-}
-EXPORT_SYMBOL(LZ4_saveDict);
-
-int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
- char *dest, int inputSize, int maxOutputSize, int acceleration)
-{
- LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
- const BYTE * const dictEnd = streamPtr->dictionary
- + streamPtr->dictSize;
-
- const BYTE *smallest = (const BYTE *) source;
-
- if (streamPtr->initCheck) {
- /* Uninitialized structure detected */
- return 0;
- }
-
- if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
- smallest = dictEnd;
-
- LZ4_renormDictT(streamPtr, smallest);
-
- if (acceleration < 1)
- acceleration = LZ4_ACCELERATION_DEFAULT;
-
- /* Check overlapping input/dictionary space */
- {
- const BYTE *sourceEnd = (const BYTE *) source + inputSize;
-
- if ((sourceEnd > streamPtr->dictionary)
- && (sourceEnd < dictEnd)) {
- streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
- if (streamPtr->dictSize > 64 * KB)
- streamPtr->dictSize = 64 * KB;
- if (streamPtr->dictSize < 4)
- streamPtr->dictSize = 0;
- streamPtr->dictionary = dictEnd - streamPtr->dictSize;
- }
- }
-
- /* prefix mode : source data follows dictionary */
- if (dictEnd == (const BYTE *)source) {
- int result;
-
- if ((streamPtr->dictSize < 64 * KB) &&
- (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- withPrefix64k, dictSmall, acceleration);
- } else {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- withPrefix64k, noDictIssue, acceleration);
- }
- streamPtr->dictSize += (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
- return result;
- }
-
- /* external dictionary mode */
- {
- int result;
-
- if ((streamPtr->dictSize < 64 * KB) &&
- (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- usingExtDict, dictSmall, acceleration);
- } else {
- result = LZ4_compress_generic(
- streamPtr, source, dest, inputSize,
- maxOutputSize, limitedOutput, byU32,
- usingExtDict, noDictIssue, acceleration);
- }
- streamPtr->dictionary = (const BYTE *)source;
- streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
- return result;
- }
-}
-EXPORT_SYMBOL(LZ4_compress_fast_continue);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 compressor");
Index: lib/lz4/lz4_decompress.c
===================================================================
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
deleted file mode 100644
--- a/lib/lz4/lz4_decompress.c (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ /dev/null (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
@@ -1,720 +0,0 @@
-/*
- * LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011 - 2016, Yann Collet.
- * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- *
- * Changed for kernel usage by:
- * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
- */
-
-/*-************************************
- * Dependencies
- **************************************/
-#include
-#include "lz4defs.h"
-#include
-#include
-#include
-#include
-
-/*-*****************************
- * Decompression functions
- *******************************/
-
-#define DEBUGLOG(l, ...) {} /* disabled */
-
-#ifndef assert
-#define assert(condition) ((void)0)
-#endif
-
-/*
- * LZ4_decompress_generic() :
- * This generic decompression function covers all use cases.
- * It shall be instantiated several times, using different sets of directives.
- * Note that it is important for performance that this function really get inlined,
- * in order to remove useless branches during compilation optimization.
- */
-static FORCE_INLINE int LZ4_decompress_generic(
- const char * const src,
- char * const dst,
- int srcSize,
- /*
- * If endOnInput == endOnInputSize,
- * this value is `dstCapacity`
- */
- int outputSize,
- /* endOnOutputSize, endOnInputSize */
- endCondition_directive endOnInput,
- /* full, partial */
- earlyEnd_directive partialDecoding,
- /* noDict, withPrefix64k, usingExtDict */
- dict_directive dict,
- /* always <= dst, == dst when no prefix */
- const BYTE * const lowPrefix,
- /* only if dict == usingExtDict */
- const BYTE * const dictStart,
- /* note : = 0 if noDict */
- const size_t dictSize
- )
-{
- const BYTE *ip = (const BYTE *) src;
- const BYTE * const iend = ip + srcSize;
-
- BYTE *op = (BYTE *) dst;
- BYTE * const oend = op + outputSize;
- BYTE *cpy;
-
- const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
- static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
-
- const int safeDecode = (endOnInput == endOnInputSize);
- const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
-
- /* Set up the "end" pointers for the shortcut. */
- const BYTE *const shortiend = iend -
- (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
- const BYTE *const shortoend = oend -
- (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
-
- DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
- srcSize, outputSize);
-
- /* Special cases */
- assert(lowPrefix <= op);
- assert(src != NULL);
-
- /* Empty output buffer */
- if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
-
- if ((!endOnInput) && (unlikely(outputSize == 0)))
- return (*ip == 0 ? 1 : -1);
-
- if ((endOnInput) && unlikely(srcSize == 0))
- return -1;
-
- /* Main Loop : decode sequences */
- while (1) {
- size_t length;
- const BYTE *match;
- size_t offset;
-
- /* get literal length */
- unsigned int const token = *ip++;
- length = token>>ML_BITS;
-
- /* ip < iend before the increment */
- assert(!endOnInput || ip <= iend);
-
- /*
- * A two-stage shortcut for the most common case:
- * 1) If the literal length is 0..14, and there is enough
- * space, enter the shortcut and copy 16 bytes on behalf
- * of the literals (in the fast mode, only 8 bytes can be
- * safely copied this way).
- * 2) Further if the match length is 4..18, copy 18 bytes
- * in a similar manner; but we ensure that there's enough
- * space in the output for those 18 bytes earlier, upon
- * entering the shortcut (in other words, there is a
- * combined check for both stages).
- *
- * The & in the likely() below is intentionally not && so that
- * some compilers can produce better parallelized runtime code
- */
- if ((endOnInput ? length != RUN_MASK : length <= 8)
- /*
- * strictly "less than" on input, to re-enter
- * the loop with at least one byte
- */
- && likely((endOnInput ? ip < shortiend : 1) &
- (op <= shortoend))) {
- /* Copy the literals */
- LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
- op += length; ip += length;
-
- /*
- * The second stage:
- * prepare for match copying, decode full info.
- * If it doesn't work out, the info won't be wasted.
- */
- length = token & ML_MASK; /* match length */
- offset = LZ4_readLE16(ip);
- ip += 2;
- match = op - offset;
- assert(match <= op); /* check overflow */
-
- /* Do not deal with overlapping matches. */
- if ((length != ML_MASK) &&
- (offset >= 8) &&
- (dict == withPrefix64k || match >= lowPrefix)) {
- /* Copy the match. */
- LZ4_memcpy(op + 0, match + 0, 8);
- LZ4_memcpy(op + 8, match + 8, 8);
- LZ4_memcpy(op + 16, match + 16, 2);
- op += length + MINMATCH;
- /* Both stages worked, load the next token. */
- continue;
- }
-
- /*
- * The second stage didn't work out, but the info
- * is ready. Propel it right to the point of match
- * copying.
- */
- goto _copy_match;
- }
-
- /* decode literal length */
- if (length == RUN_MASK) {
- unsigned int s;
-
- if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
- /* overflow detection */
- goto _output_error;
- }
- do {
- s = *ip++;
- length += s;
- } while (likely(endOnInput
- ? ip < iend - RUN_MASK
- : 1) & (s == 255));
-
- if ((safeDecode)
- && unlikely((uptrval)(op) +
- length < (uptrval)(op))) {
- /* overflow detection */
- goto _output_error;
- }
- if ((safeDecode)
- && unlikely((uptrval)(ip) +
- length < (uptrval)(ip))) {
- /* overflow detection */
- goto _output_error;
- }
- }
-
- /* copy literals */
- cpy = op + length;
- LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
-
- if (((endOnInput) && ((cpy > oend - MFLIMIT)
- || (ip + length > iend - (2 + 1 + LASTLITERALS))))
- || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
- if (partialDecoding) {
- if (cpy > oend) {
- /*
- * Partial decoding :
- * stop in the middle of literal segment
- */
- cpy = oend;
- length = oend - op;
- }
- if ((endOnInput)
- && (ip + length > iend)) {
- /*
- * Error :
- * read attempt beyond
- * end of input buffer
- */
- goto _output_error;
- }
- } else {
- if ((!endOnInput)
- && (cpy != oend)) {
- /*
- * Error :
- * block decoding must
- * stop exactly there
- */
- goto _output_error;
- }
- if ((endOnInput)
- && ((ip + length != iend)
- || (cpy > oend))) {
- /*
- * Error :
- * input must be consumed
- */
- goto _output_error;
- }
- }
-
- /*
- * supports overlapping memory regions; only matters
- * for in-place decompression scenarios
- */
- LZ4_memmove(op, ip, length);
- ip += length;
- op += length;
-
- /* Necessarily EOF when !partialDecoding.
- * When partialDecoding, it is EOF if we've either
- * filled the output buffer or
- * can't proceed with reading an offset for following match.
- */
- if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
- break;
- } else {
- /* may overwrite up to WILDCOPYLENGTH beyond cpy */
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
- }
-
- /* get offset */
- offset = LZ4_readLE16(ip);
- ip += 2;
- match = op - offset;
-
- /* get matchlength */
- length = token & ML_MASK;
-
-_copy_match:
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
- /* Error : offset outside buffers */
- goto _output_error;
- }
-
- /* costs ~1%; silence an msan warning when offset == 0 */
- /*
- * note : when partialDecoding, there is no guarantee that
- * at least 4 bytes remain available in output buffer
- */
- if (!partialDecoding) {
- assert(oend > op);
- assert(oend - op >= 4);
-
- LZ4_write32(op, (U32)offset);
- }
-
- if (length == ML_MASK) {
- unsigned int s;
-
- do {
- s = *ip++;
-
- if ((endOnInput) && (ip > iend - LASTLITERALS))
- goto _output_error;
-
- length += s;
- } while (s == 255);
-
- if ((safeDecode)
- && unlikely(
- (uptrval)(op) + length < (uptrval)op)) {
- /* overflow detection */
- goto _output_error;
- }
- }
-
- length += MINMATCH;
-
- /* match starting within external dictionary */
- if ((dict == usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op + length > oend - LASTLITERALS)) {
- /* doesn't respect parsing restriction */
- if (!partialDecoding)
- goto _output_error;
- length = min(length, (size_t)(oend - op));
- }
-
- if (length <= (size_t)(lowPrefix - match)) {
- /*
- * match fits entirely within external
- * dictionary : just copy
- */
- memmove(op, dictEnd - (lowPrefix - match),
- length);
- op += length;
- } else {
- /*
- * match stretches into both external
- * dictionary and current block
- */
- size_t const copySize = (size_t)(lowPrefix - match);
- size_t const restSize = length - copySize;
-
- LZ4_memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op - lowPrefix)) {
- /* overlap copy */
- BYTE * const endOfMatch = op + restSize;
- const BYTE *copyFrom = lowPrefix;
-
- while (op < endOfMatch)
- *op++ = *copyFrom++;
- } else {
- LZ4_memcpy(op, lowPrefix, restSize);
- op += restSize;
- }
- }
- continue;
- }
-
- /* copy match within block */
- cpy = op + length;
-
- /*
- * partialDecoding :
- * may not respect endBlock parsing restrictions
- */
- assert(op <= oend);
- if (partialDecoding &&
- (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
- size_t const mlen = min(length, (size_t)(oend - op));
- const BYTE * const matchEnd = match + mlen;
- BYTE * const copyEnd = op + mlen;
-
- if (matchEnd > op) {
- /* overlap copy */
- while (op < copyEnd)
- *op++ = *match++;
- } else {
- LZ4_memcpy(op, match, mlen);
- }
- op = copyEnd;
- if (op == oend)
- break;
- continue;
- }
-
- if (unlikely(offset < 8)) {
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += inc32table[offset];
- LZ4_memcpy(op + 4, match, 4);
- match -= dec64table[offset];
- } else {
- LZ4_copy8(op, match);
- match += 8;
- }
-
- op += 8;
-
- if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
- BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
-
- if (cpy > oend - LASTLITERALS) {
- /*
- * Error : last LASTLITERALS bytes
- * must be literals (uncompressed)
- */
- goto _output_error;
- }
-
- if (op < oCopyLimit) {
- LZ4_wildCopy(op, match, oCopyLimit);
- match += oCopyLimit - op;
- op = oCopyLimit;
- }
- while (op < cpy)
- *op++ = *match++;
- } else {
- LZ4_copy8(op, match);
- if (length > 16)
- LZ4_wildCopy(op + 8, match + 8, cpy);
- }
- op = cpy; /* wildcopy correction */
- }
-
- /* end of decoding */
- if (endOnInput) {
- /* Nb of output bytes decoded */
- return (int) (((char *)op) - dst);
- } else {
- /* Nb of input bytes read */
- return (int) (((const char *)ip) - src);
- }
-
- /* Overflow error detected */
-_output_error:
- return (int) (-(((const char *)ip) - src)) - 1;
-}
-
-int LZ4_decompress_safe(const char *source, char *dest,
- int compressedSize, int maxDecompressedSize)
-{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxDecompressedSize,
- endOnInputSize, decode_full_block,
- noDict, (BYTE *)dest, NULL, 0);
-}
-
-int LZ4_decompress_safe_partial(const char *src, char *dst,
- int compressedSize, int targetOutputSize, int dstCapacity)
-{
- dstCapacity = min(targetOutputSize, dstCapacity);
- return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
- endOnInputSize, partial_decode,
- noDict, (BYTE *)dst, NULL, 0);
-}
-
-int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
-{
- return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, decode_full_block,
- withPrefix64k,
- (BYTE *)dest - 64 * KB, NULL, 0);
-}
-
-/* ===== Instantiate a few more decoding cases, used more than once. ===== */
-
-static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
- int compressedSize, int maxOutputSize)
-{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- withPrefix64k,
- (BYTE *)dest - 64 * KB, NULL, 0);
-}
-
-static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
- int compressedSize,
- int maxOutputSize,
- size_t prefixSize)
-{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- noDict,
- (BYTE *)dest - prefixSize, NULL, 0);
-}
-
-static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const void *dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- usingExtDict, (BYTE *)dest,
- (const BYTE *)dictStart, dictSize);
-}
-
-static int LZ4_decompress_fast_extDict(const char *source, char *dest,
- int originalSize,
- const void *dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest,
- 0, originalSize,
- endOnOutputSize, decode_full_block,
- usingExtDict, (BYTE *)dest,
- (const BYTE *)dictStart, dictSize);
-}
-
-/*
- * The "double dictionary" mode, for use with e.g. ring buffers: the first part
- * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
- * These routines are used only once, in LZ4_decompress_*_continue().
- */
-static FORCE_INLINE
-int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- size_t prefixSize,
- const void *dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block,
- usingExtDict, (BYTE *)dest - prefixSize,
- (const BYTE *)dictStart, dictSize);
-}
-
-static FORCE_INLINE
-int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
- int originalSize, size_t prefixSize,
- const void *dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest,
- 0, originalSize,
- endOnOutputSize, decode_full_block,
- usingExtDict, (BYTE *)dest - prefixSize,
- (const BYTE *)dictStart, dictSize);
-}
-
-/* ===== streaming decompression functions ===== */
-
-int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *dictionary, int dictSize)
-{
- LZ4_streamDecode_t_internal *lz4sd =
- &LZ4_streamDecode->internal_donotuse;
-
- lz4sd->prefixSize = (size_t) dictSize;
- lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
- lz4sd->externalDict = NULL;
- lz4sd->extDictSize = 0;
- return 1;
-}
-
-/*
- * *_continue() :
- * These decoding functions allow decompression of multiple blocks
- * in "streaming" mode.
- * Previously decoded blocks must still be available at the memory
- * position where they were decoded.
- * If it's not possible, save the relevant part of
- * decoded data into a safe buffer,
- * and indicate where it stands using LZ4_setStreamDecode()
- */
-int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int compressedSize, int maxOutputSize)
-{
- LZ4_streamDecode_t_internal *lz4sd =
- &LZ4_streamDecode->internal_donotuse;
- int result;
-
- if (lz4sd->prefixSize == 0) {
- /* The first call, no dictionary yet. */
- assert(lz4sd->extDictSize == 0);
- result = LZ4_decompress_safe(source, dest,
- compressedSize, maxOutputSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
- } else if (lz4sd->prefixEnd == (BYTE *)dest) {
- /* They're rolling the current segment. */
- if (lz4sd->prefixSize >= 64 * KB - 1)
- result = LZ4_decompress_safe_withPrefix64k(source, dest,
- compressedSize, maxOutputSize);
- else if (lz4sd->extDictSize == 0)
- result = LZ4_decompress_safe_withSmallPrefix(source,
- dest, compressedSize, maxOutputSize,
- lz4sd->prefixSize);
- else
- result = LZ4_decompress_safe_doubleDict(source, dest,
- compressedSize, maxOutputSize,
- lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
- } else {
- /*
- * The buffer wraps around, or they're
- * switching to another buffer.
- */
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_safe_forceExtDict(source, dest,
- compressedSize, maxOutputSize,
- lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
- }
-
- return result;
-}
-
-int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
- const char *source, char *dest, int originalSize)
-{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
- int result;
-
- if (lz4sd->prefixSize == 0) {
- assert(lz4sd->extDictSize == 0);
- result = LZ4_decompress_fast(source, dest, originalSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
- } else if (lz4sd->prefixEnd == (BYTE *)dest) {
- if (lz4sd->prefixSize >= 64 * KB - 1 ||
- lz4sd->extDictSize == 0)
- result = LZ4_decompress_fast(source, dest,
- originalSize);
- else
- result = LZ4_decompress_fast_doubleDict(source, dest,
- originalSize, lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
- } else {
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_fast_extDict(source, dest,
- originalSize, lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0)
- return result;
- lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
- }
- return result;
-}
-
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const char *dictStart, int dictSize)
-{
- if (dictSize == 0)
- return LZ4_decompress_safe(source, dest,
- compressedSize, maxOutputSize);
- if (dictStart+dictSize == dest) {
- if (dictSize >= 64 * KB - 1)
- return LZ4_decompress_safe_withPrefix64k(source, dest,
- compressedSize, maxOutputSize);
- return LZ4_decompress_safe_withSmallPrefix(source, dest,
- compressedSize, maxOutputSize, dictSize);
- }
- return LZ4_decompress_safe_forceExtDict(source, dest,
- compressedSize, maxOutputSize, dictStart, dictSize);
-}
-
-int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize,
- const char *dictStart, int dictSize)
-{
- if (dictSize == 0 || dictStart + dictSize == dest)
- return LZ4_decompress_fast(source, dest, originalSize);
-
- return LZ4_decompress_fast_extDict(source, dest, originalSize,
- dictStart, dictSize);
-}
-
-#ifndef STATIC
-EXPORT_SYMBOL(LZ4_decompress_safe);
-EXPORT_SYMBOL(LZ4_decompress_safe_partial);
-EXPORT_SYMBOL(LZ4_decompress_fast);
-EXPORT_SYMBOL(LZ4_setStreamDecode);
-EXPORT_SYMBOL(LZ4_decompress_safe_continue);
-EXPORT_SYMBOL(LZ4_decompress_fast_continue);
-EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
-EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 decompressor");
-#endif
Index: lib/lz4/lz4defs.h
===================================================================
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
deleted file mode 100644
--- a/lib/lz4/lz4defs.h (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ /dev/null (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
@@ -1,247 +0,0 @@
-#ifndef __LZ4DEFS_H__
-#define __LZ4DEFS_H__
-
-/*
- * lz4defs.h -- common and architecture specific defines for the kernel usage
-
- * LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2016, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- *
- * Changed for kernel usage by:
- * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
- */
-
-#include
-
-#include
-#include /* memset, memcpy */
-
-#define FORCE_INLINE __always_inline
-
-/*-************************************
- * Basic Types
- **************************************/
-#include
-
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef uintptr_t uptrval;
-
-/*-************************************
- * Architecture specifics
- **************************************/
-#if defined(CONFIG_64BIT)
-#define LZ4_ARCH64 1
-#else
-#define LZ4_ARCH64 0
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-#define LZ4_LITTLE_ENDIAN 1
-#else
-#define LZ4_LITTLE_ENDIAN 0
-#endif
-
-/*-************************************
- * Constants
- **************************************/
-#define MINMATCH 4
-
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5
-#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
-/*
- * ensure it's possible to write 2 x wildcopyLength
- * without overflowing output buffer
- */
-#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
-
-/* Increase this value ==> compression run slower on incompressible data */
-#define LZ4_SKIPTRIGGER 6
-
-#define HASH_UNIT sizeof(size_t)
-
-#define KB (1 << 10)
-#define MB (1 << 20)
-#define GB (1U << 30)
-
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
-#define STEPSIZE sizeof(size_t)
-
-#define ML_BITS 4
-#define ML_MASK ((1U << ML_BITS) - 1)
-#define RUN_BITS (8 - ML_BITS)
-#define RUN_MASK ((1U << RUN_BITS) - 1)
-
-/*-************************************
- * Reading and writing into memory
- **************************************/
-static FORCE_INLINE U16 LZ4_read16(const void *ptr)
-{
- return get_unaligned((const U16 *)ptr);
-}
-
-static FORCE_INLINE U32 LZ4_read32(const void *ptr)
-{
- return get_unaligned((const U32 *)ptr);
-}
-
-static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
-{
- return get_unaligned((const size_t *)ptr);
-}
-
-static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
-{
- put_unaligned(value, (U16 *)memPtr);
-}
-
-static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
-{
- put_unaligned(value, (U32 *)memPtr);
-}
-
-static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
-{
- return get_unaligned_le16(memPtr);
-}
-
-static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
-{
- return put_unaligned_le16(value, memPtr);
-}
-
-/*
- * LZ4 relies on memcpy with a constant size being inlined. In freestanding
- * environments, the compiler can't assume the implementation of memcpy() is
- * standard compliant, so apply its specialized memcpy() inlining logic. When
- * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
- * as-if it were standard compliant, so it can inline it in freestanding
- * environments. This is needed when decompressing the Linux Kernel, for example.
- */
-#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
-#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
-
-static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
-{
-#if LZ4_ARCH64
- U64 a = get_unaligned((const U64 *)src);
-
- put_unaligned(a, (U64 *)dst);
-#else
- U32 a = get_unaligned((const U32 *)src);
- U32 b = get_unaligned((const U32 *)src + 1);
-
- put_unaligned(a, (U32 *)dst);
- put_unaligned(b, (U32 *)dst + 1);
-#endif
-}
-
-/*
- * customized variant of memcpy,
- * which can overwrite up to 7 bytes beyond dstEnd
- */
-static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
- const void *srcPtr, void *dstEnd)
-{
- BYTE *d = (BYTE *)dstPtr;
- const BYTE *s = (const BYTE *)srcPtr;
- BYTE *const e = (BYTE *)dstEnd;
-
- do {
- LZ4_copy8(d, s);
- d += 8;
- s += 8;
- } while (d < e);
-}
-
-static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
-{
-#if LZ4_LITTLE_ENDIAN
- return __ffs(val) >> 3;
-#else
- return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
-#endif
-}
-
-static FORCE_INLINE unsigned int LZ4_count(
- const BYTE *pIn,
- const BYTE *pMatch,
- const BYTE *pInLimit)
-{
- const BYTE *const pStart = pIn;
-
- while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
- size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
-
- if (!diff) {
- pIn += STEPSIZE;
- pMatch += STEPSIZE;
- continue;
- }
-
- pIn += LZ4_NbCommonBytes(diff);
-
- return (unsigned int)(pIn - pStart);
- }
-
-#if LZ4_ARCH64
- if ((pIn < (pInLimit - 3))
- && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
- pIn += 4;
- pMatch += 4;
- }
-#endif
-
- if ((pIn < (pInLimit - 1))
- && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
- pIn += 2;
- pMatch += 2;
- }
-
- if ((pIn < pInLimit) && (*pMatch == *pIn))
- pIn++;
-
- return (unsigned int)(pIn - pStart);
-}
-
-typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
-
-#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
-
-#endif
Index: lib/lz4/lz4hc_compress.c
===================================================================
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
deleted file mode 100644
--- a/lib/lz4/lz4hc_compress.c (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
+++ /dev/null (revision 802d968fb2c726f0a9dd88fed80a003d724769d4)
@@ -1,768 +0,0 @@
-/*
- * LZ4 HC - High Compression Mode of LZ4
- * Copyright (C) 2011-2015, Yann Collet.
- *
- * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- *
- * Changed for kernel usage by:
- * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
- */
-
-/*-************************************
- * Dependencies
- **************************************/
-#include
-#include "lz4defs.h"
-#include
-#include
-#include /* memset */
-
-/* *************************************
- * Local Constants and types
- ***************************************/
-
-#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
-
-#define HASH_FUNCTION(i) (((i) * 2654435761U) \
- >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
-#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
-
-static U32 LZ4HC_hashPtr(const void *ptr)
-{
- return HASH_FUNCTION(LZ4_read32(ptr));
-}
-
-/**************************************
- * HC Compression
- **************************************/
-static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
-{
- memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
- memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
- hc4->nextToUpdate = 64 * KB;
- hc4->base = start - 64 * KB;
- hc4->end = start;
- hc4->dictBase = start - 64 * KB;
- hc4->dictLimit = 64 * KB;
- hc4->lowLimit = 64 * KB;
-}
-
-/* Update chains up to ip (excluded) */
-static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
- const BYTE *ip)
-{
- U16 * const chainTable = hc4->chainTable;
- U32 * const hashTable = hc4->hashTable;
- const BYTE * const base = hc4->base;
- U32 const target = (U32)(ip - base);
- U32 idx = hc4->nextToUpdate;
-
- while (idx < target) {
- U32 const h = LZ4HC_hashPtr(base + idx);
- size_t delta = idx - hashTable[h];
-
- if (delta > MAX_DISTANCE)
- delta = MAX_DISTANCE;
-
- DELTANEXTU16(idx) = (U16)delta;
-
- hashTable[h] = idx;
- idx++;
- }
-
- hc4->nextToUpdate = target;
-}
-
-static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
- LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
- const BYTE *ip,
- const BYTE * const iLimit,
- const BYTE **matchpos,
- const int maxNbAttempts)
-{
- U16 * const chainTable = hc4->chainTable;
- U32 * const HashTable = hc4->hashTable;
- const BYTE * const base = hc4->base;
- const BYTE * const dictBase = hc4->dictBase;
- const U32 dictLimit = hc4->dictLimit;
- const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
- ? hc4->lowLimit
- : (U32)(ip - base) - (64 * KB - 1);
- U32 matchIndex;
- int nbAttempts = maxNbAttempts;
- size_t ml = 0;
-
- /* HC4 match finder */
- LZ4HC_Insert(hc4, ip);
- matchIndex = HashTable[LZ4HC_hashPtr(ip)];
-
- while ((matchIndex >= lowLimit)
- && (nbAttempts)) {
- nbAttempts--;
- if (matchIndex >= dictLimit) {
- const BYTE * const match = base + matchIndex;
-
- if (*(match + ml) == *(ip + ml)
- && (LZ4_read32(match) == LZ4_read32(ip))) {
- size_t const mlt = LZ4_count(ip + MINMATCH,
- match + MINMATCH, iLimit) + MINMATCH;
-
- if (mlt > ml) {
- ml = mlt;
- *matchpos = match;
- }
- }
- } else {
- const BYTE * const match = dictBase + matchIndex;
-
- if (LZ4_read32(match) == LZ4_read32(ip)) {
- size_t mlt;
- const BYTE *vLimit = ip
- + (dictLimit - matchIndex);
-
- if (vLimit > iLimit)
- vLimit = iLimit;
- mlt = LZ4_count(ip + MINMATCH,
- match + MINMATCH, vLimit) + MINMATCH;
- if ((ip + mlt == vLimit)
- && (vLimit < iLimit))
- mlt += LZ4_count(ip + mlt,
- base + dictLimit,
- iLimit);
- if (mlt > ml) {
- /* virtual matchpos */
- ml = mlt;
- *matchpos = base + matchIndex;
- }
- }
- }
- matchIndex -= DELTANEXTU16(matchIndex);
- }
-
- return (int)ml;
-}
-
-static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
- LZ4HC_CCtx_internal *hc4,
- const BYTE * const ip,
- const BYTE * const iLowLimit,
- const BYTE * const iHighLimit,
- int longest,
- const BYTE **matchpos,
- const BYTE **startpos,
- const int maxNbAttempts)
-{
- U16 * const chainTable = hc4->chainTable;
- U32 * const HashTable = hc4->hashTable;
- const BYTE * const base = hc4->base;
- const U32 dictLimit = hc4->dictLimit;
- const BYTE * const lowPrefixPtr = base + dictLimit;
- const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
- ? hc4->lowLimit
- : (U32)(ip - base) - (64 * KB - 1);
- const BYTE * const dictBase = hc4->dictBase;
- U32 matchIndex;
- int nbAttempts = maxNbAttempts;
- int delta = (int)(ip - iLowLimit);
-
- /* First Match */
- LZ4HC_Insert(hc4, ip);
- matchIndex = HashTable[LZ4HC_hashPtr(ip)];
-
- while ((matchIndex >= lowLimit)
- && (nbAttempts)) {
- nbAttempts--;
- if (matchIndex >= dictLimit) {
- const BYTE *matchPtr = base + matchIndex;
-
- if (*(iLowLimit + longest)
- == *(matchPtr - delta + longest)) {
- if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
- int mlt = MINMATCH + LZ4_count(
- ip + MINMATCH,
- matchPtr + MINMATCH,
- iHighLimit);
- int back = 0;
-
- while ((ip + back > iLowLimit)
- && (matchPtr + back > lowPrefixPtr)
- && (ip[back - 1] == matchPtr[back - 1]))
- back--;
-
- mlt -= back;
-
- if (mlt > longest) {
- longest = (int)mlt;
- *matchpos = matchPtr + back;
- *startpos = ip + back;
- }
- }
- }
- } else {
- const BYTE * const matchPtr = dictBase + matchIndex;
-
- if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
- size_t mlt;
- int back = 0;
- const BYTE *vLimit = ip + (dictLimit - matchIndex);
-
- if (vLimit > iHighLimit)
- vLimit = iHighLimit;
-
- mlt = LZ4_count(ip + MINMATCH,
- matchPtr + MINMATCH, vLimit) + MINMATCH;
-
- if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
- mlt += LZ4_count(ip + mlt, base + dictLimit,
- iHighLimit);
- while ((ip + back > iLowLimit)
- && (matchIndex + back > lowLimit)
- && (ip[back - 1] == matchPtr[back - 1]))
- back--;
-
- mlt -= back;
-
- if ((int)mlt > longest) {
- longest = (int)mlt;
- *matchpos = base + matchIndex + back;
- *startpos = ip + back;
- }
- }
- }
-
- matchIndex -= DELTANEXTU16(matchIndex);
- }
-
- return longest;
-}
-
-static FORCE_INLINE int LZ4HC_encodeSequence(
- const BYTE **ip,
- BYTE **op,
- const BYTE **anchor,
- int matchLength,
- const BYTE * const match,
- limitedOutput_directive limitedOutputBuffer,
- BYTE *oend)
-{
- int length;
- BYTE *token;
-
- /* Encode Literal length */
- length = (int)(*ip - *anchor);
- token = (*op)++;
-
- if ((limitedOutputBuffer)
- && ((*op + (length>>8)
- + length + (2 + 1 + LASTLITERALS)) > oend)) {
- /* Check output limit */
- return 1;
- }
- if (length >= (int)RUN_MASK) {
- int len;
-
- *token = (RUN_MASK< 254 ; len -= 255)
- *(*op)++ = 255;
- *(*op)++ = (BYTE)len;
- } else
- *token = (BYTE)(length<>8)
- + (1 + LASTLITERALS) > oend)) {
- /* Check output limit */
- return 1;
- }
-
- if (length >= (int)ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
-
- for (; length > 509 ; length -= 510) {
- *(*op)++ = 255;
- *(*op)++ = 255;
- }
-
- if (length > 254) {
- length -= 255;
- *(*op)++ = 255;
- }
-
- *(*op)++ = (BYTE)length;
- } else
- *token += (BYTE)(length);
-
- /* Prepare next loop */
- *ip += matchLength;
- *anchor = *ip;
-
- return 0;
-}
-
-static int LZ4HC_compress_generic(
- LZ4HC_CCtx_internal *const ctx,
- const char * const source,
- char * const dest,
- int const inputSize,
- int const maxOutputSize,
- int compressionLevel,
- limitedOutput_directive limit
- )
-{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *anchor = ip;
- const BYTE * const iend = ip + inputSize;
- const BYTE * const mflimit = iend - MFLIMIT;
- const BYTE * const matchlimit = (iend - LASTLITERALS);
-
- BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + maxOutputSize;
-
- unsigned int maxNbAttempts;
- int ml, ml2, ml3, ml0;
- const BYTE *ref = NULL;
- const BYTE *start2 = NULL;
- const BYTE *ref2 = NULL;
- const BYTE *start3 = NULL;
- const BYTE *ref3 = NULL;
- const BYTE *start0;
- const BYTE *ref0;
-
- /* init */
- if (compressionLevel > LZ4HC_MAX_CLEVEL)
- compressionLevel = LZ4HC_MAX_CLEVEL;
- if (compressionLevel < 1)
- compressionLevel = LZ4HC_DEFAULT_CLEVEL;
- maxNbAttempts = 1 << (compressionLevel - 1);
- ctx->end += inputSize;
-
- ip++;
-
- /* Main Loop */
- while (ip < mflimit) {
- ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
- matchlimit, (&ref), maxNbAttempts);
- if (!ml) {
- ip++;
- continue;
- }
-
- /* saved, in case we would skip too much */
- start0 = ip;
- ref0 = ref;
- ml0 = ml;
-
-_Search2:
- if (ip + ml < mflimit)
- ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
- ip + ml - 2, ip + 0,
- matchlimit, ml, &ref2,
- &start2, maxNbAttempts);
- else
- ml2 = ml;
-
- if (ml2 == ml) {
- /* No better match */
- if (LZ4HC_encodeSequence(&ip, &op,
- &anchor, ml, ref, limit, oend))
- return 0;
- continue;
- }
-
- if (start0 < ip) {
- if (start2 < ip + ml0) {
- /* empirical */
- ip = start0;
- ref = ref0;
- ml = ml0;
- }
- }
-
- /* Here, start0 == ip */
- if ((start2 - ip) < 3) {
- /* First Match too small : removed */
- ml = ml2;
- ip = start2;
- ref = ref2;
- goto _Search2;
- }
-
-_Search3:
- /*
- * Currently we have :
- * ml2 > ml1, and
- * ip1 + 3 <= ip2 (usually < ip1 + ml1)
- */
- if ((start2 - ip) < OPTIMAL_ML) {
- int correction;
- int new_ml = ml;
-
- if (new_ml > OPTIMAL_ML)
- new_ml = OPTIMAL_ML;
- if (ip + new_ml > start2 + ml2 - MINMATCH)
- new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
-
- correction = new_ml - (int)(start2 - ip);
-
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- }
- /*
- * Now, we have start2 = ip + new_ml,
- * with new_ml = min(ml, OPTIMAL_ML = 18)
- */
-
- if (start2 + ml2 < mflimit)
- ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
- start2 + ml2 - 3, start2,
- matchlimit, ml2, &ref3, &start3,
- maxNbAttempts);
- else
- ml3 = ml2;
-
- if (ml3 == ml2) {
- /* No better match : 2 sequences to encode */
- /* ip & ref are known; Now for ml */
- if (start2 < ip + ml)
- ml = (int)(start2 - ip);
- /* Now, encode 2 sequences */
- if (LZ4HC_encodeSequence(&ip, &op, &anchor,
- ml, ref, limit, oend))
- return 0;
- ip = start2;
- if (LZ4HC_encodeSequence(&ip, &op, &anchor,
- ml2, ref2, limit, oend))
- return 0;
- continue;
- }
-
- if (start3 < ip + ml + 3) {
- /* Not enough space for match 2 : remove it */
- if (start3 >= (ip + ml)) {
- /* can write Seq1 immediately
- * ==> Seq2 is removed,
- * so Seq3 becomes Seq1
- */
- if (start2 < ip + ml) {
- int correction = (int)(ip + ml - start2);
-
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- if (ml2 < MINMATCH) {
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- }
- }
-
- if (LZ4HC_encodeSequence(&ip, &op, &anchor,
- ml, ref, limit, oend))
- return 0;
- ip = start3;
- ref = ref3;
- ml = ml3;
-
- start0 = start2;
- ref0 = ref2;
- ml0 = ml2;
- goto _Search2;
- }
-
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- goto _Search3;
- }
-
- /*
- * OK, now we have 3 ascending matches;
- * let's write at least the first one
- * ip & ref are known; Now for ml
- */
- if (start2 < ip + ml) {
- if ((start2 - ip) < (int)ML_MASK) {
- int correction;
-
- if (ml > OPTIMAL_ML)
- ml = OPTIMAL_ML;
- if (ip + ml > start2 + ml2 - MINMATCH)
- ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = ml - (int)(start2 - ip);
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- } else
- ml = (int)(start2 - ip);
- }
- if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
- ref, limit, oend))
- return 0;
-
- ip = start2;
- ref = ref2;
- ml = ml2;
-
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
-
- goto _Search3;
- }
-
- /* Encode Last Literals */
- {
- int lastRun = (int)(iend - anchor);
-
- if ((limit)
- && (((char *)op - dest) + lastRun + 1
- + ((lastRun + 255 - RUN_MASK)/255)
- > (U32)maxOutputSize)) {
- /* Check output limit */
- return 0;
- }
- if (lastRun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK< 254 ; lastRun -= 255)
- *op++ = 255;
- *op++ = (BYTE) lastRun;
- } else
- *op++ = (BYTE)(lastRun<internal_donotuse;
-
- if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
- /* Error : state is not aligned
- * for pointers (32 or 64 bits)
- */
- return 0;
- }
-
- LZ4HC_init(ctx, (const BYTE *)src);
-
- if (maxDstSize < LZ4_compressBound(srcSize))
- return LZ4HC_compress_generic(ctx, src, dst,
- srcSize, maxDstSize, compressionLevel, limitedOutput);
- else
- return LZ4HC_compress_generic(ctx, src, dst,
- srcSize, maxDstSize, compressionLevel, noLimit);
-}
-
-int LZ4_compress_HC(const char *src, char *dst, int srcSize,
- int maxDstSize, int compressionLevel, void *wrkmem)
-{
- return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
- srcSize, maxDstSize, compressionLevel);
-}
-EXPORT_SYMBOL(LZ4_compress_HC);
-
-/**************************************
- * Streaming Functions
- **************************************/
-void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
-{
- LZ4_streamHCPtr->internal_donotuse.base = NULL;
- LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
-}
-
-int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
- const char *dictionary,
- int dictSize)
-{
- LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-
- if (dictSize > 64 * KB) {
- dictionary += dictSize - 64 * KB;
- dictSize = 64 * KB;
- }
- LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
- if (dictSize >= 4)
- LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
- ctxPtr->end = (const BYTE *)dictionary + dictSize;
- return dictSize;
-}
-EXPORT_SYMBOL(LZ4_loadDictHC);
-
-/* compression */
-
-static void LZ4HC_setExternalDict(
- LZ4HC_CCtx_internal *ctxPtr,
- const BYTE *newBlock)
-{
- if (ctxPtr->end >= ctxPtr->base + 4) {
- /* Referencing remaining dictionary content */
- LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
- }
-
- /*
- * Only one memory segment for extDict,
- * so any previous extDict is lost at this stage
- */
- ctxPtr->lowLimit = ctxPtr->dictLimit;
- ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
- ctxPtr->dictBase = ctxPtr->base;
- ctxPtr->base = newBlock - ctxPtr->dictLimit;
- ctxPtr->end = newBlock;
- /* match referencing will resume from there */
- ctxPtr->nextToUpdate = ctxPtr->dictLimit;
-}
-
-static int LZ4_compressHC_continue_generic(
- LZ4_streamHC_t *LZ4_streamHCPtr,
- const char *source,
- char *dest,
- int inputSize,
- int maxOutputSize,
- limitedOutput_directive limit)
-{
- LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-
- /* auto - init if forgotten */
- if (ctxPtr->base == NULL)
- LZ4HC_init(ctxPtr, (const BYTE *) source);
-
- /* Check overflow */
- if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
- size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
- - ctxPtr->dictLimit;
- if (dictSize > 64 * KB)
- dictSize = 64 * KB;
- LZ4_loadDictHC(LZ4_streamHCPtr,
- (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
- }
-
- /* Check if blocks follow each other */
- if ((const BYTE *)source != ctxPtr->end)
- LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
-
- /* Check overlapping input/dictionary space */
- {
- const BYTE *sourceEnd = (const BYTE *) source + inputSize;
- const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
- const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
-
- if ((sourceEnd > dictBegin)
- && ((const BYTE *)source < dictEnd)) {
- if (sourceEnd > dictEnd)
- sourceEnd = dictEnd;
- ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
-
- if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
- ctxPtr->lowLimit = ctxPtr->dictLimit;
- }
- }
-
- return LZ4HC_compress_generic(ctxPtr, source, dest,
- inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
-}
-
-int LZ4_compress_HC_continue(
- LZ4_streamHC_t *LZ4_streamHCPtr,
- const char *source,
- char *dest,
- int inputSize,
- int maxOutputSize)
-{
- if (maxOutputSize < LZ4_compressBound(inputSize))
- return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
- source, dest, inputSize, maxOutputSize, limitedOutput);
- else
- return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
- source, dest, inputSize, maxOutputSize, noLimit);
-}
-EXPORT_SYMBOL(LZ4_compress_HC_continue);
-
-/* dictionary saving */
-
-int LZ4_saveDictHC(
- LZ4_streamHC_t *LZ4_streamHCPtr,
- char *safeBuffer,
- int dictSize)
-{
- LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
- int const prefixSize = (int)(streamPtr->end
- - (streamPtr->base + streamPtr->dictLimit));
-
- if (dictSize > 64 * KB)
- dictSize = 64 * KB;
- if (dictSize < 4)
- dictSize = 0;
- if (dictSize > prefixSize)
- dictSize = prefixSize;
-
- memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
-
- {
- U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
-
- streamPtr->end = (const BYTE *)safeBuffer + dictSize;
- streamPtr->base = streamPtr->end - endIndex;
- streamPtr->dictLimit = endIndex - dictSize;
- streamPtr->lowLimit = endIndex - dictSize;
-
- if (streamPtr->nextToUpdate < streamPtr->dictLimit)
- streamPtr->nextToUpdate = streamPtr->dictLimit;
- }
- return dictSize;
-}
-EXPORT_SYMBOL(LZ4_saveDictHC);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 HC compressor");
diff --git a/fs/f2fs/lz4armv8/lz4accel.h b/lib/lz4/lz4armv8/lz4accel.h
rename from fs/f2fs/lz4armv8/lz4accel.h
rename to lib/lz4/lz4armv8/lz4accel.h
diff --git a/fs/f2fs/lz4armv8/lz4armv8.S b/lib/lz4/lz4armv8/lz4armv8.S
rename from fs/f2fs/lz4armv8/lz4armv8.S
rename to lib/lz4/lz4armv8/lz4armv8.S
index b9e6e0f7191575015669e3e954bf4959f43c7836..1d836f686dd9e2322a24a8707c6cafc8a83c6906
GIT binary patch
literal 8088
zc$~dhdvDvw5&xU~6k8hwY=e=wq$JC_IA|_buEx1`VCRaqX+TiqN+Lp$G?$VUANt)p
zv&+}=Vf*gt8iqjbynZ`7ujTMWCnqF)9J%RcHyzyJ+tt5D#AjZ-Nm9mn5Jx0VvS1TD
zx*5JKT+Vy~4+Mqz_L7R0ps+6*>oW%*f|8BTJWOZRiH>t+5+7T*cWs4K@UX
zQD+)&vmjyy-zG_%X5_D1_kjguetYxrjx2+a@$Woi8v=U_c0sfv%Qz()cOPuFn}%+l
zFy;?X={(BP?<9$XDC+_(c>a>OsXN1`y-CiAw@p(9>^|dwM|#`!eH0-{mI^)!e04g{
zTtJ+}Jjk6-_Ev+1MZU^l*KF~4n|iEeW&|ubsU
zK$}6{P<&mwLAXsBkcIr|44jvFYbRvUDqD-S>5POy#!@$g)nf5%M!Y!rUhJ{(fSE4~
zU}6yhPCxY75)M&Ot!ewyl2GZ-Nz;;q>YQa7ZyeAElrsW)bOTVpjv|r8IX^XP
z{B_r>LJv;1Ni=_xbJJ<;}J8
z;pSpa-kp=5=eO7Mt77?RY><&b%wyFlj&f=ble#xl+8$^uKSsu?fZmoz`<;>T+)$i=R^&Rmy|x09cxF08Mpu}%^FSX!
z(*Ii?K03+AWRqukIEJEx5e7$X;4+3-$ceSoHDo6V&q53O>BfMgF_8VMh&;?nOE{5S
zfb)C?MfMwuX2oePG?DM}*uf6XPc>z(Pd+0n`SE+kBs)(vwdflX;o(RMtytFR3Njud
zUu%|)LektXOVK(#4*TJ|xLlNYdE
zV;RDM;AHa1AY)MpPAJHYWDcX6lmjubP=o{4fToKM=}4ZIHd*$9ItNf=0M&;GOfF$j
z9tobvKn89cK@e}5n3{Y9+^ZmmXv<*u2^x+HL54(EdF~1mc{t6jc0`Bti9Bp;JVsBj
zd(B=5Pn1AKqX+vVi4F^a6UNc1#JJ`T9nZ>?Qw9u?W!18rB4jcmNKp^%Cb*nt%KU25
zUYfDvKD=QZCuNdp_gFF&%)*HiEZB9bgr3u?j#8uLk#q`qztG0u#Yr7OVLU6H1=fks
z59cPV^?1WmQ|BLfnt&VTj$Et^J4@6G3*&!Q
z*p}HLVt4VV6#^e&L}tD$OA0bbG?N4zdf(0s178U8?V=S?uy3hL9z&B%#X-cHw9^xP
zd8#8y=#p7Xvb7xlvBd~f;%EB>YeAT;V8J5Y4KRFL
z=hHcM^%sN!y-56a`EC{S?&>gIR6AO;6kBg5d{={K$V*J!jrpF=Y8fF7%2}bdpN`2b
zkEW*M#UMvV;9|HY<$5E(2kNwIvk?4*aY3gEb4I4p%8o
zA7D|iU|H$Hu2ZXZ9=MB+38Xy`O1od>T&r5hOQrOUl6lHY8DrgbltQFYyXR19Mf{K1
ze--rWK%>w`p$*WyndYYiHy)W30Gy86LAzq@pna7lC7e5MJGTI!G|8ri>KHJIdVMJA
zPr7MbxT&~{Wo)16gT#Uku!_h?*YE!MH|OIo*B_j_&+l(PA9Z|RItGxvH?~TCetpp`
zEm$tEujjYU)%zdkS4XmQ7zEd^oU5A;@2`$!ACcwo4Yn9*P$fxjkI_
z#l2MYuIjqcY^8_d4q4<_V|4yf$mMZZ-eby#M(0P1vpPmo*Gqsid5X2$XFYr>h~*?w
zyEn;s1of89qo^y&&9~49cR5M4P5kJ9pRq0@;<+3rk|Gaa&bKLfTW=b0BituAtY9-q
z+zfUX$=hOvZ88ww<5HF|H^bgpaPZ)f9o>GqJ2oUAO2aGOpPN@!@flJ8p~o{q5kv)t
zj*;30!Smu&wCj9GC@;%3bg&+z9q0hXy$N~o11d*Yuy2dJAxH50=iK_S;*=7jbv&z8
z4e%_+ZX=RcculB2$v=oz;@j4HRNxNy%HeTUbF*)imIWqp&uZqcWx5;r!
z@(-;jCMUfnCtdQtb5SvQ`jpAj9$mkAT`@U*#^khL`)?pACeNNXdG@607g5zxV7|mA
zHJ?%Z9D{0;zR)J6&ztS%QEj#gP`^HEy~u1An`*PG0`)7S_DjvGoT|;964bAWwu{X3
z%&N_?UT>z&`J?Emwq@l1X7Otp))b$uQ}GNBBg2}AMbWXaZw&@$K&qeUTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
--- a/lib/zstd/compress/zstd_ldm_geartab.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_ldm_geartab.h (date 1740124241493)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,10 @@
#ifndef ZSTD_LDM_GEARTAB_H
#define ZSTD_LDM_GEARTAB_H
-static U64 ZSTD_ldm_gearTab[256] = {
+#include "../common/compiler.h" /* UNUSED_ATTR */
+#include "../common/mem.h" /* U64 */
+
+static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = {
0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
Index: lib/zstd/compress/zstd_cwksp.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
--- a/lib/zstd/compress/zstd_cwksp.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_cwksp.h (date 1740124241440)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,8 +15,10 @@
/*-*************************************
* Dependencies
***************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
-
+#include "../common/portability_macros.h"
+#include "../common/compiler.h" /* ZS2_isPower2 */
/*-*************************************
* Constants
@@ -32,13 +35,18 @@
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
/*-*************************************
* Structures
***************************************/
typedef enum {
ZSTD_cwksp_alloc_objects,
- ZSTD_cwksp_alloc_buffers,
- ZSTD_cwksp_alloc_aligned
+ ZSTD_cwksp_alloc_aligned_init_once,
+ ZSTD_cwksp_alloc_aligned,
+ ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;
/*
@@ -91,8 +99,8 @@
*
* Workspace Layout:
*
- * [ ... workspace ... ]
- * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ * [ ... workspace ... ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
*
* The various objects that live in the workspace are divided into the
* following categories, and are allocated separately:
@@ -114,10 +122,20 @@
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- * Their sizes depend on the cparams.
+ * Their sizes depend on the cparams. These tables are 64-byte aligned.
*
- * - Aligned: these buffers are used for various purposes that require 4 byte
- * alignment, but don't require any initialization before they're used.
+ * - Init once: these buffers require to be initialized at least once before
+ * use. They should be used when we want to skip memory initialization
+ * while not triggering memory checkers (like Valgrind) when reading from
+ * from this memory without writing to it first.
+ * These buffers should be used carefully as they might contain data
+ * from previous compressions.
+ * Buffers are aligned to 64 bytes.
+ *
+ * - Aligned: these buffers don't require any initialization before they're
+ * used. The user of the buffer should make sure they write into a buffer
+ * location before reading from it.
+ * Buffers are aligned to 64 bytes.
*
* - Buffers: these buffers are used for various purposes that don't require
* any alignment or initialization before they're used. This means they can
@@ -129,9 +147,9 @@
* correctly packed into the workspace buffer. That order is:
*
* 1. Objects
- * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
*
* Attempts to reserve objects of different types out of order will fail.
*/
@@ -143,6 +161,7 @@
void* tableEnd;
void* tableValidEnd;
void* allocStart;
+ void* initOnceStart;
BYTE allocFailed;
int workspaceOversizedDuration;
@@ -155,6 +174,7 @@
***************************************/
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
(void)ws;
@@ -164,14 +184,16 @@
assert(ws->tableEnd <= ws->allocStart);
assert(ws->tableValidEnd <= ws->allocStart);
assert(ws->allocStart <= ws->workspaceEnd);
+ assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+ assert(ws->workspace <= ws->initOnceStart);
}
/*
* Align must be a power of 2.
*/
-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
size_t const mask = align - 1;
- assert((align & mask) == 0);
+ assert(ZSTD_isPower2(align));
return (size + mask) & ~mask;
}
@@ -184,6 +206,8 @@
* Since tables aren't currently redzoned, you don't need to call through this
* to figure out how much space you need for the matchState tables. Everything
* else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
if (size == 0)
@@ -191,54 +215,68 @@
return size;
}
-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
- ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
- assert(phase >= ws->phase);
- if (phase > ws->phase) {
- if (ws->phase < ZSTD_cwksp_alloc_buffers &&
- phase >= ZSTD_cwksp_alloc_buffers) {
- ws->tableValidEnd = ws->objectEnd;
- }
- if (ws->phase < ZSTD_cwksp_alloc_aligned &&
- phase >= ZSTD_cwksp_alloc_aligned) {
- /* If unaligned allocations down from a too-large top have left us
- * unaligned, we need to realign our alloc ptr. Technically, this
- * can consume space that is unaccounted for in the neededSpace
- * calculation. However, I believe this can only happen when the
- * workspace is too large, and specifically when it is too large
- * by a larger margin than the space that will be consumed. */
- /* TODO: cleaner, compiler warning friendly way to do this??? */
- ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
- if (ws->allocStart < ws->tableValidEnd) {
- ws->tableValidEnd = ws->allocStart;
- }
- }
- ws->phase = phase;
- }
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
+}
+
+/*
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
+ return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
+}
+
+/*
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+ /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+ * bytes to align the beginning of tables section and end of buffers;
+ */
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
+ return slackSpace;
+}
+
+
+/*
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+ size_t const alignBytesMask = alignBytes - 1;
+ size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+ assert(ZSTD_isPower2(alignBytes));
+ assert(bytes < alignBytes);
+ return bytes;
}
/*
- * Returns whether this object/buffer/etc was allocated in this workspace.
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
*/
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
- return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
+{
+ char* endPtr = (char*)ws->workspaceEnd;
+ assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
+ endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return (void*)endPtr;
}
/*
* Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_internal(
- ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
- void* alloc;
- void* bottom = ws->tableEnd;
- ZSTD_cwksp_internal_advance_phase(ws, phase);
- alloc = (BYTE *)ws->allocStart - bytes;
-
- if (bytes == 0)
- return NULL;
-
-
- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+ void* const alloc = (BYTE*)ws->allocStart - bytes;
+ void* const bottom = ws->tableEnd;
+ DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
@@ -247,10 +285,72 @@
ws->allocFailed = 1;
return NULL;
}
+ /* the area is reserved from the end of wksp.
+ * If it overlaps with tableValidEnd, it voids guarantees on values' range */
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
+ return alloc;
+}
+
+/*
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
+ /* Going from allocating objects to allocating initOnce / tables */
+ if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+ phase >= ZSTD_cwksp_alloc_aligned_init_once) {
+ ws->tableValidEnd = ws->objectEnd;
+ ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
+
+ { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+ void *const alloc = ws->objectEnd;
+ size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void *const objectEnd = (BYTE *) alloc + bytesToAlign;
+ DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+ RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+ "table phase - alignment initial allocation failed!");
+ ws->objectEnd = objectEnd;
+ ws->tableEnd = objectEnd; /* table area starts being empty */
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ }
+ }
+ ws->phase = phase;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+ return 0;
+}
+
+/*
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
+}
+
+/*
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
+ void* alloc;
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+ return NULL;
+ }
+
+
+ alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
return alloc;
@@ -259,33 +359,76 @@
/*
* Reserves and returns unaligned memory.
*/
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}
/*
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
- assert((bytes & (sizeof(U32)-1)) == 0);
- return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
+{
+ size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ if(ptr && ptr < ws->initOnceStart) {
+ /* We assume the memory following the current allocation is either:
+ * 1. Not usable as initOnce memory (end of workspace)
+ * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+ * 3. An ASAN redzone, in which case we don't want to write on it
+ * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+ * Note that we assume here that MSAN and ASAN cannot run in the same time. */
+ ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+ ws->initOnceStart = ptr;
+ }
+ return ptr;
}
/*
- * Aligned on sizeof(unsigned). These buffers have the special property that
- * their values remain constrained, allowing us to re-use them without
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
+{
+ void* const ptr = ZSTD_cwksp_reserve_internal(ws,
+ ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ return ptr;
+}
+
+/*
+ * Aligned on 64 bytes. These buffers have the special property that
+ * their values remain constrained, allowing us to reuse them without
* memset()-ing them.
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
- const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
- void* alloc = ws->tableEnd;
- void* end = (BYTE *)alloc + bytes;
- void* top = ws->allocStart;
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
+ void* alloc;
+ void* end;
+ void* top;
+
+ /* We can only start allocating tables after we are done reserving space for objects at the
+ * start of the workspace */
+ if(ws->phase < phase) {
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
+ }
+ alloc = ws->tableEnd;
+ end = (BYTE *)alloc + bytes;
+ top = ws->allocStart;
DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
assert((bytes & (sizeof(U32)-1)) == 0);
- ZSTD_cwksp_internal_advance_phase(ws, phase);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(end <= top);
if (end > top) {
@@ -296,27 +439,31 @@
ws->tableEnd = end;
+ assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
return alloc;
}
/*
* Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
- size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+ size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
void* alloc = ws->objectEnd;
void* end = (BYTE*)alloc + roundedBytes;
- DEBUGLOG(5,
+ DEBUGLOG(4,
"cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
- assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
- assert((bytes & (sizeof(void*)-1)) == 0);
+ assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+ assert(bytes % ZSTD_ALIGNOF(void*) == 0);
ZSTD_cwksp_assert_internal_consistency(ws);
/* we must be in the first phase, no advance is possible */
if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
- DEBUGLOG(4, "cwksp: object alloc failed!");
+ DEBUGLOG(3, "cwksp: object alloc failed!");
ws->allocFailed = 1;
return NULL;
}
@@ -327,8 +474,23 @@
return alloc;
}
+/*
+ * with alignment control
+ * Note : should happen only once, at workspace first initialization
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
+{
+ size_t const mask = alignment - 1;
+ size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
+ void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
+ if (start == NULL) return NULL;
+ if (surplus == 0) return start;
+ assert(ZSTD_isPower2(alignment));
+ return (void*)(((size_t)start + surplus) & ~mask);
+}
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
@@ -356,7 +518,7 @@
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
- ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
}
ZSTD_cwksp_mark_tables_clean(ws);
}
@@ -365,7 +527,8 @@
* Invalidates table allocations.
* All other allocations remain valid.
*/
-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
+{
DEBUGLOG(4, "cwksp: clearing tables!");
@@ -383,14 +546,23 @@
ws->tableEnd = ws->objectEnd;
- ws->allocStart = ws->workspaceEnd;
+ ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
ws->allocFailed = 0;
- if (ws->phase > ZSTD_cwksp_alloc_buffers) {
- ws->phase = ZSTD_cwksp_alloc_buffers;
+ if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+ ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
}
ZSTD_cwksp_assert_internal_consistency(ws);
}
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
/*
* The provided workspace takes ownership of the buffer [start, start+size).
* Any existing values in the workspace are ignored (the previously managed
@@ -403,6 +575,7 @@
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
+ ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
ws->phase = ZSTD_cwksp_alloc_objects;
ws->isStatic = isStatic;
ZSTD_cwksp_clear(ws);
@@ -434,15 +607,6 @@
ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}
-MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
-}
-
-MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
- + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
-}
-
MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
@@ -451,6 +615,18 @@
* Functions Checking Free Space
***************************************/
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+ /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
+ * the alignment bytes difference between estimation and actual usage */
+ return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+ ZSTD_cwksp_used(ws) <= estimatedSpace;
+}
+
+
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
@@ -478,5 +654,4 @@
}
}
-
#endif /* ZSTD_CWKSP_H */
Index: lib/zstd/compress/zstd_compress_literals.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
--- a/lib/zstd/compress/zstd_compress_literals.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_literals.c (date 1740124241410)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,11 +14,36 @@
***************************************/
#include "zstd_compress_literals.h"
+
+/* **************************************************************
+* Debug Traces
+****************************************************************/
+#if DEBUGLEVEL >= 2
+
+static size_t showHexa(const void* src, size_t srcSize)
+{
+ const BYTE* const ip = (const BYTE*)src;
+ size_t u;
+ for (u=0; u31) + (srcSize>4095);
+ DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
switch(flSize)
@@ -36,16 +62,30 @@
}
ZSTD_memcpy(ostart + flSize, src, srcSize);
- DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+ DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
+static int allBytesIdentical(const void* src, size_t srcSize)
+{
+ assert(srcSize >= 1);
+ assert(src != NULL);
+ { const BYTE b = ((const BYTE*)src)[0];
+ size_t p;
+ for (p=1; p31) + (srcSize>4095);
- (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+ assert(dstCapacity >= 4); (void)dstCapacity;
+ assert(allBytesIdentical(src, srcSize));
switch(flSize)
{
@@ -63,27 +103,51 @@
}
ostart[flSize] = *(const BYTE*)src;
- DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+ DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
return flSize+1;
}
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_strategy strategy, int disableLiteralCompression,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize,
- const int bmi2)
+/* ZSTD_minLiteralsToCompress() :
+ * returns minimal amount of literals
+ * for literal compression to even be attempted.
+ * Minimum is made tighter as compression strategy increases.
+ */
+static size_t
+ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+{
+ assert((int)strategy >= 0);
+ assert((int)strategy <= 9);
+ /* btultra2 : min 8 bytes;
+ * then 2x larger for each successive compression strategy
+ * max threshold 64 bytes */
+ { int const shift = MIN(9-(int)strategy, 3);
+ size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
+ DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
+ return mintc;
+ }
+}
+
+size_t ZSTD_compressLiterals (
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy,
+ int disableLiteralCompression,
+ int suspectUncompressible,
+ int bmi2)
{
- size_t const minGain = ZSTD_minGain(srcSize, strategy);
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
- symbolEncodingType_e hType = set_compressed;
+ SymbolEncodingType_e hType = set_compressed;
size_t cLitSize;
- DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
- disableLiteralCompression, (U32)srcSize);
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
+ disableLiteralCompression, (U32)srcSize, dstCapacity);
+
+ DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
@@ -91,40 +155,51 @@
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- /* small ? don't even attempt compression (speed opt) */
-# define COMPRESS_LITERALS_SIZE_MIN 63
- { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
- if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
+ /* if too small, don't even attempt compression (speed opt) */
+ if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
{ HUF_repeat repeat = prevHuf->repeatMode;
- int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ int const flags = 0
+ | (bmi2 ? HUF_flags_bmi2 : 0)
+ | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
+ | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
+ | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
+
+ typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
+ huf_compress_f huf_compress;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
- cLitSize = singleStream ?
- HUF_compress1X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
- HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
- (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
- HUF_compress4X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
- HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
- (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+ huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
+ cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
+ src, srcSize,
+ HUF_SYMBOLVALUE_MAX, LitHufLog,
+ entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable,
+ &repeat, flags);
+ DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
- DEBUGLOG(5, "Reusing previous huffman table");
+ DEBUGLOG(5, "reusing statistics from previous huffman block");
hType = set_repeat;
}
}
- if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
+ { size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ } }
if (cLitSize==1) {
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
- }
+ /* A return value of 1 signals that the alphabet consists of a single symbol.
+ * However, in some rare circumstances, it could be the compressed size (a single byte).
+ * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
+ * (it's also necessary to not generate statistics).
+ * Therefore, in such a case, actively check that all bytes are identical. */
+ if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ } }
if (hType == set_compressed) {
/* using a newly constructed table */
@@ -135,16 +210,19 @@
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
- { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
+ { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
+ assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
{ U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
+ assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
{ U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
Index: lib/zstd/compress/zstd_opt.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
--- a/lib/zstd/compress/zstd_opt.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_opt.c (date 1740124333090)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -24,42 +25,52 @@
#include "hist.h"
#include "zstd_opt.h"
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
-#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE (1<<30)
-#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
/*-*************************************
* Price functions for optimal parser
***************************************/
-#if 0 /* approximation at bit level */
+#if 0 /* approximation at bit level (for tests) */
# define BITCOST_ACCURACY 0
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat))
-#elif 0 /* fractional bit accuracy */
+# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy (for tests) */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat))
#else /* opt==approx, ultra==accurate */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+# define WEIGHT(stat,opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif
+/* ZSTD_bitWeight() :
+ * provide estimated "cost" of a stat in full bits only */
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}
+/* ZSTD_fracWeight() :
+ * provide fractional-bit "cost" of a stat,
+ * using linear interpolation approximation */
MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
U32 const stat = rawStat + 1;
U32 const hb = ZSTD_highbit32(stat);
U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ /* Fweight was meant for "Fractional weight"
+ * but it's effectively a value between 1 and 2
+ * using fixed point arithmetic */
U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
U32 const weight = BWeight + FWeight;
assert(hb + BITCOST_ACCURACY < 31);
@@ -70,7 +81,7 @@
/* debugging function,
* @return price in bytes as fractional value
* for debug messages only */
-MEM_STATIC double ZSTD_fCost(U32 price)
+MEM_STATIC double ZSTD_fCost(int price)
{
return (double)price / (BITCOST_MULTIPLIER*8);
}
@@ -78,7 +89,7 @@
static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
- return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+ return optPtr->literalCompressionMode != ZSTD_ps_disable;
}
static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
@@ -91,25 +102,52 @@
}
-/* ZSTD_downscaleStat() :
- * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
- * return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+static U32 sum_u32(const unsigned table[], size_t nbElts)
+{
+ size_t n;
+ U32 total = 0;
+ for (n=0; n 0 && ZSTD_FREQ_DIV+malus < 31);
+ DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)",
+ (unsigned)lastEltIndex+1, (unsigned)shift );
+ assert(shift < 30);
for (s=0; s> (ZSTD_FREQ_DIV+malus));
- sum += table[s];
+ unsigned const base = base1 ? 1 : (table[s]>0);
+ unsigned const newStat = base + (table[s] >> shift);
+ sum += newStat;
+ table[s] = newStat;
}
return sum;
}
+/* ZSTD_scaleStats() :
+ * reduce all elt frequencies in table if sum too large
+ * return the resulting sum of elements */
+static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+{
+ U32 const prevsum = sum_u32(table, lastEltIndex+1);
+ U32 const factor = prevsum >> logTarget;
+ DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
+ assert(logTarget < 30);
+ if (factor <= 1) return prevsum;
+ return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed);
+}
+
/* ZSTD_rescaleFreqs() :
* if first block (detected by optPtr->litLengthSum == 0) : init statistics
* take hints from dictionary if there is one
- * or init from zero, using src for literals stats, or flat 1 for match symbols
+ * and init from zero if there is none,
+ * using src for literals stats, and baseline stats for sequence symbols
* otherwise downscale existing stats, to be used as seed for next block.
*/
static void
@@ -121,24 +159,28 @@
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
- if (optPtr->litLengthSum == 0) { /* first block : init */
- if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
- DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */
+
+ /* heuristic: use pre-defined stats for too small inputs */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) {
+ DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD);
optPtr->priceType = zop_predef;
}
assert(optPtr->symbolCosts != NULL);
if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
- /* huffman table presumed generated by dictionary */
+
+ /* huffman stats covering the full value set : table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
if (compressedLiterals) {
+ /* generate literals statistics from huffman table */
unsigned lit;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
for (lit=0; lit<=MaxLit; lit++) {
U32 const scaleLog = 11; /* scale to 2K */
- U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+ U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
assert(bitCost <= scaleLog);
optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->litSum += optPtr->litFreq[lit];
@@ -180,20 +222,26 @@
optPtr->offCodeSum += optPtr->offCodeFreq[of];
} }
- } else { /* not a dictionary */
+ } else { /* first block, no dictionary */
assert(optPtr->litFreq != NULL);
if (compressedLiterals) {
+ /* base initial cost of literals on direct frequency within src */
unsigned lit = MaxLit;
HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
- optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible);
}
- { unsigned ll;
- for (ll=0; ll<=MaxLL; ll++)
- optPtr->litLengthFreq[ll] = 1;
+ { unsigned const baseLLfreqs[MaxLL+1] = {
+ 4, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
+ optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
}
- optPtr->litLengthSum = MaxLL+1;
{ unsigned ml;
for (ml=0; ml<=MaxML; ml++)
@@ -201,21 +249,25 @@
}
optPtr->matchLengthSum = MaxML+1;
- { unsigned of;
- for (of=0; of<=MaxOff; of++)
- optPtr->offCodeFreq[of] = 1;
+ { unsigned const baseOFCfreqs[MaxOff+1] = {
+ 6, 2, 1, 1, 2, 3, 4, 4,
+ 4, 3, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
+ optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
}
- optPtr->offCodeSum = MaxOff+1;
}
- } else { /* new block : re-use previous statistics, scaled down */
+ } else { /* new block : scale down accumulated statistics */
if (compressedLiterals)
- optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
- optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
- optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
- optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+ optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
+ optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
+ optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
+ optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
}
ZSTD_setBasePrices(optPtr, optLevel);
@@ -228,6 +280,7 @@
const optState_t* const optPtr,
int optLevel)
{
+ DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength);
if (litLength == 0) return 0;
if (!ZSTD_compressedLiterals(optPtr))
@@ -237,11 +290,14 @@
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
/* dynamic statistics */
- { U32 price = litLength * optPtr->litSumBasePrice;
+ { U32 price = optPtr->litSumBasePrice * litLength;
+ U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER;
U32 u;
+ assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER);
for (u=0; u < litLength; u++) {
- assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
- price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax;
+ price -= litPrice;
}
return price;
}
@@ -251,7 +307,17 @@
* cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
- if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (optPtr->priceType == zop_predef)
+ return WEIGHT(litLength, optLevel);
+
+ /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+ * because it isn't representable in the zstd format.
+ * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1.
+ * In such a case, the block would be all literals.
+ */
+ if (litLength == ZSTD_BLOCKSIZE_MAX)
+ return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
/* dynamic statistics */
{ U32 const llCode = ZSTD_LLcode(litLength);
@@ -262,22 +328,25 @@
}
/* ZSTD_getMatchPrice() :
- * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Provides the cost of the match part (offset + matchLength) of a sequence.
* Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
- * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+ * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq()
+ * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
+ */
FORCE_INLINE_TEMPLATE U32
-ZSTD_getMatchPrice(U32 const offset,
+ZSTD_getMatchPrice(U32 const offBase,
U32 const matchLength,
const optState_t* const optPtr,
int const optLevel)
{
U32 price;
- U32 const offCode = ZSTD_highbit32(offset+1);
+ U32 const offCode = ZSTD_highbit32(offBase);
U32 const mlBase = matchLength - MINMATCH;
assert(matchLength >= MINMATCH);
- if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
- return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+ if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */
+ return WEIGHT(mlBase, optLevel)
+ + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */
/* dynamic statistics */
price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
@@ -296,10 +365,10 @@
}
/* ZSTD_updateStats() :
- * assumption : literals + litLengtn <= iend */
+ * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
U32 litLength, const BYTE* literals,
- U32 offsetCode, U32 matchLength)
+ U32 offBase, U32 matchLength)
{
/* literals */
if (ZSTD_compressedLiterals(optPtr)) {
@@ -315,8 +384,8 @@
optPtr->litLengthSum++;
}
- /* match offset code (0-2=>repCode; 3+=>offset+2) */
- { U32 const offCode = ZSTD_highbit32(offsetCode+1);
+ /* offset code : follows storeSeq() numeric representation */
+ { U32 const offCode = ZSTD_highbit32(offBase);
assert(offCode <= MaxOff);
optPtr->offCodeFreq[offCode]++;
optPtr->offCodeSum++;
@@ -350,9 +419,11 @@
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
- U32* nextToUpdate3,
- const BYTE* const ip)
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip)
{
U32* const hashTable3 = ms->hashTable3;
U32 const hashLog3 = ms->hashLog3;
@@ -376,11 +447,15 @@
* Binary Tree search
***************************************/
/* ZSTD_insertBt1() : add one or multiple positions to tree.
- * ip : assumed <= iend-8 .
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
* @return : nb of positions added */
-static U32 ZSTD_insertBt1(
- ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertBt1(
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
+ U32 const target,
U32 const mls, const int extDict)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -403,7 +478,10 @@
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = smallerPtr + 1;
U32 dummy32; /* to be nullified at the end */
- U32 const windowLow = ms->window.lowLimit;
+ /* windowLow is based on target because
+ * we only need positions that will be in the window at the end of the tree update.
+ */
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
U32 matchEndIdx = curr+8+1;
size_t bestLength = 8;
U32 nbCompares = 1U << cParams->searchLog;
@@ -416,6 +494,7 @@
DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
+ assert(curr <= target);
assert(ip <= iend-8); /* required for h calculation */
hashTable[h] = curr; /* Update Hash Table */
@@ -492,19 +571,20 @@
}
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateTree_internal(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
const U32 mls, const ZSTD_dictMode_e dictMode)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
- DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
idx, target, dictMode);
while(idx < target) {
- U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+ U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
assert(idx < (U32)(idx + forward));
idx += forward;
}
@@ -513,20 +593,23 @@
ms->nextToUpdate = target;
}
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertBtAndGetAllMatches (
- ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
- ZSTD_matchState_t* ms,
- U32* nextToUpdate3,
- const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
- const U32 rep[ZSTD_REP_NUM],
- U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
- const U32 lengthToBeat,
- U32 const mls /* template */)
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32
+ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_MatchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ const U32 lengthToBeat,
+ const U32 mls /* template */)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
@@ -555,7 +638,7 @@
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
- const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
const ZSTD_compressionParameters* const dmsCParams =
dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
@@ -594,13 +677,13 @@
assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
- & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
&& ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
- & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
} }
@@ -609,7 +692,7 @@
DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
repCode, ll0, repOffset, repLen);
bestLength = repLen;
- matches[mnum].off = repCode - ll0;
+ matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */
matches[mnum].len = (U32)repLen;
mnum++;
if ( (repLen > sufficient_len)
@@ -638,7 +721,7 @@
bestLength = mlen;
assert(curr > matchIndex3);
assert(mnum==0); /* no prior solution */
- matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
+ matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3);
matches[0].len = (U32)mlen;
mnum = 1;
if ( (mlen > sufficient_len) |
@@ -647,7 +730,7 @@
return 1;
} } }
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
- }
+ } /* if (mls == 3) */
hashTable[h] = curr; /* Update Hash Table */
@@ -671,21 +754,20 @@
}
if (matchLength > bestLength) {
- DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+ DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)",
+ (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
assert(matchEndIdx > matchIndex);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
| (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
break; /* drop, to preserve bt consistency (miss a little bit of compression) */
- }
- }
+ } }
if (match[matchLength] < ip[matchLength]) {
/* match smaller than current */
@@ -720,19 +802,18 @@
if (matchLength > bestLength) {
matchIndex = dictMatchIndex + dmsIndexDelta;
- DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)",
+ (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
| (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
- }
+ } }
if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
if (match[matchLength] < ip[matchLength]) {
@@ -742,39 +823,93 @@
/* match is larger than current */
commonLengthLarger = matchLength;
dictMatchIndex = nextPtr[0];
- }
- }
- }
+ } } } /* if (dictMode == ZSTD_dictMatchState) */
assert(matchEndIdx > curr+8);
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
return mnum;
}
+typedef U32 (*ZSTD_getAllMatchesFn)(
+ ZSTD_match_t*,
+ ZSTD_MatchState_t*,
+ U32*,
+ const BYTE*,
+ const BYTE*,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat);
-FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
- ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */
- ZSTD_matchState_t* ms,
- U32* nextToUpdate3,
- const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
- const U32 rep[ZSTD_REP_NUM],
- U32 const ll0,
- U32 const lengthToBeat)
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_btGetAllMatches_internal(
+ ZSTD_match_t* matches,
+ ZSTD_MatchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* ip,
+ const BYTE* const iHighLimit,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat,
+ const ZSTD_dictMode_e dictMode,
+ const U32 mls)
{
- const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32 const matchLengthSearch = cParams->minMatch;
- DEBUGLOG(8, "ZSTD_BtGetAllMatches");
- if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
- switch(matchLengthSearch)
- {
- case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
- default :
- case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
- case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
- case 7 :
- case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
+ assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
+ DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
+ if (ip < ms->window.base + ms->nextToUpdate)
+ return 0; /* skipped area */
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
+ return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
+}
+
+#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
+ static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
+ ZSTD_match_t* matches, \
+ ZSTD_MatchState_t* ms, \
+ U32* nextToUpdate3, \
+ const BYTE* ip, \
+ const BYTE* const iHighLimit, \
+ const U32 rep[ZSTD_REP_NUM], \
+ U32 const ll0, \
+ U32 const lengthToBeat) \
+ { \
+ return ZSTD_btGetAllMatches_internal( \
+ matches, ms, nextToUpdate3, ip, iHighLimit, \
+ rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
+ }
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
+
+GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
+
+#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
+ { \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
}
+
+static ZSTD_getAllMatchesFn
+ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode)
+{
+ ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
+ };
+ U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
+ assert((U32)dictMode < 3);
+ assert(mls - 3 < 4);
+ return getAllMatchesFns[(int)dictMode][mls - 3];
}
/* ***********************
@@ -783,16 +918,18 @@
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
- rawSeqStore_t seqStore; /* External match candidates store for this block */
- U32 startPosInBlock; /* Start position of the current match candidate */
- U32 endPosInBlock; /* End position of the current match candidate */
- U32 offset; /* Offset of the match candidate */
+ RawSeqStore_t seqStore; /* External match candidates store for this block */
+ U32 startPosInBlock; /* Start position of the current match candidate */
+ U32 endPosInBlock; /* End position of the current match candidate */
+ U32 offset; /* Offset of the match candidate */
} ZSTD_optLdm_t;
/* ZSTD_optLdm_skipRawSeqStoreBytes():
- * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
+ * Moves forward in @rawSeqStore by @nbBytes,
+ * which will update the fields 'pos' and 'posInSequence'.
*/
-static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes)
+{
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
@@ -813,8 +950,10 @@
* Calculates the beginning and end of the next match in the current block.
* Updates 'pos' and 'posInSequence' of the ldmSeqStore.
*/
-static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
- U32 blockBytesRemaining) {
+static void
+ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 blockBytesRemaining)
+{
rawSeq currSeq;
U32 currBlockEndPos;
U32 literalsBytesRemaining;
@@ -826,8 +965,8 @@
optLdm->endPosInBlock = UINT_MAX;
return;
}
- /* Calculate appropriate bytes left in matchLength and litLength after adjusting
- based on ldmSeqStore->posInSequence */
+ /* Calculate appropriate bytes left in matchLength and litLength
+ * after adjusting based on ldmSeqStore->posInSequence */
currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
currBlockEndPos = currPosInBlock + blockBytesRemaining;
@@ -846,7 +985,7 @@
return;
}
- /* Matches may be < MINMATCH by this process. In that case, we will reject them
+ /* Matches may be < minMatch by this process. In that case, we will reject them
when we are deciding whether or not to add the ldm */
optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
@@ -863,28 +1002,31 @@
}
/* ZSTD_optLdm_maybeAddMatch():
- * Adds a match if it's long enough, based on it's 'matchStartPosInBlock'
- * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
+ * Adds a match if it's long enough,
+ * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock',
+ * into 'matches'. Maintains the correct ordering of 'matches'.
*/
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
- ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
- U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
- /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
- U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
- U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
+ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 minMatch)
+{
+ U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
+ /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
+ U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
/* Ensure that current block position is not outside of the match */
if (currPosInBlock < optLdm->startPosInBlock
|| currPosInBlock >= optLdm->endPosInBlock
- || candidateMatchLength < MINMATCH) {
+ || candidateMatchLength < minMatch) {
return;
}
if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
- DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
- candidateOffCode, candidateMatchLength, currPosInBlock);
+ U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset);
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u",
+ candidateOffBase, candidateMatchLength, currPosInBlock);
matches[*nbMatches].len = candidateMatchLength;
- matches[*nbMatches].off = candidateOffCode;
+ matches[*nbMatches].off = candidateOffBase;
(*nbMatches)++;
}
}
@@ -892,8 +1034,12 @@
/* ZSTD_optLdm_processMatchCandidate():
* Wrapper function to update ldm seq store and call ldm functions as necessary.
*/
-static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
- U32 currPosInBlock, U32 remainingBytes) {
+static void
+ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+ ZSTD_match_t* matches, U32* nbMatches,
+ U32 currPosInBlock, U32 remainingBytes,
+ U32 minMatch)
+{
if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
return;
}
@@ -904,24 +1050,19 @@
* at the end of a match from the ldm seq store, and will often be some bytes
* over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
*/
- U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+ U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
}
ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
}
- ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch);
}
+
/*-*******************************
* Optimal parser
*********************************/
-
-static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
-{
- return sol.litlen + sol.mlen;
-}
-
#if 0 /* debug */
static void
@@ -939,9 +1080,15 @@
#endif
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
+#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel)
+#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel)
+#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1))
+
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t
+ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const int optLevel,
@@ -957,15 +1104,19 @@
const BYTE* const prefixStart = base + ms->window.dictLimit;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
+
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
U32 nextToUpdate3 = ms->nextToUpdate;
ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
- ZSTD_optimal_t lastSequence;
+ ZSTD_optimal_t lastStretch;
ZSTD_optLdm_t optLdm;
+ ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t));
+
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
@@ -984,104 +1135,142 @@
/* find first match */
{ U32 const litlen = (U32)(ip - anchor);
U32 const ll0 = !litlen;
- U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(ip-istart), (U32)(iend - ip));
- if (!nbMatches) { ip++; continue; }
+ (U32)(ip-istart), (U32)(iend-ip),
+ minMatch);
+ if (!nbMatches) {
+ DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart));
+ ip++;
+ continue;
+ }
+
+ /* Match found: let's store this solution, and eventually find more candidates.
+ * During this forward pass, @opt is used to store stretches,
+ * defined as "a match followed by N literals".
+ * Note how this is different from a Sequence, which is "N literals followed by a match".
+ * Storing stretches allows us to store different match predecessors
+ * for each literal position part of a literals run. */
/* initialize opt[0] */
- { U32 i ; for (i=0; i immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
- U32 const maxOffset = matches[nbMatches-1].off;
- DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
- nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
+ U32 const maxOffBase = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
- lastSequence.litlen = litlen;
- lastSequence.mlen = maxML;
- lastSequence.off = maxOffset;
- DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ lastStretch.litlen = 0;
+ lastStretch.mlen = maxML;
+ lastStretch.off = maxOffBase;
+ DEBUGLOG(6, "large match (%u>%u) => immediate encoding",
maxML, sufficient_len);
cur = 0;
- last_pos = ZSTD_totalLen(lastSequence);
+ last_pos = maxML;
goto _shortestPath;
} }
/* set prices for first matches starting position == 0 */
- { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 pos;
+ assert(opt[0].price >= 0);
+ { U32 pos;
U32 matchNb;
for (pos = 1; pos < minMatch; pos++) {
- opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ opt[pos].price = ZSTD_MAX_PRICE;
+ opt[pos].mlen = 0;
+ opt[pos].litlen = litlen + pos;
}
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
- U32 const offset = matches[matchNb].off;
+ U32 const offBase = matches[matchNb].off;
U32 const end = matches[matchNb].len;
for ( ; pos <= end ; pos++ ) {
- U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
- U32 const sequencePrice = literalsPrice + matchPrice;
+ int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
+ int const sequencePrice = opt[0].price + matchPrice;
DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
pos, ZSTD_fCost(sequencePrice));
opt[pos].mlen = pos;
- opt[pos].off = offset;
- opt[pos].litlen = litlen;
- opt[pos].price = sequencePrice;
- } }
+ opt[pos].off = offBase;
+ opt[pos].litlen = 0; /* end of match */
+ opt[pos].price = sequencePrice + LL_PRICE(0);
+ }
+ }
last_pos = pos-1;
+ opt[pos].price = ZSTD_MAX_PRICE;
}
}
/* check further positions */
for (cur = 1; cur <= last_pos; cur++) {
const BYTE* const inr = ip + cur;
- assert(cur < ZSTD_OPT_NUM);
- DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+ assert(cur <= ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur);
/* Fix current position with one literal if cheaper */
- { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ { U32 const litlen = opt[cur-1].litlen + 1;
int const price = opt[cur-1].price
- + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
- + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ + LIT_PRICE(ip+cur-1)
+ + LL_INCPRICE(litlen);
assert(price < 1000000000); /* overflow check */
if (price <= opt[cur].price) {
- DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ ZSTD_optimal_t const prevMatch = opt[cur];
+ DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
- opt[cur].mlen = 0;
- opt[cur].off = 0;
+ opt[cur] = opt[cur-1];
opt[cur].litlen = litlen;
opt[cur].price = price;
+ if ( (optLevel >= 1) /* additional check only for higher modes */
+ && (prevMatch.litlen == 0) /* replace a match */
+ && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */
+ && LIKELY(ip + cur < iend)
+ ) {
+ /* check next position, in case it would be cheaper */
+ int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1);
+ int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1);
+ DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f",
+ cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals));
+ if ( (with1literal < withMoreLiterals)
+ && (with1literal < opt[cur+1].price) ) {
+ /* update offset history - before it disappears */
+ U32 const prev = cur - prevMatch.mlen;
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
+ assert(cur >= prevMatch.mlen);
+ DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !",
+ ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals),
+ newReps.rep[0], newReps.rep[1], newReps.rep[2] );
+ opt[cur+1] = prevMatch; /* mlen & offbase */
+ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t));
+ opt[cur+1].litlen = 1;
+ opt[cur+1].price = with1literal;
+ if (last_pos < cur+1) last_pos = cur+1;
+ }
+ }
} else {
- DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
- opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
}
}
- /* Set the repcodes of the current position. We must do it here
- * because we rely on the repcodes of the 2nd to last sequence being
- * correct to set the next chunks repcodes during the backward
- * traversal.
+ /* Offset history is not updated during match comparison.
+ * Do it here, now that the match is selected and confirmed.
*/
- ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t));
assert(cur >= opt[cur].mlen);
- if (opt[cur].mlen != 0) {
+ if (opt[cur].litlen == 0) {
+ /* just finished a match => alter offset history */
U32 const prev = cur - opt[cur].mlen;
- repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
- ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
- } else {
- ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
@@ -1091,37 +1280,37 @@
if ( (optLevel==0) /*static_test*/
&& (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
- DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1);
continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
}
- { U32 const ll0 = (opt[cur].mlen != 0);
- U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
- U32 const previousPrice = opt[cur].price;
- U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
+ assert(opt[cur].price >= 0);
+ { U32 const ll0 = (opt[cur].litlen == 0);
+ int const previousPrice = opt[cur].price;
+ int const basePrice = previousPrice + LL_PRICE(0);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
U32 matchNb;
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(inr-istart), (U32)(iend-inr));
+ (U32)(inr-istart), (U32)(iend-inr),
+ minMatch);
if (!nbMatches) {
DEBUGLOG(7, "rPos:%u : no match found", cur);
continue;
}
- { U32 const maxML = matches[nbMatches-1].len;
- DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
- inr-istart, cur, nbMatches, maxML);
+ { U32 const longestML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u",
+ (int)(inr-istart), cur, nbMatches, longestML);
- if ( (maxML > sufficient_len)
- || (cur + maxML >= ZSTD_OPT_NUM) ) {
- lastSequence.mlen = maxML;
- lastSequence.off = matches[nbMatches-1].off;
- lastSequence.litlen = litlen;
- cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
- last_pos = cur + ZSTD_totalLen(lastSequence);
- if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ if ( (longestML > sufficient_len)
+ || (cur + longestML >= ZSTD_OPT_NUM)
+ || (ip + cur + longestML >= iend) ) {
+ lastStretch.mlen = longestML;
+ lastStretch.off = matches[nbMatches-1].off;
+ lastStretch.litlen = 0;
+ last_pos = cur + longestML;
goto _shortestPath;
} }
@@ -1132,20 +1321,25 @@
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
- DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
- matchNb, matches[matchNb].off, lastML, litlen);
+ DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, opt[cur].litlen);
for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
U32 const pos = cur + mlen;
- int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+ int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
if ((pos > last_pos) || (price < opt[pos].price)) {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
- while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ while (last_pos < pos) {
+ /* fill empty positions, for future comparisons */
+ last_pos++;
+ opt[last_pos].price = ZSTD_MAX_PRICE;
+ opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */
+ }
opt[pos].mlen = mlen;
opt[pos].off = offset;
- opt[pos].litlen = litlen;
+ opt[pos].litlen = 0;
opt[pos].price = price;
} else {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
@@ -1153,55 +1347,89 @@
if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
}
} } }
+ opt[last_pos+1].price = ZSTD_MAX_PRICE;
} /* for (cur = 1; cur <= last_pos; cur++) */
- lastSequence = opt[last_pos];
- cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
- assert(cur < ZSTD_OPT_NUM); /* control overflow*/
+ lastStretch = opt[last_pos];
+ assert(cur >= lastStretch.mlen);
+ cur = last_pos - lastStretch.mlen;
_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(opt[0].mlen == 0);
+ assert(last_pos >= lastStretch.mlen);
+ assert(cur == last_pos - lastStretch.mlen);
- /* Set the next chunk's repcodes based on the repcodes of the beginning
- * of the last match, and the last sequence. This avoids us having to
- * update them while traversing the sequences.
- */
- if (lastSequence.mlen != 0) {
- repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
- ZSTD_memcpy(rep, &reps, sizeof(reps));
+ if (lastStretch.mlen==0) {
+ /* no solution : all matches have been converted into literals */
+ assert(lastStretch.litlen == (ip - anchor) + last_pos);
+ ip += last_pos;
+ continue;
+ }
+ assert(lastStretch.off > 0);
+
+ /* Update offset history */
+ if (lastStretch.litlen == 0) {
+ /* finishing on a match : update offset history */
+ Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t));
} else {
- ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t));
+ assert(cur >= lastStretch.litlen);
+ cur -= lastStretch.litlen;
}
- { U32 const storeEnd = cur + 1;
+ /* Let's write the shortest path solution.
+ * It is stored in @opt in reverse order,
+ * starting from @storeEnd (==cur+2),
+ * effectively partially @opt overwriting.
+ * Content is changed too:
+ * - So far, @opt stored stretches, aka a match followed by literals
+ * - Now, it will store sequences, aka literals followed by a match
+ */
+ { U32 const storeEnd = cur + 2;
U32 storeStart = storeEnd;
- U32 seqPos = cur;
+ U32 stretchPos = cur;
DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
last_pos, cur); (void)last_pos;
- assert(storeEnd < ZSTD_OPT_NUM);
- DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
- storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
- opt[storeEnd] = lastSequence;
- while (seqPos > 0) {
- U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ assert(storeEnd < ZSTD_OPT_SIZE);
+ DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off);
+ if (lastStretch.litlen > 0) {
+ /* last "sequence" is unfinished: just a bunch of literals */
+ opt[storeEnd].litlen = lastStretch.litlen;
+ opt[storeEnd].mlen = 0;
+ storeStart = storeEnd-1;
+ opt[storeStart] = lastStretch;
+ } {
+ opt[storeEnd] = lastStretch; /* note: litlen will be fixed */
+ storeStart = storeEnd;
+ }
+ while (1) {
+ ZSTD_optimal_t nextStretch = opt[stretchPos];
+ opt[storeStart].litlen = nextStretch.litlen;
+ DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)",
+ opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off);
+ if (nextStretch.mlen == 0) {
+ /* reaching beginning of segment */
+ break;
+ }
storeStart--;
- DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
- seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
- opt[storeStart] = opt[seqPos];
- seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ opt[storeStart] = nextStretch; /* note: litlen will be fixed */
+ assert(nextStretch.litlen + nextStretch.mlen <= stretchPos);
+ stretchPos -= nextStretch.litlen + nextStretch.mlen;
}
/* save sequences */
- DEBUGLOG(6, "sending selected sequences into seqStore")
+ DEBUGLOG(6, "sending selected sequences into seqStore");
{ U32 storePos;
for (storePos=storeStart; storePos <= storeEnd; storePos++) {
U32 const llen = opt[storePos].litlen;
U32 const mlen = opt[storePos].mlen;
- U32 const offCode = opt[storePos].off;
+ U32 const offBase = opt[storePos].off;
U32 const advance = llen + mlen;
- DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
- anchor - istart, (unsigned)llen, (unsigned)mlen);
+ DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u",
+ (int)(anchor - istart), (unsigned)llen, (unsigned)mlen);
if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert(storePos == storeEnd); /* must be last sequence */
@@ -1210,11 +1438,14 @@
}
assert(anchor + llen <= iend);
- ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
- ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen);
anchor += advance;
ip = anchor;
} }
+ DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]);
+
+ /* update all costs */
ZSTD_setBasePrices(optStatePtr, optLevel);
}
} /* while (ip < ilimit) */
@@ -1222,50 +1453,51 @@
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+static size_t ZSTD_compressBlock_opt0(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+}
+#endif
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+static size_t ZSTD_compressBlock_opt2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+}
+#endif
+
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
+#endif
-/* used in 2-pass strategy */
-static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
-{
- U32 s, sum=0;
- assert(ZSTD_FREQ_DIV+bonus >= 0);
- for (s=0; slitSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
- optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
- optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
- optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
-}
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its contract must be respected.
+ * this function cannot error out, its narrow contract must be respected.
*/
-static void
-ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
- U32 rep[ZSTD_REP_NUM],
- const void* src, size_t srcSize)
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
{
U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
@@ -1276,38 +1508,36 @@
assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */
- ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/
+ ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
- /* invalidate first scan from history */
+ /* invalidate first scan from history, only keep entropy stats */
ZSTD_resetSeqStore(seqStore);
ms->window.base -= srcSize;
ms->window.dictLimit += (U32)srcSize;
ms->window.lowLimit = ms->window.dictLimit;
ms->nextToUpdate = ms->window.dictLimit;
- /* re-inforce weight of collected statistics */
- ZSTD_upscaleStats(&ms->opt);
}
size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
U32 const curr = (U32)((const BYTE*)src - ms->window.base);
DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
- /* 2-pass strategy:
+ /* 2-passes strategy:
* this strategy makes a first pass over first block to collect statistics
- * and seed next round's statistics with it.
- * After 1st pass, function forgets everything, and starts a new block.
+ * in order to seed next round's statistics with it.
+ * After 1st pass, function forgets history, and starts a new block.
* Consequently, this can only work if no data has been previously loaded in tables,
* aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block),
@@ -1316,42 +1546,47 @@
if ( (ms->opt.litLengthSum==0) /* first block */
&& (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
&& (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
- && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
- && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */
) {
ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
}
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- const void* src, size_t srcSize)
-{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
+#endif
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
Index: lib/zstd/compress/zstd_ldm.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
--- a/lib/zstd/compress/zstd_ldm.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_ldm.h (date 1740124241487)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,6 @@
#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H
-
#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
@@ -40,7 +40,7 @@
* sequences.
*/
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmState_t* ldms, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize);
/*
@@ -61,8 +61,9 @@
* two. We handle that case correctly, and update `rawSeqStore` appropriately.
* NOTE: This function does not return any errors.
*/
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize);
/*
@@ -72,7 +73,7 @@
* Avoids emitting matches less than `minMatch` bytes.
* Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);
/* ZSTD_ldm_skipRawSeqStoreBytes():
@@ -80,7 +81,7 @@
* Not to be used in conjunction with ZSTD_ldm_skipSequences().
* Must be called for data with is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes);
/* ZSTD_ldm_getTableSize() :
* Estimate the space needed for long distance matching tables or 0 if LDM is
@@ -106,5 +107,4 @@
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams);
-
#endif /* ZSTD_FAST_H */
Index: lib/zstd/compress/zstd_compress_superblock.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
--- a/lib/zstd/compress/zstd_compress_superblock.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_superblock.c (date 1740124241431)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -15,289 +16,10 @@
#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
#include "hist.h" /* HIST_countFast_wksp */
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
-/*-*************************************
-* Superblock entropy buffer structs
-***************************************/
-/* ZSTD_hufCTablesMetadata_t :
- * Stores Literals Block Type for a super-block in hType, and
- * huffman tree description in hufDesBuffer.
- * hufDesSize refers to the size of huffman tree description in bytes.
- * This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
-typedef struct {
- symbolEncodingType_e hType;
- BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
- size_t hufDesSize;
-} ZSTD_hufCTablesMetadata_t;
-
-/* ZSTD_fseCTablesMetadata_t :
- * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
- * fse tables in fseTablesBuffer.
- * fseTablesSize refers to the size of fse tables in bytes.
- * This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
-typedef struct {
- symbolEncodingType_e llType;
- symbolEncodingType_e ofType;
- symbolEncodingType_e mlType;
- BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
- size_t fseTablesSize;
- size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
-} ZSTD_fseCTablesMetadata_t;
-
-typedef struct {
- ZSTD_hufCTablesMetadata_t hufMetadata;
- ZSTD_fseCTablesMetadata_t fseMetadata;
-} ZSTD_entropyCTablesMetadata_t;
-
-
-/* ZSTD_buildSuperBlockEntropy_literal() :
- * Builds entropy for the super-block literals.
- * Stores literals block type (raw, rle, compressed, repeat) and
- * huffman description table to hufMetadata.
- * @return : size of huffman description table or error code */
-static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
- const ZSTD_hufCTables_t* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_hufCTablesMetadata_t* hufMetadata,
- const int disableLiteralsCompression,
- void* workspace, size_t wkspSize)
-{
- BYTE* const wkspStart = (BYTE*)workspace;
- BYTE* const wkspEnd = wkspStart + wkspSize;
- BYTE* const countWkspStart = wkspStart;
- unsigned* const countWksp = (unsigned*)workspace;
- const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
- BYTE* const nodeWksp = countWkspStart + countWkspSize;
- const size_t nodeWkspSize = wkspEnd-nodeWksp;
- unsigned maxSymbolValue = 255;
- unsigned huffLog = HUF_TABLELOG_DEFAULT;
- HUF_repeat repeat = prevHuf->repeatMode;
-
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
-
- /* Prepare nextEntropy assuming reusing the existing table */
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-
- if (disableLiteralsCompression) {
- DEBUGLOG(5, "set_basic - disabled");
- hufMetadata->hType = set_basic;
- return 0;
- }
-
- /* small ? don't even attempt compression (speed opt) */
-# define COMPRESS_LITERALS_SIZE_MIN 63
- { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
- if (srcSize <= minLitSize) {
- DEBUGLOG(5, "set_basic - too small");
- hufMetadata->hType = set_basic;
- return 0;
- }
- }
-
- /* Scan input and build symbol stats */
- { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
- FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
- if (largest == srcSize) {
- DEBUGLOG(5, "set_rle");
- hufMetadata->hType = set_rle;
- return 0;
- }
- if (largest <= (srcSize >> 7)+4) {
- DEBUGLOG(5, "set_basic - no gain");
- hufMetadata->hType = set_basic;
- return 0;
- }
- }
-
- /* Validate the previous Huffman table */
- if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
- repeat = HUF_repeat_none;
- }
-
- /* Build Huffman Tree */
- ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
- maxSymbolValue, huffLog,
- nodeWksp, nodeWkspSize);
- FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
- huffLog = (U32)maxBits;
- { /* Build and write the CTable */
- size_t const newCSize = HUF_estimateCompressedSize(
- (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
- size_t const hSize = HUF_writeCTable_wksp(
- hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
- (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
- nodeWksp, nodeWkspSize);
- /* Check against repeating the previous CTable */
- if (repeat != HUF_repeat_none) {
- size_t const oldCSize = HUF_estimateCompressedSize(
- (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
- if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
- DEBUGLOG(5, "set_repeat - smaller");
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_repeat;
- return 0;
- }
- }
- if (newCSize + hSize >= srcSize) {
- DEBUGLOG(5, "set_basic - no gains");
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_basic;
- return 0;
- }
- DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
- hufMetadata->hType = set_compressed;
- nextHuf->repeatMode = HUF_repeat_check;
- return hSize;
- }
- }
-}
-
-/* ZSTD_buildSuperBlockEntropy_sequences() :
- * Builds entropy for the super-block sequences.
- * Stores symbol compression modes and fse table to fseMetadata.
- * @return : size of fse tables or error code */
-static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
- const ZSTD_fseCTables_t* prevEntropy,
- ZSTD_fseCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_fseCTablesMetadata_t* fseMetadata,
- void* workspace, size_t wkspSize)
-{
- BYTE* const wkspStart = (BYTE*)workspace;
- BYTE* const wkspEnd = wkspStart + wkspSize;
- BYTE* const countWkspStart = wkspStart;
- unsigned* const countWksp = (unsigned*)workspace;
- const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
- BYTE* const cTableWksp = countWkspStart + countWkspSize;
- const size_t cTableWkspSize = wkspEnd-cTableWksp;
- ZSTD_strategy const strategy = cctxParams->cParams.strategy;
- FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
- FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
- FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
- const BYTE* const ofCodeTable = seqStorePtr->ofCode;
- const BYTE* const llCodeTable = seqStorePtr->llCode;
- const BYTE* const mlCodeTable = seqStorePtr->mlCode;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- BYTE* const ostart = fseMetadata->fseTablesBuffer;
- BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
- BYTE* op = ostart;
-
- assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
- ZSTD_memset(workspace, 0, wkspSize);
-
- fseMetadata->lastCountSize = 0;
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
- /* build CTable for Literal Lengths */
- { U32 LLtype;
- unsigned max = MaxLL;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- DEBUGLOG(5, "Building LL table");
- nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
- LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- LLFSELog, prevEntropy->litlengthCTable,
- LL_defaultNorm, LL_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(set_basic < set_compressed && set_rle < set_compressed);
- assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
- countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
- prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
- if (LLtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->llType = (symbolEncodingType_e) LLtype;
- } }
- /* build CTable for Offsets */
- { U32 Offtype;
- unsigned max = MaxOff;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
- DEBUGLOG(5, "Building OF table");
- nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
- Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- OffFSELog, prevEntropy->offcodeCTable,
- OF_defaultNorm, OF_defaultNormLog,
- defaultPolicy, strategy);
- assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
- if (Offtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->ofType = (symbolEncodingType_e) Offtype;
- } }
- /* build CTable for MatchLengths */
- { U32 MLtype;
- unsigned max = MaxML;
- size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
- DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
- nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
- MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
- countWksp, max, mostFrequent, nbSeq,
- MLFSELog, prevEntropy->matchlengthCTable,
- ML_defaultNorm, ML_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
- countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
- prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
- cTableWksp, cTableWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
- if (MLtype == set_compressed)
- fseMetadata->lastCountSize = countSize;
- op += countSize;
- fseMetadata->mlType = (symbolEncodingType_e) MLtype;
- } }
- assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
- return op-ostart;
-}
-
-
-/* ZSTD_buildSuperBlockEntropy() :
- * Builds entropy for the super-block.
- * @return : 0 on success or error code */
-static size_t
-ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- void* workspace, size_t wkspSize)
-{
- size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
- DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
- entropyMetadata->hufMetadata.hufDesSize =
- ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
- &prevEntropy->huf, &nextEntropy->huf,
- &entropyMetadata->hufMetadata,
- ZSTD_disableLiteralsCompression(cctxParams),
- workspace, wkspSize);
- FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
- entropyMetadata->fseMetadata.fseTablesSize =
- ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
- &prevEntropy->fse, &nextEntropy->fse,
- cctxParams,
- &entropyMetadata->fseMetadata,
- workspace, wkspSize);
- FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
- return 0;
-}
-
/* ZSTD_compressSubBlock_literal() :
* Compresses literals section for a sub-block.
* When we have to write the Huffman table we will sometimes choose a header
@@ -315,13 +37,14 @@
* If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
* and the following sub-blocks' literals sections will be Treeless_Literals_Block.
* @return : compressed size of literals section of a sub-block
- * Or 0 if it unable to compress.
+ * Or 0 if unable to compress.
* Or error code */
-static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
- const ZSTD_hufCTablesMetadata_t* hufMetadata,
- const BYTE* literals, size_t litSize,
- void* dst, size_t dstSize,
- const int bmi2, int writeEntropy, int* entropyWritten)
+static size_t
+ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const BYTE* literals, size_t litSize,
+ void* dst, size_t dstSize,
+ const int bmi2, int writeEntropy, int* entropyWritten)
{
size_t const header = writeEntropy ? 200 : 0;
size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
@@ -329,11 +52,9 @@
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart + lhSize;
U32 const singleStream = lhSize == 3;
- symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
size_t cLitSize = 0;
- (void)bmi2; /* TODO bmi2... */
-
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
*entropyWritten = 0;
@@ -355,9 +76,9 @@
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
}
- /* TODO bmi2 */
- { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
- : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ { int const flags = bmi2 ? HUF_flags_bmi2 : 0;
+ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
+ : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
op += cSize;
cLitSize += cSize;
if (cSize == 0 || ERR_isError(cSize)) {
@@ -382,7 +103,7 @@
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
- { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
@@ -402,27 +123,30 @@
}
*entropyWritten = 1;
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
- return op-ostart;
+ return (size_t)(op-ostart);
}
-static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
- const seqDef* const sstart = sequences;
- const seqDef* const send = sequences + nbSeq;
- const seqDef* sp = sstart;
+static size_t
+ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
+ const SeqDef* sequences, size_t nbSeqs,
+ size_t litSize, int lastSubBlock)
+{
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
- /* Only used by assert(), suppress unused variable warnings in production. */
- (void)litLengthSum;
- while (send-sp > 0) {
- ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+ size_t n;
+ for (n=0; ncParams.windowLog > STREAM_ACCUMULATOR_MIN;
BYTE* const ostart = (BYTE*)dst;
@@ -456,14 +181,14 @@
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "");
- if (nbSeq < 0x7F)
+ if (nbSeq < 128)
*op++ = (BYTE)nbSeq;
else if (nbSeq < LONGNBSEQ)
op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
else
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
if (nbSeq==0) {
- return op - ostart;
+ return (size_t)(op - ostart);
}
/* seqHead : flags for FSE encoding type */
@@ -485,7 +210,7 @@
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
- op, oend - op,
+ op, (size_t)(oend - op),
fseTables->matchlengthCTable, mlCode,
fseTables->offcodeCTable, ofCode,
fseTables->litlengthCTable, llCode,
@@ -529,7 +254,7 @@
#endif
*entropyWritten = 1;
- return op - ostart;
+ return (size_t)(op - ostart);
}
/* ZSTD_compressSubBlock() :
@@ -538,7 +263,7 @@
* Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- const seqDef* sequences, size_t nbSeq,
+ const SeqDef* sequences, size_t nbSeq,
const BYTE* literals, size_t litSize,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
@@ -555,7 +280,8 @@
litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
{ size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
&entropyMetadata->hufMetadata, literals, litSize,
- op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, litEntropyWritten);
FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
if (cLitSize == 0) return 0;
op += cLitSize;
@@ -565,18 +291,18 @@
sequences, nbSeq,
llCode, mlCode, ofCode,
cctxParams,
- op, oend-op,
+ op, (size_t)(oend-op),
bmi2, writeSeqEntropy, seqEntropyWritten);
FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
if (cSeqSize == 0) return 0;
op += cSeqSize;
}
/* Write block header */
- { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+ { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize;
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(ostart, cBlockHeader24);
}
- return op-ostart;
+ return (size_t)(op-ostart);
}
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
@@ -602,10 +328,10 @@
return 0;
}
-static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type,
const BYTE* codeTable, unsigned maxCode,
size_t nbSeq, const FSE_CTable* fseCTable,
- const U32* additionalBits,
+ const U8* additionalBits,
short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t wkspSize)
{
@@ -646,8 +372,9 @@
void* workspace, size_t wkspSize,
int writeEntropy)
{
- size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+ size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
size_t cSeqSizeEstimate = 0;
+ if (nbSeq == 0) return sequencesSectionHeaderSize;
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
nbSeq, fseTables->offcodeCTable, NULL,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
@@ -664,7 +391,11 @@
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
-static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+typedef struct {
+ size_t estLitSize;
+ size_t estBlockSize;
+} EstimatedBlockSize;
+static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
@@ -672,15 +403,17 @@
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
- int writeLitEntropy, int writeSeqEntropy) {
- size_t cSizeEstimate = 0;
- cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
- &entropy->huf, &entropyMetadata->hufMetadata,
- workspace, wkspSize, writeLitEntropy);
- cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ int writeLitEntropy, int writeSeqEntropy)
+{
+ EstimatedBlockSize ebs;
+ ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
- return cSizeEstimate + ZSTD_blockHeaderSize;
+ ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
+ return ebs;
}
static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
@@ -694,14 +427,57 @@
return 0;
}
+static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount)
+{
+ size_t n, total = 0;
+ assert(sp != NULL);
+ for (n=0; n %zu bytes", seqCount, (const void*)sp, total);
+ return total;
+}
+
+#define BYTESCALE 256
+
+static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs,
+ size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
+ int firstSubBlock)
+{
+ size_t n, budget = 0, inSize=0;
+ /* entropy headers */
+ size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
+ assert(firstSubBlock==0 || firstSubBlock==1);
+ budget += headerSize;
+
+ /* first sequence => at least one sequence*/
+ budget += sp[0].litLength * avgLitCost + avgSeqCost;
+ if (budget > targetBudget) return 1;
+ inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH);
+
+ /* loop over sequences */
+ for (n=1; n<nbSeqs; n++) {
+ size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
+ budget += currentCost;
+ inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH);
+ /* stop when sub-block budget is reached */
+ if ( (budget > targetBudget)
+ /* though continue to expand until the sub-block is deemed compressible */
+ && (budget < inSize * BYTESCALE) )
+ break;
+ }
+
+ return n;
+}
+
/* ZSTD_compressSubBlock_multi() :
* Breaks super-block into multiple sub-blocks and compresses them.
- * Entropy will be written to the first block.
- * The following blocks will use repeat mode to compress.
- * All sub-blocks are compressed blocks (no raw or rle blocks).
- * @return : compressed size of the super block (which is multiple ZSTD blocks)
- * Or 0 if it failed to compress. */
-static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ * Entropy will be written into the first block.
+ * The following blocks use repeat_mode to compress.
+ * Sub-blocks are all compressed, except the last one when beneficial.
+ * @return : compressed size of the super block (which features multiple ZSTD blocks)
+ * or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr,
const ZSTD_compressedBlockState_t* prevCBlock,
ZSTD_compressedBlockState_t* nextCBlock,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
@@ -711,12 +487,14 @@
const int bmi2, U32 lastBlock,
void* workspace, size_t wkspSize)
{
- const seqDef* const sstart = seqStorePtr->sequencesStart;
- const seqDef* const send = seqStorePtr->sequences;
- const seqDef* sp = sstart;
+ const SeqDef* const sstart = seqStorePtr->sequencesStart;
+ const SeqDef* const send = seqStorePtr->sequences;
+ const SeqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
+ size_t const nbSeqs = (size_t)(send - sstart);
const BYTE* const lstart = seqStorePtr->litStart;
const BYTE* const lend = seqStorePtr->lit;
const BYTE* lp = lstart;
+ size_t const nbLiterals = (size_t)(lend - lstart);
BYTE const* ip = (BYTE const*)src;
BYTE const* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*)dst;
@@ -725,120 +503,179 @@
const BYTE* llCodePtr = seqStorePtr->llCode;
const BYTE* mlCodePtr = seqStorePtr->mlCode;
const BYTE* ofCodePtr = seqStorePtr->ofCode;
- size_t targetCBlockSize = cctxParams->targetCBlockSize;
- size_t litSize, seqCount;
- int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+ size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
+ size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
+ int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
int writeSeqEntropy = 1;
- int lastSequence = 0;
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
- (unsigned)(lend-lp), (unsigned)(send-sstart));
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
+ (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));
- litSize = 0;
- seqCount = 0;
- do {
- size_t cBlockSizeEstimate = 0;
- if (sstart == send) {
- lastSequence = 1;
- } else {
- const seqDef* const sequence = sp + seqCount;
- lastSequence = sequence == send - 1;
- litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
- seqCount++;
- }
- if (lastSequence) {
- assert(lp <= lend);
- assert(litSize <= (size_t)(lend - lp));
- litSize = (size_t)(lend - lp);
- }
- /* I think there is an optimization opportunity here.
- * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
- * since it recalculates estimate from scratch.
- * For example, it would recount literal distribution and symbol codes everytime.
- */
- cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
- &nextCBlock->entropy, entropyMetadata,
- workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
- if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
- int litEntropyWritten = 0;
- int seqEntropyWritten = 0;
- const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
- const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
- sp, seqCount,
- lp, litSize,
- llCodePtr, mlCodePtr, ofCodePtr,
- cctxParams,
- op, oend-op,
- bmi2, writeLitEntropy, writeSeqEntropy,
- &litEntropyWritten, &seqEntropyWritten,
- lastBlock && lastSequence);
- FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
- if (cSize > 0 && cSize < decompressedSize) {
- DEBUGLOG(5, "Committed the sub-block");
- assert(ip + decompressedSize <= iend);
- ip += decompressedSize;
- sp += seqCount;
- lp += litSize;
- op += cSize;
- llCodePtr += seqCount;
- mlCodePtr += seqCount;
- ofCodePtr += seqCount;
- litSize = 0;
- seqCount = 0;
- /* Entropy only needs to be written once */
- if (litEntropyWritten) {
- writeLitEntropy = 0;
- }
- if (seqEntropyWritten) {
- writeSeqEntropy = 0;
- }
- }
- }
- } while (!lastSequence);
+ /* let's start by a general estimation for the full block */
+ if (nbSeqs > 0) {
+ EstimatedBlockSize const ebs =
+ ZSTD_estimateSubBlockSize(lp, nbLiterals,
+ ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
+ &nextCBlock->entropy, entropyMetadata,
+ workspace, wkspSize,
+ writeLitEntropy, writeSeqEntropy);
+ /* quick estimation */
+ size_t const avgLitCost = nbLiterals ? (ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE;
+ size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs;
+ const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1);
+ size_t n, avgBlockBudget, blockBudgetSupp=0;
+ avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks;
+ DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes",
+ (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE,
+ (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE);
+ /* simplification: if estimates states that the full superblock doesn't compress, just bail out immediately
+ * this will result in the production of a single uncompressed block covering @srcSize.*/
+ if (ebs.estBlockSize > srcSize) return 0;
+
+ /* compress and write sub-blocks */
+ assert(nbSubBlocks>0);
+ for (n=0; n < nbSubBlocks-1; n++) {
+ /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
+ size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp),
+ avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0);
+ /* if reached last sequence : break to last sub-block (simplification) */
+ assert(seqCount <= (size_t)(send-sp));
+ if (sp + seqCount == send) break;
+ assert(seqCount > 0);
+ /* compress sub-block */
+ { int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ size_t litSize = countLiterals(seqStorePtr, sp, seqCount);
+ const size_t decompressedSize =
+ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
+ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ 0);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+
+ /* check compressibility, update state components */
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes",
+ (unsigned)decompressedSize, (unsigned)cSize);
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ sp += seqCount;
+ blockBudgetSupp = 0;
+ } }
+ /* otherwise : do not compress yet, coalesce current sub-block with following one */
+ }
+ } /* if (nbSeqs > 0) */
+
+ /* write last block */
+ DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp));
+ { int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ size_t litSize = (size_t)(lend - lp);
+ size_t seqCount = (size_t)(send - sp);
+ const size_t decompressedSize =
+ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
+ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+
+ /* update pointers, the nb of literals borrowed from next sequence must be preserved */
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes",
+ (unsigned)decompressedSize, (unsigned)cSize);
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ sp += seqCount;
+ }
+ }
+
+
if (writeLitEntropy) {
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+ DEBUGLOG(5, "Literal entropy tables were never written");
ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
}
if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
/* If we haven't written our entropy tables, then we've violated our contract and
* must emit an uncompressed block.
*/
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+ DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block");
return 0;
}
+
if (ip < iend) {
- size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+ /* some data left : last part of the block sent uncompressed */
+ size_t const rSize = (size_t)((iend - ip));
+ size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock);
+ DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize));
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
assert(cSize != 0);
op += cSize;
/* We have to regenerate the repcodes because we've skipped some sequences */
if (sp < send) {
- seqDef const* seq;
- repcodes_t rep;
+ const SeqDef* seq;
+ Repcodes_t rep;
ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
- rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
}
ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
}
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
- return op-ostart;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u",
+ (unsigned)(op-ostart));
+ return (size_t)(op-ostart);
}
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
- void const* src, size_t srcSize,
- unsigned lastBlock) {
+ const void* src, size_t srcSize,
+ unsigned lastBlock)
+{
ZSTD_entropyCTablesMetadata_t entropyMetadata;
- FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
@@ -848,5 +685,5 @@
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */);
}
Index: lib/zstd/compress/zstd_double_fast.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
--- a/lib/zstd/compress/zstd_double_fast.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_double_fast.h (date 1740124241451)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,22 +12,32 @@
#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
- void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp);
+
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL
+#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
#endif /* ZSTD_DOUBLE_FAST_H */
Index: lib/zstd/compress/hist.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
--- a/lib/zstd/compress/hist.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/hist.h (date 1740124241365)
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -73,3 +74,10 @@
*/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize);
+
+/*! HIST_add() :
+ * Lowest level: just add nb of occurrences of characters from @src into @count.
+ * @count is not reset. @count array is presumed large enough (i.e. 1 KB).
+ @ This function does not need any additional stack memory.
+ */
+void HIST_add(unsigned* count, const void* src, size_t srcSize);
Index: lib/zstd/compress/zstd_lazy.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
--- a/lib/zstd/compress/zstd_lazy.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_lazy.h (date 1740124241473)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,6 @@
#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H
-
#include "zstd_compress_internal.h"
/*
@@ -22,60 +22,173 @@
*/
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+#endif
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy
+#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_GREEDY NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy
+#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_LAZY NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2
+#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE ZSTD_compressBlock_lazy2_dictMatchState
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_LAZY2 NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL
+#endif
#endif /* ZSTD_LAZY_H */
Index: lib/zstd/decompress/zstd_decompress_block.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
--- a/lib/zstd/decompress/zstd_decompress_block.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_decompress_block.c (date 1740124333279)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,12 +21,12 @@
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"
+#include "../common/bits.h" /* ZSTD_highbit32 */
/*_*******************************************************
* Macros
@@ -40,7 +41,7 @@
#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
#endif
-#define DSLAB
+
/*_*******************************************************
* Memory operations
**********************************************************/
@@ -51,6 +52,13 @@
* Block decoding
***************************************************************/
+static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx)
+{
+ size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX;
+ assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
+ return blockSizeMax;
+}
+
/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
@@ -69,21 +77,71 @@
}
}
+/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */
+static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
+ const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
+{
+ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
+ assert(litSize <= blockSizeMax);
+ assert(dctx->isFrameDecompression || streaming == not_streaming);
+ assert(expectedWriteSize <= blockSizeMax);
+ if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) {
+ /* If we aren't streaming, we can just put the literals after the output
+ * of the current block. We don't need to worry about overwriting the
+ * extDict of our window, because it doesn't exist.
+ * So if we have space after the end of the block, just put it there.
+ */
+ dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_in_dst;
+ } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) {
+ /* Literals fit entirely within the extra buffer, put them there to avoid
+ * having to split the literals.
+ */
+ dctx->litBuffer = dctx->litExtraBuffer;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ } else {
+ assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE);
+ /* Literals must be split between the output block and the extra lit
+ * buffer. We fill the extra lit buffer with the tail of the literals,
+ * and put the rest of the literals at the end of the block, with
+ * WILDCOPY_OVERLENGTH of buffer room to allow for overreads.
+ * This MUST not write more than our maxBlockSize beyond dst, because in
+ * streaming mode, that could overwrite part of our extDict window.
+ */
+ if (splitImmediately) {
+ /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
+ } else {
+ /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
+ dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
+ }
+ dctx->litBufferLocation = ZSTD_split;
+ assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize);
+ }
+}
-/* Hidden declaration for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
- const void* src, size_t srcSize);
/*! ZSTD_decodeLiteralsBlock() :
+ * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
+ * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
+ * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being
+ * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
+ *
* @return : nb of bytes read from src (< srcSize )
* note : symbol not declared but exposed for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
- const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
+ void* dst, size_t dstCapacity, const streaming_operation streaming)
{
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+ SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3);
+ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
switch(litEncType)
{
@@ -93,12 +151,16 @@
ZSTD_FALLTHROUGH;
case set_compressed:
- RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
size_t hufSuccess;
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
+ int const flags = 0
+ | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0)
+ | (dctx->disableHufAsm ? HUF_flags_disableAsm : 0);
switch(lhlCode)
{
case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -121,8 +183,15 @@
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ if (!singleStream)
+ RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong,
+ "Not enough literals (%zu) for the 4-streams mode (min %u)",
+ litSize, MIN_LITERALS_FOR_4_STREAMS);
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@@ -131,13 +200,14 @@
if (litEncType==set_repeat) {
if (singleStream) {
- hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ hufSuccess = HUF_decompress1X_usingDTable(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
- dctx->HUFptr, dctx->bmi2);
+ dctx->HUFptr, flags);
} else {
- hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ assert(litSize >= MIN_LITERALS_FOR_4_STREAMS);
+ hufSuccess = HUF_decompress4X_usingDTable(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
- dctx->HUFptr, dctx->bmi2);
+ dctx->HUFptr, flags);
}
} else {
if (singleStream) {
@@ -145,20 +215,29 @@
hufSuccess = HUF_decompress1X_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace));
+ sizeof(dctx->workspace), flags);
#else
- hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ hufSuccess = HUF_decompress1X1_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace), dctx->bmi2);
+ sizeof(dctx->workspace), flags);
#endif
} else {
- hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ hufSuccess = HUF_decompress4X_hufOnly_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace), dctx->bmi2);
+ sizeof(dctx->workspace), flags);
}
}
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ assert(litSize > ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
+ assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax);
+ }
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
@@ -166,13 +245,13 @@
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
- ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case set_basic:
{ size_t litSize, lhSize;
U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -185,27 +264,42 @@
break;
case 3:
lhSize = 3;
+ RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3");
litSize = MEM_readLE24(istart) >> 4;
break;
}
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
- ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize);
+ }
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
- ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
+ dctx->litBufferEnd = dctx->litPtr + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
return lhSize+litSize;
}
case set_rle:
{ U32 const lhlCode = ((istart[0]) >> 2) & 3;
size_t litSize, lhSize;
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -214,16 +308,28 @@
break;
case 1:
lhSize = 2;
+ RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3");
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4");
litSize = MEM_readLE24(istart) >> 4;
- RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
- ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize);
+ }
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
@@ -234,6 +340,18 @@
}
}
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity);
+size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity)
+{
+ dctx->isFrameDecompression = 0;
+ return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming);
+}
+
/* Default FSE distribution tables.
* These are pre-calculated FSE decoding tables using default distributions as defined in specification :
* https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
@@ -241,7 +359,7 @@
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
* - printout the content of tables
- * - pretify output, report below, test with fuzzer to ensure it's correct */
+ * - prettify output, report below, test with fuzzer to ensure it's correct */
/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<nbBits = 0;
cell->nextState = 0;
assert(nbAddBits < 255);
- cell->nbAdditionalBits = (BYTE)nbAddBits;
+ cell->nbAdditionalBits = nbAddBits;
cell->baseValue = baseValue;
}
@@ -367,7 +485,7 @@
FORCE_INLINE_TEMPLATE
void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
- const U32* baseValue, const U32* nbAdditionalBits,
+ const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_seqSymbol* const tableDecode = dt+1;
@@ -430,14 +548,15 @@
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
- pos += n;
+ assert(n>=0);
+ pos += (size_t)n;
}
}
/* Now we spread those positions across the table.
- * The benefit of doing it in two stages is that we avoid the the
+ * The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
- * We unroll the loop twice, since that is what emperically worked best.
+ * We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
@@ -464,7 +583,7 @@
for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */
} }
assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
@@ -475,10 +594,10 @@
for (u=0; u max, corruption_detected, "");
{ U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
- U32 const nbBits = nbAdditionalBits[symbol];
+ U8 const nbBits = nbAdditionalBits[symbol];
ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
}
*DTablePtr = DTableSpace;
@@ -588,11 +707,6 @@
/* SeqHead */
nbSeq = *ip++;
- if (!nbSeq) {
- *nbSeqPtr=0;
- RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
- return 1;
- }
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
@@ -605,11 +719,19 @@
}
*nbSeqPtr = nbSeq;
+ if (nbSeq == 0) {
+ /* No sequence : section ends immediately */
+ RETURN_ERROR_IF(ip != iend, corruption_detected,
+ "extraneous data present in the Sequences section");
+ return (size_t)(ip - istart);
+ }
+
/* FSE table descriptors */
RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
- { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */
+ { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6);
+ SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3);
+ SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3);
ip++;
/* Build DTables */
@@ -620,7 +742,7 @@
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
- dctx->bmi2);
+ ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += llhSize;
}
@@ -632,7 +754,7 @@
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
- dctx->bmi2);
+ ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += ofhSize;
}
@@ -644,7 +766,7 @@
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
- dctx->bmi2);
+ ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += mlhSize;
}
@@ -658,7 +780,6 @@
size_t litLength;
size_t matchLength;
size_t offset;
- const BYTE* match;
} seq_t;
typedef struct {
@@ -672,9 +793,6 @@
ZSTD_fseState stateOffb;
ZSTD_fseState stateML;
size_t prevOffset[ZSTD_REP_NUM];
- const BYTE* prefixStart;
- const BYTE* dictEnd;
- size_t pos;
} seqState_t;
/*! ZSTD_overlapCopy8() :
@@ -717,7 +835,7 @@
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
* The src buffer must be before the dst buffer.
*/
-static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
@@ -733,6 +851,7 @@
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
assert(length >= 8);
ZSTD_overlapCopy8(&op, &ip, diff);
+ length -= 8;
assert(op - ip >= 8);
assert(op <= oend);
}
@@ -747,8 +866,31 @@
assert(oend > oend_w);
ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
ip += oend_w - op;
- op = oend_w;
+ op += oend_w - op;
}
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_safecopyDstBeforeSrc():
+ * This version allows overlap with dst before src, or handles the non-overlap case with dst after src
+ * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
+static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ if (length < 8 || diff > -8) {
+ /* Handle short lengths, close overlaps, and dst not before src. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+
+ if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
+ ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
+ ip += oend - WILDCOPY_OVERLENGTH - op;
+ op += oend - WILDCOPY_OVERLENGTH - op;
+ }
+
/* Handle the leftovers. */
while (op < oend) *op++ = *ip++;
}
@@ -762,10 +904,11 @@
* to be optimized for many small sequences, since those fall into ZSTD_execSequence().
*/
FORCE_NOINLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceEnd(BYTE* op,
- BYTE* const oend, seq_t sequence,
- const BYTE** litPtr, const BYTE* const litLimit,
- const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
@@ -788,27 +931,78 @@
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
- match = dictEnd - (prefixStart-match);
+ match = dictEnd - (prefixStart - match);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
- ZSTD_memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = prefixStart;
- } }
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+/* ZSTD_execSequenceEndSplitLitBuffer():
+ * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case.
+ */
+FORCE_NOINLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer");
+ ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart - match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
HINT_INLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequence(BYTE* op,
- BYTE* const oend, seq_t sequence,
- const BYTE** litPtr, const BYTE* const litLimit,
- const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
@@ -819,17 +1013,115 @@
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
+
+#if defined(__aarch64__)
+ /* prefetch sequence starting from match that will be used for copy later */
+ PREFETCH_L1(match);
+#endif
/* Handle edge cases in a slow path:
* - Read beyond end of literals
* - Match end is within WILDCOPY_OVERLIMIT of oend
* - 32-bit mode and the match length overflows
*/
if (UNLIKELY(
- iLitEnd > litLimit ||
- oMatchEnd > oend_w ||
- (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+ /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
+
+HINT_INLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+ * - Match end is within WILDCOPY_OVERLIMIT of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
@@ -896,6 +1188,7 @@
return sequenceLength;
}
+
static void
ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
{
@@ -909,24 +1202,14 @@
}
FORCE_INLINE_TEMPLATE void
-ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
-{
- ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- size_t const lowBits = BIT_readBits(bitD, nbBits);
- DStatePtr->state = DInfo.nextState + lowBits;
-}
-
-FORCE_INLINE_TEMPLATE void
-ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
+ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits)
{
- U32 const nbBits = DInfo.nbBits;
size_t const lowBits = BIT_readBits(bitD, nbBits);
- DStatePtr->state = DInfo.nextState + lowBits;
+ DStatePtr->state = nextState + lowBits;
}
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
- * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32
* bits before reloading. This value is the maximum number of bytes we read
* after reloading when we are decoding long offsets.
*/
@@ -936,123 +1219,136 @@
: 0)
typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
-typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
+/*
+ * ZSTD_decodeSequence():
+ * @p longOffsets : tells the decoder to reload more bit while decoding large offsets
+ * only used in 32-bit mode
+ * @return : Sequence (litL + matchL + offset)
+ */
FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq)
{
seq_t seq;
- ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
- ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
- ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
- U32 const llBase = llDInfo.baseValue;
- U32 const mlBase = mlDInfo.baseValue;
- U32 const ofBase = ofDInfo.baseValue;
- BYTE const llBits = llDInfo.nbAdditionalBits;
- BYTE const mlBits = mlDInfo.nbAdditionalBits;
- BYTE const ofBits = ofDInfo.nbAdditionalBits;
- BYTE const totalBits = llBits+mlBits+ofBits;
+ /*
+ * ZSTD_seqSymbol is a 64 bits wide structure.
+ * It can be loaded in one operation
+ * and its fields extracted by simply shifting or bit-extracting on aarch64.
+ * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh
+ * operations that cause performance drop. This can be avoided by using this
+ * ZSTD_memcpy hack.
+ */
+#if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__))
+ ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS;
+ ZSTD_seqSymbol* const llDInfo = &llDInfoS;
+ ZSTD_seqSymbol* const mlDInfo = &mlDInfoS;
+ ZSTD_seqSymbol* const ofDInfo = &ofDInfoS;
+ ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol));
+ ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol));
+ ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol));
+#else
+ const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
+ const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
+ const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
+#endif
+ seq.matchLength = mlDInfo->baseValue;
+ seq.litLength = llDInfo->baseValue;
+ { U32 const ofBase = ofDInfo->baseValue;
+ BYTE const llBits = llDInfo->nbAdditionalBits;
+ BYTE const mlBits = mlDInfo->nbAdditionalBits;
+ BYTE const ofBits = ofDInfo->nbAdditionalBits;
+ BYTE const totalBits = llBits+mlBits+ofBits;
- /* sequence */
- { size_t offset;
- if (ofBits > 1) {
- ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
- ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
- assert(ofBits <= MaxOff);
- if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
- U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
- offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
- BIT_reloadDStream(&seqState->DStream);
- if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
- assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
- } else {
- offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
- }
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- } else {
- U32 const ll0 = (llBase == 0);
- if (LIKELY((ofBits == 0))) {
- if (LIKELY(!ll0))
- offset = seqState->prevOffset[0];
- else {
- offset = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
+ U16 const llNext = llDInfo->nextState;
+ U16 const mlNext = mlDInfo->nextState;
+ U16 const ofNext = ofDInfo->nextState;
+ U32 const llnbBits = llDInfo->nbBits;
+ U32 const mlnbBits = mlDInfo->nbBits;
+ U32 const ofnbBits = ofDInfo->nbBits;
+
+ assert(llBits <= MaxLLBits);
+ assert(mlBits <= MaxMLBits);
+ assert(ofBits <= MaxOff);
+ /*
+ * As gcc has better branch and block analyzers, sometimes it is only
+ * valuable to mark likeliness for clang, it gives around 3-4% of
+ * performance.
+ */
+
+ /* sequence */
+ { size_t offset;
+ if (ofBits > 1) {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32);
+ ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ /* Always read extra bits, this keeps the logic simple,
+ * avoids branches, and avoids accidentally reading 0 bits.
+ */
+ U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32;
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ } else {
+ U32 const ll0 = (llDInfo->baseValue == 0);
+ if (LIKELY((ofBits == 0))) {
+ offset = seqState->prevOffset[ll0];
+ seqState->prevOffset[1] = seqState->prevOffset[!ll0];
seqState->prevOffset[0] = offset;
- }
- } else {
- offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
- { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
- if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } } }
- seq.offset = offset;
- }
+ } else {
+ offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
+ { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } } }
+ seq.offset = offset;
+ }
- seq.matchLength = mlBase;
- if (mlBits > 0)
- seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
+ if (mlBits > 0)
+ seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
- if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
- BIT_reloadDStream(&seqState->DStream);
- if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
- /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
- ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
- seq.litLength = llBase;
- if (llBits > 0)
- seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
+ if (llBits > 0)
+ seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
- if (MEM_32bits())
- BIT_reloadDStream(&seqState->DStream);
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
- DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
- (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
- if (prefetch == ZSTD_p_prefetch) {
- size_t const pos = seqState->pos + seq.litLength;
- const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
- seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
- * No consequence though : no memory access will occur, offset is only used for prefetching */
- seqState->pos = pos + seq.matchLength;
- }
-
- /* ANS state update
- * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
- * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
- * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
- * better option, so it is the default for other compilers. But, if you
- * measure that it is worse, please put up a pull request.
- */
- {
-#if !defined(__clang__)
- const int kUseUpdateFseState = 1;
-#else
- const int kUseUpdateFseState = 0;
-#endif
- if (kUseUpdateFseState) {
- ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
- ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
- } else {
- ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */
- ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */
+ if (!isLastSeq) {
+ /* don't update FSE state for last Sequence */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
+ BIT_reloadDStream(&seqState->DStream);
}
}
return seq;
}
-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
-MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+#if DEBUGLEVEL >= 1
+static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
size_t const windowSize = dctx->fParams.windowSize;
/* No dictionary used. */
@@ -1066,435 +1362,65 @@
/* Dictionary is active. */
return 1;
}
+#endif
-MEM_STATIC void ZSTD_assertValidSequence(
+static void ZSTD_assertValidSequence(
ZSTD_DCtx const* dctx,
BYTE const* op, BYTE const* oend,
seq_t const seq,
BYTE const* prefixStart, BYTE const* virtualStart)
{
#if DEBUGLEVEL >= 1
- size_t const windowSize = dctx->fParams.windowSize;
- size_t const sequenceSize = seq.litLength + seq.matchLength;
- BYTE const* const oLitEnd = op + seq.litLength;
- DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
- (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
- assert(op <= oend);
- assert((size_t)(oend - op) >= sequenceSize);
- assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
- if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
- size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
- /* Offset must be within the dictionary. */
- assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
- assert(seq.offset <= windowSize + dictSize);
- } else {
- /* Offset must be within our window. */
- assert(seq.offset <= windowSize);
+ if (dctx->isFrameDecompression) {
+ size_t const windowSize = dctx->fParams.windowSize;
+ size_t const sequenceSize = seq.litLength + seq.matchLength;
+ BYTE const* const oLitEnd = op + seq.litLength;
+ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ assert(op <= oend);
+ assert((size_t)(oend - op) >= sequenceSize);
+ assert(sequenceSize <= ZSTD_blockSizeMax(dctx));
+ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+ /* Offset must be within the dictionary. */
+ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+ assert(seq.offset <= windowSize + dictSize);
+ } else {
+ /* Offset must be within our window. */
+ assert(seq.offset <= windowSize);
+ }
}
#else
(void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
#endif
}
#endif
-#ifdef DSLAB
-MEM_STATIC FORCE_INLINE_ATTR
-void DSLAB_memcopy(uint8_t* op, const uint8_t* ip, const uint64_t length)
-{
- uint8_t* oend = op + length;
- do {
- ZSTD_copy16(op, ip);
- ZSTD_copy16(op + 16, ip + 16);
- op += 32;
- ip += 32;
- }
- while (op < oend);
-}
-
-MEM_STATIC FORCE_INLINE_ATTR
-void DSLAB_safecopy(uint8_t* const op, const uint8_t* const ip, const uint64_t length){
- if (length < 16){
- uint8_t buff[16];
- ZSTD_copy16(buff, op - 16);
- ZSTD_copy16(op + length - 16, ip + length - 16);
- ZSTD_copy16(op - 16, buff);
- } else if (length < 32) {
- ZSTD_copy16(op, ip);
- ZSTD_copy16(op + length - 16, ip + length - 16);
- } else {
- DSLAB_memcopy(op, ip, length - 32);
- ZSTD_copy16(op + length - 32, ip + length - 32);
- ZSTD_copy16(op + length - 16, ip + length - 16);
- }
-}
-
-MEM_STATIC FORCE_INLINE_ATTR
-void DSLAB_wildcopy(uint8_t* const op, const uint8_t* const ip, const uint64_t length)
-{
- // 这里也是一个重要的优化,因为这里小概率大于48,所以先使用copy16,
- // 然后在大于48的时候才用memcopy,使用copy16可以提升性能。
- ZSTD_copy16(op, ip);
- // vst1q_u8((uint8_t*)op, vld1q_u8((const uint8_t*)ip));
- if (length > 16){
- ZSTD_copy16(op + 16, ip + 16);
- ZSTD_copy16(op + 32, ip + 32);
- // vst1q_u8((uint8_t*)(op + 16), vld1q_u8((const uint8_t*)( ip + 16)));
- // vst1q_u8((uint8_t*)(op + 16), vld1q_u8((const uint8_t*)( ip + )));
- if (length > 48){
- DSLAB_memcopy(op + 48, ip + 48, length - 48);
- }
- }
-}
-
-
-MEM_STATIC
-uint64_t DSLAB_lookBitsFast(const uint32_t bitsConsumed, const uint64_t bitContainer, const uint32_t nbBits)
-{
- assert(nbBits >= 1);
- return (bitContainer << (bitsConsumed & 63U)) >> ((64U - nbBits) & 63U);
-}
-
-MEM_STATIC
-uint64_t DSLAB_lookBits(const uint32_t bitsConsumed, const uint64_t bitContainer, const uint32_t nbBits)
-{
- assert(nbBits < BIT_MASK_SIZE);
- return (bitContainer >> ((64U - bitsConsumed) & 63U)) & (~((uint64_t)-1 << nbBits));
-}
-
-FORCE_INLINE_TEMPLATE
-uint64_t DSLAB_updateSeq(uint32_t* const bitsConsumed, ZSTD_seqSymbol DInfo, const uint64_t bitContainer)
-{
- uint64_t value = DInfo.baseValue;
- uint32_t nbBits = DInfo.nbAdditionalBits;
- if (nbBits > 0){
- value += DSLAB_lookBitsFast(*bitsConsumed, bitContainer, nbBits);
- *bitsConsumed += nbBits;
- }
- return value;
-}
-
-FORCE_INLINE_TEMPLATE
-uint64_t DSLAB_updateFseState(uint32_t* const bitsConsumed, ZSTD_seqSymbol DInfo, const uint64_t bitContainer)
-{
- uint32_t const nbBits = DInfo.nbBits;
- *bitsConsumed += nbBits;
- return DSLAB_lookBits(*bitsConsumed, bitContainer, nbBits) + DInfo.nextState;
-}
-
-FORCE_INLINE_TEMPLATE
-void DSLAB_updateOffset(uint64_t* const prevOffset0, uint64_t* const prevOffset1, uint64_t* const prevOffset2, uint32_t* const bitsConsumed, const uint64_t bitContainer, const ZSTD_seqSymbol ofDInfo, const uint32_t llbase)
-{
- const uint32_t ofBits = ofDInfo.nbAdditionalBits;
- if (ofBits > 1) {
- *prevOffset2 = *prevOffset0;
- *prevOffset0 = DSLAB_lookBitsFast(*bitsConsumed, bitContainer, ofBits) + ofDInfo.baseValue;
- *bitsConsumed += ofBits;
- } else if (UNLIKELY(ofBits == 1)) {
- uint32_t const ll0 = (llbase == 0);
- uint64_t temp = ofDInfo.baseValue + ll0 + DSLAB_lookBitsFast(*bitsConsumed, bitContainer, 1);
- *bitsConsumed += 1;
- if (temp == 0){
- temp = *prevOffset1;
- *prevOffset2 = *prevOffset0;
- } else if (temp == 1) {
- temp = *prevOffset0;
- } else if (temp == 2) {
- temp = *prevOffset2;
- *prevOffset2 = *prevOffset0;
- } else {
- temp = *prevOffset1 - 1;
- *prevOffset2 = *prevOffset0;
- }
- temp += !temp;
- *prevOffset0 = temp;
- } else if (llbase) {
- uint64_t temp = *prevOffset1;
- *prevOffset1 = *prevOffset0;
- *prevOffset0 = temp;
- }
-}
-
-FORCE_INLINE_TEMPLATE
-void DSLAB_reloadDStream(uint32_t* const bitsConsumed, uint64_t* const bitContainer, const uint8_t** DStream_ptr, const uint8_t* const DStream_start){
- uint32_t nbBytes = *bitsConsumed >> 3;
- *DStream_ptr -= nbBytes;
- *bitsConsumed &= 7;
- if (UNLIKELY(*DStream_ptr < DStream_start)) {
- uint32_t nbBytes2 = (uint32_t)(DStream_start - *DStream_ptr);
- *bitsConsumed += nbBytes2 << 3;
- *DStream_ptr = DStream_start;
- if (nbBytes == nbBytes2) {
- return;
- }
- }
- *bitContainer = MEM_readLEST(*DStream_ptr);
-}
-
-HINT_INLINE
-size_t DSLAB_execSequence(
- BYTE* op,
- BYTE* const oend,
- size_t offset,
- size_t matchLength,
- size_t litLength,
- const BYTE** litPtr,
- const BYTE* const litLimit,
- const BYTE* const prefixStart,
- const BYTE* const virtualStart,
- const BYTE* const dictEnd)
-{
- BYTE* const oLitEnd = op + litLength;
- size_t const sequenceLength = litLength + matchLength;
- BYTE* const oMatchEnd = op + sequenceLength;
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE* const iLitEnd = *litPtr + litLength;
- const BYTE* match = oLitEnd - offset;
-
- if (UNLIKELY(iLitEnd > litLimit || oMatchEnd > oend_w)){
- seq_t sequence;
- sequence.offset = offset;
- sequence.matchLength = matchLength;
- sequence.litLength = litLength;
- return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
- }
-
- ZSTD_copy16(op, (*litPtr));
- if (UNLIKELY(litLength > 16)) {
- ZSTD_wildcopy(op+16, (*litPtr)+16, litLength-16, ZSTD_no_overlap);
- }
- op = oLitEnd;
- *litPtr = iLitEnd;
-
- if (offset > (size_t)(oLitEnd - prefixStart)) {
- RETURN_ERROR_IF(UNLIKELY(offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
- match = dictEnd + (match - prefixStart);
- if (match + matchLength <= dictEnd - 32) {
- DSLAB_wildcopy(oLitEnd, match, matchLength);
- return sequenceLength;
- }
- if (match + matchLength <= dictEnd) {
- ZSTD_memmove(oLitEnd, match, matchLength);
- return sequenceLength;
- }
- { size_t const length1 = dictEnd - match;
- ZSTD_memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- matchLength -= length1;
- match = prefixStart;
- }
- }
-
- if (LIKELY(offset >= WILDCOPY_VECLEN)) {
- // ZSTD_wildcopy(op, match, (ptrdiff_t)matchLength, ZSTD_no_overlap);
- DSLAB_wildcopy(op, match, (ptrdiff_t)matchLength);
- return sequenceLength;
- }
-
- ZSTD_overlapCopy8(&op, &match, offset);
-
- if (matchLength > 8) {
- ZSTD_wildcopy(op, match, (ptrdiff_t)matchLength-8, ZSTD_overlap_src_before_dst);
- }
- return sequenceLength;
-}
-
-FORCE_INLINE_TEMPLATE seq_t
-DSLAB_decodeSequence_end(seqState_t* seqState, ZSTD_seqSymbol llDInfo, ZSTD_seqSymbol mlDInfo, ZSTD_seqSymbol ofDInfo)
-{
- seq_t seq;
- U32 const llBase = llDInfo.baseValue;
- U32 const mlBase = mlDInfo.baseValue;
- U32 const ofBase = ofDInfo.baseValue;
- BYTE const llBits = llDInfo.nbAdditionalBits;
- BYTE const mlBits = mlDInfo.nbAdditionalBits;
- BYTE const ofBits = ofDInfo.nbAdditionalBits;
- BYTE const totalBits = llBits+mlBits+ofBits;
-
- BIT_reloadDStream(&(seqState->DStream));
-
- { size_t offset;
- if (ofBits > 1) {
- offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- } else {
- U32 const ll0 = (llBase == 0);
- if (LIKELY((ofBits == 0))) {
- if (LIKELY(!ll0))
- offset = seqState->prevOffset[0];
- else {
- offset = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- }
- } else {
- offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
- { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp;
- if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } } }
- seq.offset = offset;
- }
-
- seq.matchLength = mlBase;
- if (mlBits > 0)
- seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits);
-
- if (UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
-
- seq.litLength = llBase;
- if (llBits > 0)
- seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits);
-
- ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);
- ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);
- ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);
-
- return seq;
-}
-HINT_INLINE
-int64_t DSLAB_decompressSequences_body(
- uint8_t** op,
- const uint8_t** litPtr_p,
- seqState_t* seqState,
- uint32_t nbSeq,
- uint8_t* const oend,
- ZSTD_DCtx* dctx)
-{
- //const uint8_t* const oend_w = oend - WILDCOPY_OVERLENGTH;
- //const uint8_t* const litLimit = dctx->litPtr + dctx->litSize;
- const uint8_t* const litEnd = dctx->litPtr + dctx->litSize;
- const uint8_t* const prefixStart = (const uint8_t*) (dctx->prefixStart);
- const uint8_t* const virtualStart = (const uint8_t*) (dctx->virtualStart);
- const uint8_t* const dictEnd = (const uint8_t*) (dctx->dictEnd);
- const uint8_t* const DStream_start = (const uint8_t*)seqState->DStream.start;
-
- const uint8_t *litPtr, *DStream_ptr;
- uint8_t *optr;
- uint64_t bitContainer, matchLength, litLength;
- uint32_t bitsConsumed;
- uint64_t prevOffset0, prevOffset1, prevOffset2;
- uint64_t oneSeqSize, error;
- ZSTD_seqSymbol llDInfo, mlDInfo, ofDInfo;
-
- optr = *op;
- litPtr = *litPtr_p;
- // 下面的一些参数都是做的数据局部化的优化,因为这些参数在循环中都是不变的,
- // 且这些参数的值都是从seqState中获取的,而seqState是从外面传入的,
- // 如果直接使用seqState中的值,会导致缓存失效,大大影响性能。
- DStream_ptr = (const uint8_t *)seqState->DStream.ptr;
- bitContainer = seqState->DStream.bitContainer;
- bitsConsumed = seqState->DStream.bitsConsumed;
-
- prevOffset0 = seqState->prevOffset[0];
- prevOffset1 = seqState->prevOffset[1];
- prevOffset2 = seqState->prevOffset[2];
-
- llDInfo = seqState->stateLL.table[seqState->stateLL.state];
- mlDInfo = seqState->stateML.table[seqState->stateML.state];
- ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
- // 上面这三行是错位读取的优化,因为读取的操作需要消耗一定的时间,如果读取完数据后,
- // 直接使用,会导致阻塞,所以这里错位读取,预先对数据进行读取。
- // 在读取的操作中进行一些其他的操作,这样就可以避免阻塞。
- error = 0;
- while(nbSeq > 2) {
- DSLAB_reloadDStream(&bitsConsumed, &bitContainer, &DStream_ptr, DStream_start);
- DSLAB_updateOffset(&prevOffset1, &prevOffset0, &prevOffset2, &bitsConsumed, bitContainer, ofDInfo, llDInfo.baseValue);
- matchLength = DSLAB_updateSeq(&bitsConsumed, mlDInfo, bitContainer);
- if (UNLIKELY(bitsConsumed + llDInfo.nbAdditionalBits >= 64-(LLFSELog+MLFSELog+OffFSELog))){
- DSLAB_reloadDStream(&bitsConsumed, &bitContainer, &DStream_ptr, DStream_start);
- }
- litLength = DSLAB_updateSeq(&bitsConsumed, llDInfo, bitContainer);
- //预先读取,下一次对这些变量的使用在下一次循环。
- ZSTD_copy8(&llDInfo, seqState->stateLL.table + DSLAB_updateFseState(&bitsConsumed, llDInfo, bitContainer));
- ZSTD_copy8(&mlDInfo, seqState->stateML.table + DSLAB_updateFseState(&bitsConsumed, mlDInfo, bitContainer));
- ZSTD_copy8(&ofDInfo, seqState->stateOffb.table + DSLAB_updateFseState(&bitsConsumed, ofDInfo, bitContainer));
-
- oneSeqSize = DSLAB_execSequence(optr, oend, prevOffset1, matchLength, litLength, &litPtr, litEnd, prefixStart, virtualStart, dictEnd);
- optr += oneSeqSize;
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
- error = oneSeqSize;
- break;
- }
-
- DSLAB_reloadDStream(&bitsConsumed, &bitContainer, &DStream_ptr, DStream_start);
- DSLAB_updateOffset(&prevOffset0, &prevOffset1, &prevOffset2, &bitsConsumed, bitContainer, ofDInfo, llDInfo.baseValue);
- matchLength = DSLAB_updateSeq(&bitsConsumed, mlDInfo, bitContainer);
- if (UNLIKELY(bitsConsumed + llDInfo.nbAdditionalBits >= 64-(LLFSELog+MLFSELog+OffFSELog))){
- DSLAB_reloadDStream(&bitsConsumed, &bitContainer, &DStream_ptr, DStream_start);
- }
- litLength = DSLAB_updateSeq(&bitsConsumed, llDInfo, bitContainer);
-
- ZSTD_copy8(&llDInfo, seqState->stateLL.table + DSLAB_updateFseState(&bitsConsumed, llDInfo, bitContainer));
- ZSTD_copy8(&mlDInfo, seqState->stateML.table + DSLAB_updateFseState(&bitsConsumed, mlDInfo, bitContainer));
- ZSTD_copy8(&ofDInfo, seqState->stateOffb.table + DSLAB_updateFseState(&bitsConsumed, ofDInfo, bitContainer));
-
- oneSeqSize = DSLAB_execSequence(optr, oend, prevOffset0, matchLength, litLength, &litPtr, litEnd, prefixStart, virtualStart, dictEnd);
- optr += oneSeqSize;
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
- error = oneSeqSize;
- break;
- }
-
- nbSeq -= 2;
- }
-
- seqState->DStream.ptr = (const char*)DStream_ptr;
- seqState->DStream.bitContainer = bitContainer;
- seqState->DStream.bitsConsumed = bitsConsumed;
-
- seqState->prevOffset[0] = prevOffset0;
- seqState->prevOffset[1] = prevOffset1;
- seqState->prevOffset[2] = prevOffset2;
-
- *op = optr;
- *litPtr_p = litPtr;
-
- if (nbSeq) {
- seq_t const sequence = DSLAB_decodeSequence_end(seqState, llDInfo, mlDInfo, ofDInfo);
- oneSeqSize = ZSTD_execSequence(*op, oend, sequence, litPtr_p, litEnd, prefixStart, virtualStart, dictEnd);
- BIT_reloadDStream(&(seqState->DStream));
- *op += oneSeqSize;
- nbSeq--;
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
- error = oneSeqSize;
- }
- }
-
- if (ZSTD_isError(error)) return error;
-
- return nbSeq;
-}
-#endif
+
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+
+
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
-ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + maxDstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
- const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
- DEBUGLOG(5, "ZSTD_decompressSequences_body");
- (void)frame;
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq);
- /* Regen sequences */
+ /* Literals are split between internal buffer & output buffer */
if (nbSeq) {
seqState_t seqState;
- size_t error = 0;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; ientropy.rep[i]; }
RETURN_ERROR_IF(
@@ -1510,165 +1436,331 @@
BIT_DStream_endOfBuffer < BIT_DStream_completed &&
BIT_DStream_completed < BIT_DStream_overflow);
-#if defined(__x86_64__)
- /* Align the decompression loop to 32 + 16 bytes.
- *
- * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
- * speed swings based on the alignment of the decompression loop. This
- * performance swing is caused by parts of the decompression loop falling
- * out of the DSB. The entire decompression loop should fit in the DSB,
- * when it can't we get much worse performance. You can measure if you've
- * hit the good case or the bad case with this perf command for some
- * compressed file test.zst:
- *
- * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
- * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
- *
- * If you see most cycles served out of the MITE you've hit the bad case.
- * If you see most cycles served out of the DSB you've hit the good case.
- * If it is pretty even then you may be in an okay case.
- *
- * I've been able to reproduce this issue on the following CPUs:
- * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
- * Use Instruments->Counters to get DSB/MITE cycles.
- * I never got performance swings, but I was able to
- * go from the good case of mostly DSB to half of the
- * cycles served from MITE.
- * - Coffeelake: Intel i9-9900k
- *
- * I haven't been able to reproduce the instability or DSB misses on any
- * of the following CPUS:
- * - Haswell
- * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH
- * - Skylake
- *
- * If you are seeing performance stability this script can help test.
- * It tests on 4 commits in zstd where I saw performance change.
- *
- * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
- */
- __asm__(".p2align 5");
- __asm__("nop");
- __asm__(".p2align 4");
+ /* decompress without overrunning litPtr begins */
+ { seq_t sequence = {0,0,0}; /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */
+ /* Align the decompression loop to 32 + 16 bytes.
+ *
+ * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
+ * speed swings based on the alignment of the decompression loop. This
+ * performance swing is caused by parts of the decompression loop falling
+ * out of the DSB. The entire decompression loop should fit in the DSB,
+ * when it can't we get much worse performance. You can measure if you've
+ * hit the good case or the bad case with this perf command for some
+ * compressed file test.zst:
+ *
+ * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
+ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
+ *
+ * If you see most cycles served out of the MITE you've hit the bad case.
+ * If you see most cycles served out of the DSB you've hit the good case.
+ * If it is pretty even then you may be in an okay case.
+ *
+ * This issue has been reproduced on the following CPUs:
+ * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
+ * Use Instruments->Counters to get DSB/MITE cycles.
+ * I never got performance swings, but I was able to
+ * go from the good case of mostly DSB to half of the
+ * cycles served from MITE.
+ * - Coffeelake: Intel i9-9900k
+ * - Coffeelake: Intel i7-9700k
+ *
+ * I haven't been able to reproduce the instability or DSB misses on any
+ * of the following CPUS:
+ * - Haswell
+ * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH
+ * - Skylake
+ *
+ * Alignment is done for each of the three major decompression loops:
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer
+ * - ZSTD_decompressSequences_body
+ * Alignment choices are made to minimize large swings on bad cases and influence on performance
+ * from changes external to this code, rather than to overoptimize on the current commit.
+ *
+ * If you are seeing performance stability this script can help test.
+ * It tests on 4 commits in zstd where I saw performance change.
+ *
+ * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
+ */
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+# if __GNUC__ >= 7
+ /* good for gcc-7, gcc-9, and gcc-11 */
+ __asm__("nop");
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 4");
+# if __GNUC__ == 8 || __GNUC__ == 10
+ /* good for gcc-8 and gcc-10 */
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+# endif
#endif
- {
- RETURN_ERROR_IF(nbSeq < 0, corruption_detected, "");
-
- for ( ; ; ) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
- #ifndef DSLAB
- #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
- assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
- #endif
+ /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
+ for ( ; nbSeq; nbSeq--) {
+ sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ if (litPtr + sequence.litLength > dctx->litBufferEnd) break;
+ { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
- #endif
- BIT_reloadDStream(&(seqState.DStream));
- op += oneSeqSize;
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
- error = oneSeqSize;
- break;
+ op += oneSeqSize;
+ } }
+ DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)");
+
+ /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
+ if (nbSeq > 0) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength);
+ if (leftoverLit) {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence.litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
}
- if (UNLIKELY(!--nbSeq)) break;
- #ifdef DSLAB
- if (op >= prefixStart + 16) break;
- #endif
- }
- #ifdef DSLAB
- if (nbSeq > 0) {
- nbSeq = (int32_t)DSLAB_decompressSequences_body(
- &op, &litPtr, &seqState, (uint32_t)nbSeq, oend,
- dctx);
- if (UNLIKELY(ZSTD_isError(nbSeq))) {
- return nbSeq;
- }
- }
- while (nbSeq > 0) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
- BIT_reloadDStream(&(seqState.DStream));
- op += oneSeqSize;
- if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
- error = oneSeqSize;
- break;
- }
- if (UNLIKELY(!--nbSeq)) break;
- }
- #endif
+ nbSeq--;
+ }
+ }
+
+ if (nbSeq > 0) {
+ /* there is remaining lit from extra buffer */
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ != 7
+ /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# elif __GNUC__ >= 11
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for ( ; nbSeq ; nbSeq--) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ }
}
-
/* check if reached exact end */
- DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
- if (ZSTD_isError(error)) return error;
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
- #ifndef DSLAB
- RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
- #endif
+ DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed);
+ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; ientropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ if (dctx->litBufferLocation == ZSTD_split) {
+ /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
+ size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+ /* copy last literals from internal buffer */
+ { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ } }
+
+ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
+ return (size_t)(op - ostart);
+}
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq);
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ >= 7
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for ( ; nbSeq ; nbSeq--) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ assert(nbSeq == 0);
+ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; ientropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
- { size_t const lastLLSize = litEnd - litPtr;
+ { size_t const lastLLSize = (size_t)(litEnd - litPtr);
+ DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
- }
- }
+ } }
- return op-ostart;
+ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
+ return (size_t)(op - ostart);
}
static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+
+FORCE_INLINE_TEMPLATE
+
+size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+ const BYTE* const prefixStart, const BYTE* const dictEnd)
+{
+ prefetchPos += sequence.litLength;
+ { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
+ /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+ * No consequence though : memory address is only used for prefetching, not for dereferencing */
+ const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
+ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ return prefetchPos + sequence.matchLength;
+}
+
+/* This decoding function employs prefetching
+ * to reduce latency impact of cache misses.
+ * It's generally employed when block contains a significant portion of long-distance matches
+ * or when coupled with a "cold" dictionary */
FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + maxDstSize;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
- const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
- (void)frame;
/* Regen sequences */
if (nbSeq) {
-#define STORED_SEQS 4
+#define STORED_SEQS 8
#define STORED_SEQS_MASK (STORED_SEQS-1)
-#define ADVANCED_SEQS 4
+#define ADVANCED_SEQS STORED_SEQS
seq_t sequences[STORED_SEQS];
int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
seqState_t seqState;
int seqNb;
+ size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
+
dctx->fseEntropy = 1;
{ int i; for (i=0; ientropy.rep[i]; }
- seqState.prefixStart = prefixStart;
- seqState.pos = (size_t)(op-prefixStart);
- seqState.dictEnd = dictEnd;
assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF(
@@ -1679,37 +1771,95 @@
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
/* prepare in advance */
- for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNblitBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
+ /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
- assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
- PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
- sequences[seqNb & STORED_SEQS_MASK] = sequence;
- op += oneSeqSize;
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ } }
+ else
+ {
+ /* lit buffer is either wholly contained in first or second split, or not split at all*/
+ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
}
- RETURN_ERROR_IF(seqNblitBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit) {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence->litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+ }
+ else
+ {
+ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
- assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
- op += oneSeqSize;
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
}
/* save reps for next block */
@@ -1717,25 +1867,34 @@
}
/* last literal segment */
- { size_t const lastLLSize = litEnd - litPtr;
- RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
- ZSTD_memcpy(op, litPtr, lastLLSize);
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ }
+ { size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
- return op-ostart;
+ return (size_t)(op - ostart);
}
static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@@ -1744,53 +1903,65 @@
#if DYNAMIC_BMI2
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+static BMI2_TARGET_ATTRIBUTE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
#endif /* DYNAMIC_BMI2 */
-typedef size_t (*ZSTD_decompressSequences_t)(
- ZSTD_DCtx* dctx,
- void* dst, size_t maxDstSize,
- const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame);
-
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
- if (dctx->bmi2) {
- return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif
- return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+#endif
+ return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
@@ -1805,69 +1976,114 @@
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
- if (dctx->bmi2) {
- return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif
- return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+/*
+ * @returns The total size of the history referenceable by zstd, including
+ * both the prefix and the extDict. At @p op any offset larger than this
+ * is invalid.
+ */
+static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart)
+{
+ return (size_t)(op - virtualStart);
+}
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
- !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-/* ZSTD_getLongOffsetsShare() :
+typedef struct {
+ unsigned longOffsetShare;
+ unsigned maxNbAdditionalBits;
+} ZSTD_OffsetInfo;
+
+/* ZSTD_getOffsetInfo() :
* condition : offTable must be valid
* @return : "share" of long offsets (arbitrarily defined as > (1<<23))
- * compared to maximum possible of (1< 22) total += 1;
- }
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u 22) info.longOffsetShare += 1;
+ }
- assert(tableLog <= OffFSELog);
- total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+ assert(tableLog <= OffFSELog);
+ info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+ }
+
+ return info;
+}
- return total;
+/*
+ * @returns The maximum offset we can decode in one read of our bitstream, without
+ * reloading more bits in the middle of the offset bits read. Any offsets larger
+ * than this must use the long offset decoder.
+ */
+static size_t ZSTD_maxShortOffset(void)
+{
+ if (MEM_64bits()) {
+ /* We can decode any offset without reloading bits.
+ * This might change if the max window size grows.
+ */
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ return (size_t)-1;
+ } else {
+ /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1.
+ * This offBase would require STREAM_ACCUMULATOR_MIN extra bits.
+ * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset.
+ */
+ size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1;
+ size_t const maxOffset = maxOffbase - ZSTD_REP_NUM;
+ assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN);
+ return maxOffset;
+ }
}
-#endif
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, const int frame)
+ const void* src, size_t srcSize, const streaming_operation streaming)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
- /* isLongOffset must be true if there are long offsets.
- * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
- * We don't expect that to be the case in 64-bit mode.
- * In block mode, window size is not known, so we have to be conservative.
- * (note: but it could be evaluated from current-lowLimit)
- */
- ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
- DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize);
- RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
+ /* Note : the wording of the specification
+ * allows compressed block to be sized exactly ZSTD_blockSizeMax(dctx).
+ * This generally does not happen, as it makes little sense,
+ * since an uncompressed block would feature same size and have no decompression cost.
+ * Also, note that decoder from reference libzstd before < v1.5.4
+ * would consider this edge case as an error.
+ * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx)
+ * for broader compatibility with the deployed ecosystem of zstd decoders */
+ RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, "");
/* Decode literals section */
- { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
- DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
@@ -1875,6 +2091,23 @@
/* Build Decoding Tables */
{
+ /* Compute the maximum block size, which must also work when !frame and fParams are unset.
+ * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
+ */
+ size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
+ size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart);
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than ZSTD_maxShortOffset().
+ * We don't expect that to be the case in 64-bit mode.
+ *
+ * We check here to see if our history is large enough to allow long offsets.
+ * If it isn't, then we can't possible have (valid) long offsets. If the offset
+ * is invalid, then it is okay to read it incorrectly.
+ *
+ * If isLongOffsets is true, then we will later check our decoding table to see
+ * if it is even possible to generate long offsets.
+ */
+ ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset()));
/* These macros control at build-time which decompressor implementation
* we use. If neither is defined, we do some inspection and dispatch at
* runtime.
@@ -1882,6 +2115,11 @@
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
int usePrefetchDecoder = dctx->ddictIsCold;
+#else
+ /* Set to 1 to avoid computing offset info if we don't need to.
+ * Otherwise this value is ignored.
+ */
+ int usePrefetchDecoder = 1;
#endif
int nbSeq;
size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
@@ -1889,37 +2127,55 @@
ip += seqHSize;
srcSize -= seqHSize;
- RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall,
+ "invalid dst");
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
- !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
- if ( !usePrefetchDecoder
- && (!frame || (dctx->fParams.windowSize > (1<<24)))
- && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
- U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
- U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
- usePrefetchDecoder = (shareLongOffsets >= minShare);
+ /* If we could potentially have long offsets, or we might want to use the prefetch decoder,
+ * compute information about the share of long offsets, and the maximum nbAdditionalBits.
+ * NOTE: could probably use a larger nbSeq limit
+ */
+ if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) {
+ ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq);
+ if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) {
+ /* If isLongOffset, but the maximum number of additional bits that we see in our table is small
+ * enough, then we know it is impossible to have too long an offset in this block, so we can
+ * use the regular offset decoder.
+ */
+ isLongOffset = ZSTD_lo_isRegularOffset;
+ }
+ if (!usePrefetchDecoder) {
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (info.longOffsetShare >= minShare);
+ }
}
-#endif
dctx->ddictIsCold = 0;
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
- if (usePrefetchDecoder)
+ if (usePrefetchDecoder) {
+#else
+ (void)usePrefetchDecoder;
+ {
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
#endif
+ }
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
/* else */
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ if (dctx->litBufferLocation == ZSTD_split)
+ return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+ else
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
#endif
}
}
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
{
if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
@@ -1931,13 +2187,24 @@
}
-size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
size_t dSize;
+ dctx->isFrameDecompression = 0;
ZSTD_checkContinuity(dctx, dst, dstCapacity);
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming);
+ FORWARD_IF_ERROR(dSize, "");
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
+
+
+/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize);
+}
Index: lib/zstd/compress/fse_compress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
--- a/lib/zstd/compress/fse_compress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/fse_compress.c (date 1740124241354)
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* FSE : Finite State Entropy encoder
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -25,7 +26,8 @@
#include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
-#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -75,13 +77,14 @@
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
U32 const step = FSE_TABLESTEP(tableSize);
+ U32 const maxSV1 = maxSymbolValue+1;
- U32* cumul = (U32*)workSpace;
- FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+ U16* cumul = (U16*)workSpace; /* size = maxSV1 */
+ FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
U32 highThreshold = tableSize-1;
- if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+ assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
/* CTable header */
tableU16[-2] = (U16) tableLog;
@@ -89,7 +92,7 @@
assert(tableLog < 16); /* required for threshold strategy to work */
/* For explanations on how to distribute symbol values over the table :
- * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+ * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
#ifdef __clang_analyzer__
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
@@ -98,20 +101,61 @@
/* symbol start positions */
{ U32 u;
cumul[0] = 0;
- for (u=1; u <= maxSymbolValue+1; u++) {
+ for (u=1; u <= maxSV1; u++) {
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
cumul[u] = cumul[u-1] + 1;
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
} else {
- cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+ assert(normalizedCounter[u-1] >= 0);
+ cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+ assert(cumul[u] >= cumul[u-1]); /* no overflow */
} }
- cumul[maxSymbolValue+1] = tableSize+1;
+ cumul[maxSV1] = (U16)(tableSize+1);
}
/* Spread symbols */
- { U32 position = 0;
+ if (highThreshold == tableSize - 1) {
+ /* Case for no low prob count symbols. Lay down 8 bytes at a time
+ * to reduce branch misses since we are operating on a small block
+ */
+ BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+ { U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s=0);
+ pos += (size_t)n;
+ }
+ }
+ /* Spread symbols across the table. Lack of lowprob symbols means that
+ * we don't need variable sized inner loop, so we can unroll the loop and
+ * reduce branch misses.
+ */
+ { size_t position = 0;
+ size_t s;
+ size_t const unroll = 2; /* Experimentally determined optimal unroll */
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableSymbol[uPosition] = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0); /* Must have initialized all positions */
+ }
+ } else {
+ U32 position = 0;
U32 symbol;
- for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ for (symbol=0; symbol highThreshold)
position = (position + step) & tableMask; /* Low proba area */
} }
-
assert(position==0); /* Must have initialized all positions */
}
@@ -144,16 +187,17 @@
case -1:
case 1:
symbolTT[s].deltaNbBits = (tableLog << 16) - (1< 1);
+ { U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
+ U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
- symbolTT[s].deltaFindState = total - normalizedCounter[s];
- total += normalizedCounter[s];
+ symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+ total += (unsigned)normalizedCounter[s];
} } } }
#if 0 /* debug : symbol costs */
@@ -164,8 +208,7 @@
symbol, normalizedCounter[symbol],
FSE_getMaxNbBits(symbolTT, symbol),
(double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
- }
- }
+ } }
#endif
return 0;
@@ -173,16 +216,18 @@
-
#ifndef FSE_COMMONDEFS_ONLY
-
/*-**************************************************************
* FSE NCount encoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
- size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ + 4 /* bitCount initialized at 4 */
+ + 2 /* first two symbols may use one additional bit each */) / 8)
+ + 1 /* round up to whole nb bytes */
+ + 2 /* additional two bytes for bitstream flush */;
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}
@@ -211,7 +256,7 @@
/* Init */
remaining = tableSize+1; /* +1 for extra accuracy */
threshold = tableSize;
- nbBits = tableLog+1;
+ nbBits = (int)tableLog+1;
while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
if (previousIs0) {
@@ -230,7 +275,7 @@
}
while (symbol >= start+3) {
start+=3;
- bitStream += 3 << bitCount;
+ bitStream += 3U << bitCount;
bitCount += 2;
}
bitStream += (symbol-start) << bitCount;
@@ -250,7 +295,7 @@
count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
- bitStream += count << bitCount;
+ bitStream += (U32)count << bitCount;
bitCount += nbBits;
bitCount -= (count>8);
out+= (bitCount+7) /8;
- return (out-ostart);
+ assert(out >= ostart);
+ return (size_t)(out-ostart);
}
@@ -299,21 +345,11 @@
* FSE Compression Code
****************************************************************/
-FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
-{
- size_t size;
- if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
- return (FSE_CTable*)ZSTD_malloc(size);
-}
-
-void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
-
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
- U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
- U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
@@ -321,7 +357,7 @@
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
- U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
@@ -489,40 +525,6 @@
return tableLog;
}
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
-{
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSymbolValue = tableMask;
- void* const ptr = ct;
- U16* const tableU16 = ( (U16*) ptr) + 2;
- void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
- FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1) return ERROR(GENERIC); /* min size */
-
- /* header */
- tableU16[-2] = (U16) nbBits;
- tableU16[-1] = (U16) maxSymbolValue;
-
- /* Build table */
- for (s=0; sUTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
--- a/lib/zstd/compress/zstd_fast.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_fast.c (date 1740124241456)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +12,46 @@
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
+ * Feel free to remove this assert if there's a good reason! */
+ assert(dtlm == ZSTD_dtlm_full);
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
+
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
+ } } } }
+}
+
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
@@ -25,6 +64,10 @@
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
+ /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
+ * Feel free to remove this assert if there's a good reason! */
+ assert(dtlm == ZSTD_dtlm_fast);
+
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
@@ -42,171 +85,403 @@
} } } }
}
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
+{
+ if (tfp == ZSTD_tfp_forCDict) {
+ ZSTD_fillHashTableForCDict(ms, end, dtlm);
+ } else {
+ ZSTD_fillHashTableForCCtx(ms, end, dtlm);
+ }
+}
+
+
+typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit);
+
+static int
+ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* Array of ~random data, should have low probability of matching data.
+ * Load from here if the index is invalid.
+ * Used to avoid unpredictable branches. */
+ static const BYTE dummy[] = {0x12,0x34,0x56,0x78};
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_fast_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ /* currentIdx >= lowLimit is a (somewhat) unpredictable branch.
+ * However expression below compiles into conditional move.
+ */
+ const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy);
+ /* Note: this used to be written as : return test1 && test2;
+ * Unfortunately, once inlined, these tests become branches,
+ * in which case it becomes critical that they are executed in the right order (test1 then test2).
+ * So we have to write these tests in a specific manner to ensure their ordering.
+ */
+ if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0;
+ /* force ordering of these tests, which matters once the function is inlined, as they become branches */
+ __asm__("");
+ return matchIdx >= idxLowLimit;
+}
+
+static int
+ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* using a branch instead of a cmov,
+ * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true,
+ * aka almost all candidates are within range */
+ U32 mval;
+ if (matchIdx >= idxLowLimit) {
+ mval = MEM_read32(matchAddress);
+ } else {
+ mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */
+ }
+
+ return (MEM_read32(currentPtr) == mval);
+}
+
+
+/*
+ * If you squint hard enough (and ignore repcodes), the search operation at any
+ * given position is broken into 4 stages:
+ *
+ * 1. Hash (map position to hash value via input read)
+ * 2. Lookup (map hash val to index via hashtable read)
+ * 3. Load (map index to value at that position via input read)
+ * 4. Compare
+ *
+ * Each of these steps involves a memory read at an address which is computed
+ * from the previous step. This means these steps must be sequenced and their
+ * latencies are cumulative.
+ *
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
+ * onto the next, this implementation interleaves these operations across the
+ * next few positions:
+ *
+ * R = Repcode Read & Compare
+ * H = Hash
+ * T = Table Lookup
+ * M = Match Read & Compare
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | ... M
+ * N+1 | ... TM
+ * N+2 | R H T M
+ * N+3 | H TM
+ * N+4 | R H T M
+ * N+5 | H ...
+ * N+6 | R ...
+ *
+ * This is very much analogous to the pipelining of execution in a CPU. And just
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+ * branch).
+ *
+ * When this happens, we throw away our current state, and do the following prep
+ * to re-enter the loop:
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | H T
+ * N+1 | H
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_noDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls)
+ U32 const mls, int useCmov)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
- /* support stepSize of 0 */
- size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
- /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
- const BYTE* ip0 = istart;
- const BYTE* ip1;
- const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
+
+ const BYTE* anchor = istart;
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* ip2;
+ const BYTE* ip3;
+ U32 current0;
- /* init */
+ U32 rep_offset1 = rep[0];
+ U32 rep_offset2 = rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+ size_t hash0; /* hash for ip0 */
+ size_t hash1; /* hash for ip1 */
+ U32 matchIdx; /* match idx for ip0 */
+
+ U32 offcode;
+ const BYTE* match0;
+ size_t mLength;
+
+ /* ip0 and ip1 are always adjacent. The targetLength skipping and
+ * uncompressibility acceleration is applied to every other position,
+ * matching the behavior of #1562. step therefore represents the gap
+ * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+ size_t step;
+ const BYTE* nextStep;
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
+ const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch;
+
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
- ip1 = ip0 + 1;
{ U32 const curr = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
+ if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
}
- /* Main Search Loop */
-#ifdef __INTEL_COMPILER
- /* From intel 'The vector pragma indicates that the loop should be
- * vectorized if it is legal to do so'. Can be used together with
- * #pragma ivdep (but have opted to exclude that because intel
- * warns against using it).*/
- #pragma vector always
-#endif
- while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
- size_t mLength;
- BYTE const* ip2 = ip0 + 2;
- size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
- U32 const val0 = MEM_read32(ip0);
- size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
- U32 const val1 = MEM_read32(ip1);
- U32 const current0 = (U32)(ip0-base);
- U32 const current1 = (U32)(ip1-base);
- U32 const matchIndex0 = hashTable[h0];
- U32 const matchIndex1 = hashTable[h1];
- BYTE const* repMatch = ip2 - offset_1;
- const BYTE* match0 = base + matchIndex0;
- const BYTE* match1 = base + matchIndex1;
- U32 offcode;
+ /* start each op */
+_start: /* Requires: ip0 */
+
+ step = stepSize;
+ nextStep = ip0 + kStepIncr;
-#if defined(__aarch64__)
- PREFETCH_L1(ip0+256);
-#endif
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+ ip1 = ip0 + 1;
+ ip2 = ip0 + step;
+ ip3 = ip2 + 1;
- hashTable[h0] = current0; /* update hash table */
- hashTable[h1] = current1; /* update hash table */
+ if (ip3 >= ilimit) {
+ goto _cleanup;
+ }
- assert(ip0 + 1 == ip1);
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
- if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
- mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
- ip0 = ip2 - mLength;
- match0 = repMatch - mLength;
+ matchIdx = hashTable[hash0];
+
+ do {
+ /* load repcode match for ip[2]*/
+ const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* check repcode at ip[2] */
+ if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+ ip0 = ip2;
+ match0 = ip0 - rep_offset1;
+ mLength = ip0[-1] == match0[-1];
+ ip0 -= mLength;
+ match0 -= mLength;
+ offcode = REPCODE1_TO_OFFBASE;
mLength += 4;
- offcode = 0;
+
+ /* Write next hash table entry: it's already calculated.
+ * This write is known to be safe because ip1 is before the
+ * repcode (ip2). */
+ hashTable[hash1] = (U32)(ip1 - base);
+
goto _match;
}
- if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
- /* found a regular match */
+
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry (it's already calculated).
+ * This write is known to be safe because the ip1 == ip0 + 1,
+ * so searching will resume after ip1 */
+ hashTable[hash1] = (U32)(ip1 - base);
+
goto _offset;
}
- if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
- /* found a regular match after one literal */
- ip0 = ip1;
- match0 = match1;
+
+ /* lookup ip[1] */
+ matchIdx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip3;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry, since it's already calculated */
+ if (step <= 4) {
+ /* Avoid writing an index if it's >= position where search will resume.
+ * The minimum possible match has length 4, so search can resume at ip0 + 4.
+ */
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
goto _offset;
}
- { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
- assert(step >= 2);
- ip0 += step;
- ip1 += step;
- continue;
+
+ /* lookup ip[1] */
+ matchIdx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip0 + step;
+ ip3 = ip1 + step;
+
+ /* calculate step */
+ if (ip2 >= nextStep) {
+ step++;
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ nextStep += kStepIncr;
}
-_offset: /* Requires: ip0, match0 */
- /* Compute the offset code */
- offset_2 = offset_1;
- offset_1 = (U32)(ip0-match0);
- offcode = offset_1 + ZSTD_REP_MOVE;
- mLength = 4;
- /* Count the backwards match length */
- while (((ip0>anchor) & (match0>prefixStart))
- && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+ } while (ip3 < ilimit);
+
+_cleanup:
+ /* Note that there are probably still a couple positions one could search.
+ * However, it seems to be a meaningful performance hit to try to search
+ * them. So let's not. */
+
+ /* When the repcodes are outside of the prefix, we set them to zero before the loop.
+ * When the offsets are still zero, we need to restore them after the block to have a correct
+ * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
+ * offsets were invalid. We need to figure out which offset to refill with.
+ * - If both offsets are zero they are in the same order.
+ * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
+ * - If only one is zero, we need to decide which offset to restore.
+ * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
+ * - It is impossible for rep_offset2 to be non-zero.
+ *
+ * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
+ * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
+ */
+ offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
+ rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx */
+
+ /* Compute the offset code. */
+ match0 = base + matchIdx;
+ rep_offset2 = rep_offset1;
+ rep_offset1 = (U32)(ip0-match0);
+ offcode = OFFSET_TO_OFFBASE(rep_offset1);
+ mLength = 4;
+
+ /* Count the backwards match length. */
+ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+ ip0--;
+ match0--;
+ mLength++;
+ }
_match: /* Requires: ip0, match0, offcode */
- /* Count the forward length */
- mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
- ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
- /* match found */
- ip0 += mLength;
- anchor = ip0;
+
+ /* Count the forward length. */
+ mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+ ip0 += mLength;
+ anchor = ip0;
- if (ip0 <= ilimit) {
- /* Fill Table */
- assert(base+current0+2 > istart); /* check base overflow */
- hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+ /* Fill table and check for immediate repcode. */
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
- if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
- while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
- ip0 += rLength;
- ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
- anchor = ip0;
- continue; /* faster when present (confirmed on gcc-8) ... (?) */
- } } }
- ip1 = ip0 + 1;
- }
+ if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+ { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ } } }
+
+ goto _start;
+}
- /* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \
+ }
- /* Return the last literals size */
- return (size_t)(iend - anchor);
-}
+ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ZSTD_GEN_FAST_FN(noDict, 7, 1)
+ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ZSTD_GEN_FAST_FN(noDict, 7, 0)
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mml = ms->cParams.minMatch;
+ /* use cmov when "candidate in range" branch is likely unpredictable */
+ int const useCmov = ms->cParams.windowLog < 19;
assert(ms->dictMatchState == NULL);
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
- case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
- case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
- case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+ if (useCmov) {
+ switch(mml)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+ }
+ } else {
+ /* use a branch instead */
+ switch(mml)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+ }
}
}
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize, U32 const mls)
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
@@ -215,16 +490,16 @@
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
+ const BYTE* ip0 = istart;
+ const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
const BYTE* anchor = istart;
const U32 prefixStartIndex = ms->window.dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
const U32* const dictHashTable = dms->hashTable;
const U32 dictStartIndex = dms->window.dictLimit;
@@ -232,127 +507,183 @@
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
- const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
- const U32 dictHLog = dictCParams->hashLog;
+ const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
+ const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
/* if a dictionary is still attached, it necessarily means that
* it is within window size. So we just check it. */
const U32 maxDistance = 1U << cParams->windowLog;
- const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
assert(endIndex - prefixStartIndex <= maxDistance);
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
+
/* ensure there will be no underflow
* when translating a dict index into a local index */
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+ PREFETCH_AREA(dictHashTable, hashTableBytes);
+ }
+
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
- ip += (dictAndPrefixLength == 0);
+ ip0 += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ /* Outer search loop */
+ assert(stepSize >= 1);
+ while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hlog, mls);
- U32 const curr = (U32)(ip-base);
- U32 const matchIndex = hashTable[h];
- const BYTE* match = base + matchIndex;
- const U32 repIndex = curr + 1 - offset_1;
- const BYTE* repMatch = (repIndex < prefixStartIndex) ?
- dictBase + (repIndex - dictIndexDelta) :
- base + repIndex;
- hashTable[h] = curr; /* update hash table */
+ size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+
+ size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
+ U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
+
+ U32 matchIndex = hashTable[hash0];
+ U32 curr = (U32)(ip0 - base);
+ size_t step = stepSize;
+ const size_t kStepIncr = 1 << kSearchStrength;
+ const BYTE* nextStep = ip0 + kStepIncr;
+
+ /* Inner search loop */
+ while (1) {
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+ size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
+ hashTable[hash0] = curr; /* update hash table */
- if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
- } else if ( (matchIndex <= prefixStartIndex) ) {
- size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
- U32 const dictMatchIndex = dictHashTable[dictHash];
- const BYTE* dictMatch = dictBase + dictMatchIndex;
- if (dictMatchIndex <= dictStartIndex ||
- MEM_read32(dictMatch) != MEM_read32(ip)) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a dict match */
- U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
- mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
- while (((ip>anchor) & (dictMatch>dictStart))
- && (ip[-1] == dictMatch[-1])) {
- ip--; dictMatch--; mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
+ ip0++;
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
+ break;
+ }
+
+ if (dictTagsMatch) {
+ /* Found a possible dict match */
+ const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex > dictStartIndex &&
+ MEM_read32(dictMatch) == MEM_read32(ip0)) {
+ /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
+ if (matchIndex <= prefixStartIndex) {
+ U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
+ while (((ip0 > anchor) & (dictMatch > dictStart))
+ && (ip0[-1] == dictMatch[-1])) {
+ ip0--;
+ dictMatch--;
+ mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+ break;
+ }
+ }
}
- } else if (MEM_read32(match) != MEM_read32(ip)) {
- /* it's not a match, and we're not going to check the dictionary */
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a regular match */
- U32 const offset = (U32)(ip-match);
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- while (((ip>anchor) & (match>prefixStart))
- && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- }
+
+ if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) {
+ /* found a regular match of size >= 4 */
+ U32 const offset = (U32) (ip0 - match);
+ mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
+ while (((ip0 > anchor) & (match > prefixStart))
+ && (ip0[-1] == match[-1])) {
+ ip0--;
+ match--;
+ mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+ break;
+ }
+
+ /* Prepare for next iteration */
+ dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
+ matchIndex = hashTable[hash1];
+
+ if (ip1 >= nextStep) {
+ step++;
+ nextStep += kStepIncr;
+ }
+ ip0 = ip1;
+ ip1 = ip1 + step;
+ if (ip1 > ilimit) goto _cleanup;
+
+ curr = (U32)(ip0 - base);
+ hash0 = hash1;
+ } /* end inner search loop */
/* match found */
- ip += mLength;
- anchor = ip;
+ assert(mLength);
+ ip0 += mLength;
+ anchor = ip0;
- if (ip <= ilimit) {
+ if (ip0 <= ilimit) {
/* Fill Table */
assert(base+curr+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
/* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
+ while (ip0 <= ilimit) {
+ U32 const current2 = (U32)(ip0-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
+ && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
+ ip0 += repLength2;
+ anchor = ip0;
continue;
}
break;
}
}
+
+ /* Prepare for next iteration */
+ assert(ip0 == anchor);
+ ip1 = ip0 + stepSize;
}
+_cleanup:
/* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+ rep[0] = offset_1;
+ rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+
+ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -361,30 +692,31 @@
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+ return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+ return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+ return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
- return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+ return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
}
}
-static size_t ZSTD_compressBlock_fast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize, U32 const mls)
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
- U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -397,100 +729,256 @@
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* ip2;
+ const BYTE* ip3;
+ U32 current0;
+
+
+ size_t hash0; /* hash for ip0 */
+ size_t hash1; /* hash for ip1 */
+ U32 idx; /* match idx for ip0 */
+ const BYTE* idxBase; /* base pointer for idx */
+
+ U32 offcode;
+ const BYTE* match0;
+ size_t mLength;
+ const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
+
+ size_t step;
+ const BYTE* nextStep;
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
/* switch to "regular" variant if extDict is invalidated due to maxDistance */
if (prefixStartIndex == dictStartIndex)
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
+ return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
+
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const maxRep = curr - dictStartIndex;
+ if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
+ }
+
+ /* start each op */
+_start: /* Requires: ip0 */
+
+ step = stepSize;
+ nextStep = ip0 + kStepIncr;
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hlog, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
- const U32 curr = (U32)(ip-base);
- const U32 repIndex = curr + 1 - offset_1;
- const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- hashTable[h] = curr; /* update hash table */
- DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
- assert(offset_1 <= curr +1); /* check repIndex */
-
- if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- } else {
- if ( (matchIndex < dictStartIndex) ||
- (MEM_read32(match) != MEM_read32(ip)) ) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+ ip1 = ip0 + 1;
+ ip2 = ip0 + step;
+ ip3 = ip2 + 1;
+
+ if (ip3 >= ilimit) {
+ goto _cleanup;
+ }
+
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+ idx = hashTable[hash0];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ do {
+ { /* load repcode match for ip[2] */
+ U32 const current2 = (U32)(ip2 - base);
+ U32 const repIndex = current2 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ U32 rval;
+ if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
+ & (offset_1 > 0) ) {
+ rval = MEM_read32(repBase + repIndex);
+ } else {
+ rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
}
- { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
- const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
- U32 const offset = curr - matchIndex;
- size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = offset; /* update offset history */
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- ip += mLength;
- anchor = ip;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* check repcode at ip[2] */
+ if (MEM_read32(ip2) == rval) {
+ ip0 = ip2;
+ match0 = repBase + repIndex;
+ matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ assert((match0 != prefixStart) & (match0 != dictStart));
+ mLength = ip0[-1] == match0[-1];
+ ip0 -= mLength;
+ match0 -= mLength;
+ offcode = REPCODE1_TO_OFFBASE;
+ mLength += 4;
+ goto _match;
+ } }
+
+ { /* load match for ip[0] */
+ U32 const mval = idx >= dictStartIndex ?
+ MEM_read32(idxBase + idx) :
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ } }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip3;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ { /* load match for ip[0] */
+ U32 const mval = idx >= dictStartIndex ?
+ MEM_read32(idxBase + idx) :
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
} }
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip0 + step;
+ ip3 = ip1 + step;
+
+ /* calculate step */
+ if (ip2 >= nextStep) {
+ step++;
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ nextStep += kStepIncr;
+ }
+ } while (ip3 < ilimit);
+
+_cleanup:
+ /* Note that there are probably still a couple positions we could search.
+ * However, it seems to be a meaningful performance hit to try to search
+ * them. So let's not. */
+
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx, idxBase */
+
+ /* Compute the offset code. */
+ { U32 const offset = current0 - idx;
+ const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
+ matchEnd = idx < prefixStartIndex ? dictEnd : iend;
+ match0 = idxBase + idx;
+ offset_2 = offset_1;
+ offset_1 = offset;
+ offcode = OFFSET_TO_OFFBASE(offset);
+ mLength = 4;
+
+ /* Count the backwards match length. */
+ while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
+ ip0--;
+ match0--;
+ mLength++;
+ } }
+
+_match: /* Requires: ip0, match0, offcode, matchEnd */
+
+ /* Count the forward length. */
+ assert(matchEnd != 0);
+ mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+ ip0 += mLength;
+ anchor = ip0;
+
+ /* write next hash table entry */
+ if (ip1 < ip0) {
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
+
+ /* Fill table and check for immediate repcode. */
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ while (ip0 <= ilimit) {
+ U32 const repIndex2 = (U32)(ip0-base) - offset_2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0))
+ && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += repLength2;
+ anchor = ip0;
+ continue;
+ }
+ break;
+ } }
- /* save reps for next block */
- rep[0] = offset_1;
- rep[1] = offset_2;
-
- /* Return the last literals size */
- return (size_t)(iend - anchor);
+ goto _start;
}
+ZSTD_GEN_FAST_FN(extDict, 4, 0)
+ZSTD_GEN_FAST_FN(extDict, 5, 0)
+ZSTD_GEN_FAST_FN(extDict, 6, 0)
+ZSTD_GEN_FAST_FN(extDict, 7, 0)
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState == NULL);
switch(mls)
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
}
}
Index: lib/zstd/compress/zstd_compress_literals.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
--- a/lib/zstd/compress/zstd_compress_literals.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_literals.h (date 1740124241415)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,14 +17,24 @@
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+/* ZSTD_compressRleLiteralsBlock() :
+ * Conditions :
+ * - All bytes in @src are identical
+ * - dstCapacity >= 4 */
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_strategy strategy, int disableLiteralCompression,
- void* dst, size_t dstCapacity,
+/* ZSTD_compressLiterals():
+ * @entropyWorkspace: must be aligned on 4-bytes boundaries
+ * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
+ * @suspectUncompressible: sampling checks, to potentially skip huffman coding
+ */
+size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
- const int bmi2);
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ int suspectUncompressible,
+ int bmi2);
#endif /* ZSTD_COMPRESS_LITERALS_H */
Index: lib/zstd/compress/zstd_opt.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
--- a/lib/zstd/compress/zstd_opt.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_opt.h (date 1740124333228)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,40 +12,62 @@
#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H
-
#include "zstd_compress_internal.h"
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
/* used in ZSTD_loadDictionaryContent() */
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend);
+#endif
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_BTOPT NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL
+#endif
-size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
* and is only specific for the first block (no prefix) */
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2
+#else
+#define ZSTD_COMPRESSBLOCK_BTULTRA NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
+#endif
#endif /* ZSTD_OPT_H */
Index: lib/zstd/compress/zstd_fast.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
--- a/lib/zstd/compress/zstd_fast.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_fast.h (date 1740124241461)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,21 +12,20 @@
#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h"
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
- void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
#endif /* ZSTD_FAST_H */
Index: lib/zstd/compress/zstd_compress_superblock.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
--- a/lib/zstd/compress/zstd_compress_superblock.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_superblock.h (date 1740124241435)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
Index: lib/zstd/decompress/zstd_ddict.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
--- a/lib/zstd/decompress/zstd_ddict.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_ddict.h (date 1740124333260)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
Index: lib/zstd/decompress/zstd_decompress_internal.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
--- a/lib/zstd/decompress/zstd_decompress_internal.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_decompress_internal.h (date 1740124333288)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,7 +21,7 @@
* Dependencies
*********************************************************/
#include "../common/mem.h" /* BYTE, U16, U32 */
-#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */
+#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */
@@ -40,7 +41,7 @@
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
-static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
+static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
@@ -75,12 +76,13 @@
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
+#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12
typedef struct {
ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
- HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUF_decompress4X */
U32 rep[ZSTD_REP_NUM];
U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;
@@ -106,6 +108,22 @@
size_t ddictPtrCount;
} ZSTD_DDictHashSet;
+#ifndef ZSTD_DECODER_INTERNAL_BUFFER
+# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16)
+#endif
+
+#define ZSTD_LBMIN 64
+#define ZSTD_LBMAX (128 << 10)
+
+/* extra buffer, compensates when dst is not large enough to store litBuffer */
+#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
+
+typedef enum {
+ ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */
+ ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */
+ ZSTD_split = 2 /* Split between litExtraBuffer and dst */
+} ZSTD_litLocation_e;
+
struct ZSTD_DCtx_s
{
const ZSTD_seqSymbol* LLTptr;
@@ -119,7 +137,7 @@
const void* virtualStart; /* virtual start of previous segment if it was just before current one */
const void* dictEnd; /* end of previous segment */
size_t expected;
- ZSTD_frameHeader fParams;
+ ZSTD_FrameHeader fParams;
U64 processedCSize;
U64 decodedSize;
blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
@@ -136,7 +154,10 @@
size_t litSize;
size_t rleSize;
size_t staticSize;
+ int isFrameDecompression;
+#if DYNAMIC_BMI2
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+#endif
/* dictionary */
ZSTD_DDict* ddictLocal;
@@ -146,6 +167,8 @@
ZSTD_dictUses_e dictUses;
ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+ int disableHufAsm;
+ int maxBlockSizeParam;
/* streaming */
ZSTD_dStreamStage streamStage;
@@ -158,16 +181,16 @@
size_t outStart;
size_t outEnd;
size_t lhSize;
- void* legacyContext;
- U32 previousLegacyVersion;
- U32 legacyVersion;
U32 hostageByte;
int noForwardProgress;
ZSTD_bufferMode_e outBufferMode;
ZSTD_outBuffer expectedOutBuffer;
/* workspace */
- BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+ BYTE* litBuffer;
+ const BYTE* litBufferEnd;
+ ZSTD_litLocation_e litBufferLocation;
+ BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */
BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
size_t oversizedDuration;
@@ -180,6 +203,14 @@
/* Tracing */
}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
+#if DYNAMIC_BMI2
+ return dctx->bmi2;
+#else
+ (void)dctx;
+ return 0;
+#endif
+}
/*-*******************************************************
* Shared internal functions
Index: lib/zstd/decompress/zstd_decompress_block.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
--- a/lib/zstd/decompress/zstd_decompress_block.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_decompress_block.h (date 1740124333283)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -33,6 +34,12 @@
*/
+ /* Streaming state is used to inform allocation of the literal buffer */
+typedef enum {
+ not_streaming = 0,
+ is_streaming = 1
+} streaming_operation;
+
/* ZSTD_decompressBlock_internal() :
* decompress block, starting at `src`,
* into destination buffer `dst`.
@@ -41,7 +48,7 @@
*/
size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, const int frame);
+ const void* src, size_t srcSize, const streaming_operation streaming);
/* ZSTD_buildFSETable() :
* generate FSE decoding table for one symbol (ll, ml or off)
@@ -54,9 +61,14 @@
*/
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
- const U32* baseValue, const U32* nbAdditionalBits,
+ const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize,
int bmi2);
+/* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */
+size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
#endif /* ZSTD_DEC_BLOCK_H */
Index: lib/zstd/decompress/huf_decompress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
--- a/lib/zstd/decompress/huf_decompress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/huf_decompress.c (date 1740124333250)
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* huff0 huffman decoder,
* part of Finite State Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -19,14 +20,27 @@
#include "../common/compiler.h"
#include "../common/bitstream.h" /* BIT_* */
#include "../common/fse.h" /* to compress headers */
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
+#include "../common/zstd_internal.h"
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */
+
+/* **************************************************************
+* Constants
+****************************************************************/
+
+#define HUF_DECODER_FAST_TABLELOG 11
/* **************************************************************
* Macros
****************************************************************/
+#ifdef HUF_DISABLE_FAST_DECODE
+# define HUF_ENABLE_FAST_DECODE 0
+#else
+# define HUF_ENABLE_FAST_DECODE 1
+#endif
+
/* These two optional macros force the use one way or another of the two
* Huffman decompression implementations. You can't force in both directions
* at the same time.
@@ -36,6 +50,24 @@
#error "Cannot force the use of the X1 and X2 decoders at the same time!"
#endif
+/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is
+ * supported at runtime, so we can add the BMI2 target attribute.
+ * When it is disabled, we will still get BMI2 if it is enabled statically.
+ */
+#if DYNAMIC_BMI2
+# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
+#else
+# define HUF_FAST_BMI2_ATTRS
+#endif
+
+#define HUF_EXTERN_C
+#define HUF_ASM_DECL HUF_EXTERN_C
+
+#if DYNAMIC_BMI2
+# define HUF_NEED_BMI2_FUNCTION 1
+#else
+# define HUF_NEED_BMI2_FUNCTION 0
+#endif
/* **************************************************************
* Error Management
@@ -53,6 +85,11 @@
/* **************************************************************
* BMI2 Variant Wrappers
****************************************************************/
+typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
#if DYNAMIC_BMI2
#define HUF_DGEN(fn) \
@@ -65,7 +102,7 @@
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
\
- static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
+ static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \
void* dst, size_t dstSize, \
const void* cSrc, size_t cSrcSize, \
const HUF_DTable* DTable) \
@@ -74,9 +111,9 @@
} \
\
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
{ \
- if (bmi2) { \
+ if (flags & HUF_flags_bmi2) { \
return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
@@ -86,9 +123,9 @@
#define HUF_DGEN(fn) \
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
{ \
- (void)bmi2; \
+ (void)flags; \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
}
@@ -107,13 +144,186 @@
return dtd;
}
+static size_t HUF_initFastDStream(BYTE const* ip) {
+ BYTE const lastByte = ip[7];
+ size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
+ size_t const value = MEM_readLEST(ip) | 1;
+ assert(bitsConsumed <= 8);
+ assert(sizeof(size_t) == 8);
+ return value << bitsConsumed;
+}
+
+
+/*
+ * The input/output arguments to the Huffman fast decoding loop:
+ *
+ * ip [in/out] - The input pointers, must be updated to reflect what is consumed.
+ * op [in/out] - The output pointers, must be updated to reflect what is written.
+ * bits [in/out] - The bitstream containers, must be updated to reflect the current state.
+ * dt [in] - The decoding table.
+ * ilowest [in] - The beginning of the valid range of the input. Decoders may read
+ * down to this pointer. It may be below iend[0].
+ * oend [in] - The end of the output stream. op[3] must not cross oend.
+ * iend [in] - The end of each input stream. ip[i] may cross iend[i],
+ * as long as it is above ilowest, but that indicates corruption.
+ */
+typedef struct {
+ BYTE const* ip[4];
+ BYTE* op[4];
+ U64 bits[4];
+ void const* dt;
+ BYTE const* ilowest;
+ BYTE* oend;
+ BYTE const* iend[4];
+} HUF_DecompressFastArgs;
+
+typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*);
+
+/*
+ * Initializes args for the fast decoding loop.
+ * @returns 1 on success
+ * 0 if the fallback implementation should be used.
+ * Or an error code on failure.
+ */
+static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
+{
+ void const* dt = DTable + 1;
+ U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
+
+ const BYTE* const istart = (const BYTE*)src;
+
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+
+ /* The fast decoding loop assumes 64-bit little-endian.
+ * This condition is false on x32.
+ */
+ if (!MEM_isLittleEndian() || MEM_32bits())
+ return 0;
+
+ /* Avoid nullptr addition */
+ if (dstSize == 0)
+ return 0;
+ assert(dst != NULL);
+
+ /* strict minimum : jump table + 1 byte per stream */
+ if (srcSize < 10)
+ return ERROR(corruption_detected);
+
+ /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
+ * If table log is not correct at this point, fallback to the old decoder.
+ * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
+ */
+ if (dtLog != HUF_DECODER_FAST_TABLELOG)
+ return 0;
+
+ /* Read the jump table. */
+ {
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
+ args->iend[0] = istart + 6; /* jumpTable */
+ args->iend[1] = args->iend[0] + length1;
+ args->iend[2] = args->iend[1] + length2;
+ args->iend[3] = args->iend[2] + length3;
+
+ /* HUF_initFastDStream() requires this, and this small of an input
+ * won't benefit from the ASM loop anyways.
+ */
+ if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8)
+ return 0;
+ if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
+ }
+ /* ip[] contains the position that is currently loaded into bits[]. */
+ args->ip[0] = args->iend[1] - sizeof(U64);
+ args->ip[1] = args->iend[2] - sizeof(U64);
+ args->ip[2] = args->iend[3] - sizeof(U64);
+ args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
+
+ /* op[] contains the output pointers. */
+ args->op[0] = (BYTE*)dst;
+ args->op[1] = args->op[0] + (dstSize+3)/4;
+ args->op[2] = args->op[1] + (dstSize+3)/4;
+ args->op[3] = args->op[2] + (dstSize+3)/4;
+
+ /* No point to call the ASM loop for tiny outputs. */
+ if (args->op[3] >= oend)
+ return 0;
+
+ /* bits[] is the bit container.
+ * It is read from the MSB down to the LSB.
+ * It is shifted left as it is read, and zeros are
+ * shifted in. After the lowest valid bit a 1 is
+ * set, so that CountTrailingZeros(bits[]) can be used
+ * to count how many bits we've consumed.
+ */
+ args->bits[0] = HUF_initFastDStream(args->ip[0]);
+ args->bits[1] = HUF_initFastDStream(args->ip[1]);
+ args->bits[2] = HUF_initFastDStream(args->ip[2]);
+ args->bits[3] = HUF_initFastDStream(args->ip[3]);
+
+ /* The decoders must be sure to never read beyond ilowest.
+ * This is lower than iend[0], but allowing decoders to read
+ * down to ilowest can allow an extra iteration or two in the
+ * fast loop.
+ */
+ args->ilowest = istart;
+
+ args->oend = oend;
+ args->dt = dt;
+
+ return 1;
+}
+
+static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd)
+{
+ /* Validate that we haven't overwritten. */
+ if (args->op[stream] > segmentEnd)
+ return ERROR(corruption_detected);
+ /* Validate that we haven't read beyond iend[].
+ * Note that ip[] may be < iend[] because the MSB is
+ * the next bit to read, and we may have consumed 100%
+ * of the stream, so down to iend[i] - 8 is valid.
+ */
+ if (args->ip[stream] < args->iend[stream] - 8)
+ return ERROR(corruption_detected);
+
+ /* Construct the BIT_DStream_t. */
+ assert(sizeof(size_t) == 8);
+ bit->bitContainer = MEM_readLEST(args->ip[stream]);
+ bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]);
+ bit->start = (const char*)args->ilowest;
+ bit->limitPtr = bit->start + sizeof(size_t);
+ bit->ptr = (const char*)args->ip[stream];
+
+ return 0;
+}
+
+/* Calls X(N) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM(X) \
+ do { \
+ X(0); \
+ X(1); \
+ X(2); \
+ X(3); \
+ } while (0)
+
+/* Calls X(N, var) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \
+ do { \
+ X(0, (var)); \
+ X(1, (var)); \
+ X(2, (var)); \
+ X(3, (var)); \
+ } while (0)
+
#ifndef HUF_FORCE_DECOMPRESS_X2
/*-***************************/
/* single-symbol decoding */
/*-***************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
+typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */
/*
* Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
@@ -122,14 +332,45 @@
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
U64 D4;
if (MEM_isLittleEndian()) {
- D4 = symbol + (nbBits << 8);
+ D4 = (U64)((symbol << 8) + nbBits);
} else {
- D4 = (symbol << 8) + nbBits;
+ D4 = (U64)(symbol + (nbBits << 8));
}
+ assert(D4 < (1U << 16));
D4 *= 0x0001000100010001ULL;
return D4;
}
+/*
+ * Increase the tableLog to targetTableLog and rescales the stats.
+ * If tableLog > targetTableLog this is a no-op.
+ * @returns New tableLog
+ */
+static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
+{
+ if (tableLog > targetTableLog)
+ return tableLog;
+ if (tableLog < targetTableLog) {
+ U32 const scale = targetTableLog - tableLog;
+ U32 s;
+ /* Increase the weight for all non-zero probability symbols by scale. */
+ for (s = 0; s < nbSymbols; ++s) {
+ huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
+ }
+ /* Update rankVal to reflect the new weights.
+ * All weights except 0 get moved to weight + scale.
+ * Weights [1, scale] are empty.
+ */
+ for (s = targetTableLog; s > scale; --s) {
+ rankVal[s] = rankVal[s - scale];
+ }
+ for (s = scale; s > 0; --s) {
+ rankVal[s] = 0;
+ }
+ }
+ return targetTableLog;
+}
+
typedef struct {
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
@@ -138,13 +379,7 @@
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;
-
-size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
-{
- return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
{
U32 tableLog = 0;
U32 nbSymbols = 0;
@@ -159,11 +394,15 @@
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags);
if (HUF_isError(iSize)) return iSize;
+
/* Table header */
{ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog + 1;
+ U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
+ tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
@@ -182,9 +421,8 @@
* rankStart[0] is not filled because there are no entries in the table for
* weight 0.
*/
- {
- int n;
- int nextRankStart = 0;
+ { int n;
+ U32 nextRankStart = 0;
int const unroll = 4;
int const nLimit = (int)nbSymbols - unroll + 1;
for (n=0; n<(int)tableLog+1; n++) {
@@ -207,14 +445,13 @@
/* fill DTable
* We fill all entries of each weight in order.
- * That way length is a constant for each iteration of the outter loop.
+ * That way length is a constant for each iteration of the outer loop.
* We can switch based on the length to a different inner loop which is
* optimized for that particular case.
*/
- {
- U32 w;
- int symbol=wksp->rankVal[0];
- int rankStart=0;
+ { U32 w;
+ int symbol = wksp->rankVal[0];
+ int rankStart = 0;
        for (w=1; w<tableLog+1; ++w) {
            int const symbolCount = wksp->rankVal[w];
int const length = (1 << w) >> 1;
@@ -288,15 +525,19 @@
}
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
- *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+ do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
-#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
+ } while (0)
-#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
- if (MEM_64bits()) \
- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
+ } while (0)
HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
@@ -304,11 +545,15 @@
BYTE* const pStart = p;
/* up to 4 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
- HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ if ((pEnd - p) > 3) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
}
/* [0-3] symbols remaining */
@@ -320,7 +565,7 @@
while (p < pEnd)
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
- return pEnd-pStart;
+ return (size_t)(pEnd-pStart);
}
FORCE_INLINE_TEMPLATE size_t
@@ -330,7 +575,7 @@
const HUF_DTable* DTable)
{
BYTE* op = (BYTE*)dst;
- BYTE* const oend = op + dstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(op, dstSize);
const void* dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
BIT_DStream_t bitD;
@@ -346,6 +591,10 @@
return dstSize;
}
+/* HUF_decompress4X1_usingDTable_internal_body():
+ * Conditions :
+ * @dstSize >= 6
+ */
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
void* dst, size_t dstSize,
@@ -354,6 +603,7 @@
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
@@ -388,33 +638,37 @@
U32 endSignal = 1;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ assert(dstSize >= 6); /* validated above */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
- for ( ; (endSignal) & (op4 < olimit) ; ) {
- HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
- endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
- endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
- endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
- endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit) ; ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ }
}
/* check corruption */
@@ -440,73 +694,249 @@
}
}
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
-typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
- const void *cSrc,
- size_t cSrcSize,
- const HUF_DTable *DTable);
+static
+size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
-HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
-HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
+#endif
-size_t HUF_decompress1X1_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
+static HUF_FAST_BMI2_ATTRS
+void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0) return ERROR(GENERIC);
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
+ U64 bits[4];
+ BYTE const* ip[4];
+ BYTE* op[4];
+ U16 const* const dtable = (U16 const*)args->dt;
+ BYTE* const oend = args->oend;
+ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local variables */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
+
+ assert(MEM_isLittleEndian());
+ assert(!MEM_32bits());
+
+ for (;;) {
+ BYTE* olimit;
+ int stream;
+
+ /* Assert loop preconditions */
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= (stream == 3 ? oend : op[stream + 1]));
+ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
+ {
+ /* Each iteration produces 5 output symbols per stream */
+ size_t const oiters = (size_t)(oend - op[3]) / 5;
+ /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes
+ * per stream.
+ */
+ size_t const iiters = (size_t)(ip[0] - ilowest) / 7;
+ /* We can safely run iters iterations before running bounds checks */
+ size_t const iters = MIN(oiters, iiters);
+ size_t const symbols = iters * 5;
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- const BYTE* ip = (const BYTE*) cSrc;
+ /* We can simply check that op[3] < olimit, instead of checking all
+ * of our bounds, since we can't hit the other bounds until we've run
+ * iters iterations, which only happens when op[3] == olimit.
+ */
+ olimit = op[3] + symbols;
+
+ /* Exit fast decoding loop once we reach the end. */
+ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
+ * previous one. This indicates corruption, and a precondition
+ * to our loop is that ip[i] >= ip[0].
+ */
+ for (stream = 1; stream < 4; ++stream) {
+ if (ip[stream] < ip[stream - 1])
+ goto _out;
+ }
+ }
+
+#ifndef NDEBUG
+ for (stream = 1; stream < 4; ++stream) {
+ assert(ip[stream] >= ip[stream - 1]);
+ }
+#endif
+
+#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \
+ do { \
+ int const index = (int)(bits[(_stream)] >> 53); \
+ int const entry = (int)dtable[index]; \
+ bits[(_stream)] <<= (entry & 0x3F); \
+ op[(_stream)][(_symbol)] = (BYTE)((entry >> 8) & 0xFF); \
+ } while (0)
+
+#define HUF_4X1_RELOAD_STREAM(_stream) \
+ do { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
+ int const nbBytes = ctz >> 3; \
+ op[(_stream)] += 5; \
+ ip[(_stream)] -= nbBytes; \
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
+ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
+ */
+ do {
+ /* Decode 5 symbols in each of the 4 streams */
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4);
- size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
- if (HUF_isError(hSize)) return hSize;
- if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
- ip += hSize; cSrcSize -= hSize;
+ /* Reload each of the 4 the bitstreams */
+ HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM);
+ } while (op[3] < olimit);
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
-}
+#undef HUF_4X1_DECODE_SYMBOL
+#undef HUF_4X1_RELOAD_STREAM
+ }
+_out:
+
+ /* Save the final values of each of the state variables back to args. */
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
+}
-size_t HUF_decompress4X1_usingDTable(
+/*
+ * @returns @p dstSize on success (>= 6)
+ * 0 if the fallback implementation should be used
+ * An error if an error occurred
+ */
+static HUF_FAST_BMI2_ATTRS
+size_t
+HUF_decompress4X1_usingDTable_internal_fast(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
+ const HUF_DTable* DTable,
+ HUF_DecompressFastLoopFn loopFn)
{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0) return ERROR(GENERIC);
- return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
+ void const* dt = DTable + 1;
+ BYTE const* const ilowest = (BYTE const*)cSrc;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
+ { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
+ if (ret == 0)
+ return 0;
+ }
+
+ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
+
+ /* Our loop guarantees that ip[] >= ilowest and that we haven't
+ * overwritten any op[].
+ */
+ assert(args.ip[0] >= ilowest);
+ assert(args.ip[0] >= ilowest);
+ assert(args.ip[1] >= ilowest);
+ assert(args.ip[2] >= ilowest);
+ assert(args.ip[3] >= ilowest);
+ assert(args.op[3] <= oend);
+
+ assert(ilowest == args.ilowest);
+ assert(ilowest + 6 == args.iend[0]);
+ (void)ilowest;
+
+ /* finish bit streams one by one. */
+ { size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ /* Decompress and validate that we've produced exactly the expected length. */
+ args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
+ }
+ }
-static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ /* decoded size */
+ assert(dstSize != 0);
+ return dstSize;
+}
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+
+static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
+{
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default;
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop;
+
+#if DYNAMIC_BMI2
+ if (flags & HUF_flags_bmi2) {
+ fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2;
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
+ }
+# endif
+ } else {
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
+ }
+#endif
+
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
+ size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
+ if (ret != 0)
+ return ret;
+ }
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
-
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
-}
-
#endif /* HUF_FORCE_DECOMPRESS_X2 */
@@ -518,106 +948,226 @@
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef struct { BYTE symbol; } sortedSymbol_t;
typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+/*
+ * Constructs a HUF_DEltX2 in a U32.
+ */
+static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ U32 seq;
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
+ if (MEM_isLittleEndian()) {
+ seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
+ return seq + (nbBits << 16) + ((U32)level << 24);
+ } else {
+ seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
+ return (seq << 16) + (nbBits << 8) + (U32)level;
+ }
+}
+
+/*
+ * Constructs a HUF_DEltX2.
+ */
+static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ HUF_DEltX2 DElt;
+ U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
+ ZSTD_memcpy(&DElt, &val, sizeof(val));
+ return DElt;
+}
+
+/*
+ * Constructs 2 HUF_DEltX2s and packs them into a U64.
+ */
+static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
+{
+ U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ return (U64)DElt + ((U64)DElt << 32);
+}
+
+/*
+ * Fills the DTable rank with all the symbols from [begin, end) that are each
+ * nbBits long.
+ *
+ * @param DTableRank The start of the rank in the DTable.
+ * @param begin The first symbol to fill (inclusive).
+ * @param end The last symbol to fill (exclusive).
+ * @param nbBits Each symbol is nbBits long.
+ * @param tableLog The table log.
+ * @param baseSeq If level == 1 { 0 } else { the first level symbol }
+ * @param level The level in the table. Must be 1 or 2.
+ */
+static void HUF_fillDTableX2ForWeight(
+ HUF_DEltX2* DTableRank,
+ sortedSymbol_t const* begin, sortedSymbol_t const* end,
+ U32 nbBits, U32 tableLog,
+ U16 baseSeq, int const level)
+{
+ U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
+ const sortedSymbol_t* ptr;
+ assert(level >= 1 && level <= 2);
+ switch (length) {
+ case 1:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ *DTableRank++ = DElt;
+ }
+ break;
+ case 2:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ DTableRank[0] = DElt;
+ DTableRank[1] = DElt;
+ DTableRank += 2;
+ }
+ break;
+ case 4:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ DTableRank += 4;
+ }
+ break;
+ case 8:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ DTableRank += 8;
+ }
+ break;
+ default:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ HUF_DEltX2* const DTableRankEnd = DTableRank + length;
+ for (; DTableRank != DTableRankEnd; DTableRank += 8) {
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ break;
+ }
+}
/* HUF_fillDTableX2Level2() :
* `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
- const U32* rankValOrigin, const int minWeight,
- const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
- U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
+ const U32* rankVal, const int minWeight, const int maxWeight1,
+ const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
+ U32 nbBitsBaseline, U16 baseSeq)
{
- HUF_DEltX2 DElt;
- U32* rankVal = wksp;
-
- assert(wkspSize >= HUF_TABLELOG_MAX + 1);
- (void)wkspSize;
- /* get pre-calculated rankVal */
- ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
-
- /* fill skipped values */
+ /* Fill skipped values (all positions up to rankVal[minWeight]).
+ * These are positions only get a single symbol because the combined weight
+ * is too large.
+ */
if (minWeight>1) {
- U32 i, skipSize = rankVal[minWeight];
- MEM_writeLE16(&(DElt.sequence), baseSeq);
- DElt.nbBits = (BYTE)(consumed);
- DElt.length = 1;
- for (i = 0; i < skipSize; i++)
- DTable[i] = DElt;
- }
-
- /* fill DTable */
-    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits   = (BYTE)(nbBits + consumed);
-        DElt.length   = 2;
-        do { DTable[i++] = DElt; } while (i < end);   /* since length >= 1 */
-
- rankVal[weight] += length;
- } }
-}
+ U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
+ U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
+ int const skipSize = rankVal[minWeight];
+ assert(length > 1);
+ assert((U32)skipSize < length);
+ switch (length) {
+ case 2:
+ assert(skipSize == 1);
+ ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
+ break;
+ case 4:
+ assert(skipSize <= 4);
+ ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
+ break;
+ default:
+ {
+ int i;
+ for (i = 0; i < skipSize; i += 8) {
+ ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ }
+ }
+ /* Fill each of the second level symbols by weight. */
+ {
+ int w;
+ for (w = minWeight; w < maxWeight1; ++w) {
+ int const begin = rankStart[w];
+ int const end = rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
+ U32 const totalBits = nbBits + consumedBits;
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedSymbols + begin, sortedSymbols + end,
+ totalBits, targetLog,
+ baseSeq, /* level */ 2);
+ }
+ }
+}
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
- const sortedSymbol_t* sortedList, const U32 sortedListSize,
- const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
- const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
+ const sortedSymbol_t* sortedList,
+ const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
{
- U32* rankVal = wksp;
+ U32* const rankVal = rankValOrigin[0];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
- U32 s;
-
- assert(wkspSize >= HUF_TABLELOG_MAX + 1);
- wksp += HUF_TABLELOG_MAX + 1;
- wkspSize -= HUF_TABLELOG_MAX + 1;
+ int w;
+ int const wEnd = (int)maxWeight + 1;
- ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
+ /* Fill DTable in order of weight. */
+ for (w = 1; w < wEnd; ++w) {
+ int const begin = (int)rankStart[w];
+ int const end = (int)rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
- /* fill DTable */
- for (s=0; s= minBits) { /* enough room for a second symbol */
- U32 sortedRank;
+ if (targetLog-nbBits >= minBits) {
+ /* Enough room for a second symbol. */
+ int start = rankVal[w];
+ U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
int minWeight = nbBits + scaleLog;
+ int s;
if (minWeight < 1) minWeight = 1;
- sortedRank = rankStart[minWeight];
- HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
- rankValOrigin[nbBits], minWeight,
- sortedList+sortedRank, sortedListSize-sortedRank,
- nbBitsBaseline, symbol, wksp, wkspSize);
+ /* Fill the DTable for every symbol of weight w.
+ * These symbols get at least 1 second symbol.
+ */
+ for (s = begin; s != end; ++s) {
+ HUF_fillDTableX2Level2(
+ DTable + start, targetLog, nbBits,
+ rankValOrigin[nbBits], minWeight, wEnd,
+ sortedList, rankStart,
+ nbBitsBaseline, sortedList[s].symbol);
+ start += length;
+ }
} else {
- HUF_DEltX2 DElt;
- MEM_writeLE16(&(DElt.sequence), symbol);
- DElt.nbBits = (BYTE)(nbBits);
- DElt.length = 1;
- { U32 const end = start + length;
- U32 u;
- for (u = start; u < end; u++) DTable[u] = DElt;
- } }
- rankVal[weight] += length;
+ /* Only a single symbol. */
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedList + begin, sortedList + end,
+ nbBits, targetLog,
+ /* baseSeq */ 0, /* level */ 1);
+ }
}
}
typedef struct {
rankValCol_t rankVal[HUF_TABLELOG_MAX];
U32 rankStats[HUF_TABLELOG_MAX + 1];
- U32 rankStart0[HUF_TABLELOG_MAX + 2];
+ U32 rankStart0[HUF_TABLELOG_MAX + 3];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
@@ -625,11 +1175,11 @@
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
- void* workSpace, size_t wkspSize)
+ void* workSpace, size_t wkspSize, int flags)
{
- U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ U32 tableLog, maxW, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
- U32 const maxTableLog = dtd.maxTableLog;
+ U32 maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
@@ -647,11 +1197,12 @@
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+ if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;
/* find maxWeight */
for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
@@ -664,7 +1215,7 @@
rankStart[w] = curr;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
- sizeOfSort = nextRankStart;
+ rankStart[maxW+1] = nextRankStart;
}
/* sort symbols by weight */
@@ -673,7 +1224,6 @@
U32 const w = wksp->weightList[s];
U32 const r = rankStart[w]++;
wksp->sortedSymbol[r].symbol = (BYTE)s;
- wksp->sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
@@ -698,10 +1248,9 @@
} } } }
HUF_fillDTableX2(dt, maxTableLog,
- wksp->sortedSymbol, sizeOfSort,
+ wksp->sortedSymbol,
wksp->rankStart0, wksp->rankVal, maxW,
- tableLog+1,
- wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
+ tableLog+1);
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
@@ -714,7 +1263,7 @@
HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- ZSTD_memcpy(op, dt+val, 2);
+ ZSTD_memcpy(op, &dt[val].sequence, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
@@ -723,28 +1272,34 @@
HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- ZSTD_memcpy(op, dt+val, 1);
- if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
- else {
+ ZSTD_memcpy(op, &dt[val].sequence, 1);
+ if (dt[val].length==1) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ } else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
- } }
+ }
+ }
return 1;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+ do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0)
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
+ } while (0)
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
- if (MEM_64bits()) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
+ } while (0)
HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
@@ -753,19 +1308,37 @@
BYTE* const pStart = p;
/* up to 8 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
+ if (dtLog <= 11 && MEM_64bits()) {
+ /* up to 10 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ } else {
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
}
/* closer to end : up to 2 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ if ((size_t)(pEnd - p) >= 2) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
- while (p <= pEnd-2)
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+ }
if (p < pEnd)
p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
@@ -786,7 +1359,7 @@
/* decode */
{ BYTE* const ostart = (BYTE*) dst;
- BYTE* const oend = ostart + dstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, dstSize);
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
@@ -800,6 +1373,10 @@
return dstSize;
}
+/* HUF_decompress4X2_usingDTable_internal_body():
+ * Conditions:
+ * @dstSize >= 6
+ */
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X2_usingDTable_internal_body(
void* dst, size_t dstSize,
@@ -807,6 +1384,7 @@
const HUF_DTable* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
@@ -840,58 +1418,62 @@
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
- if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ assert(dstSize >= 6 /* validated above */);
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* 16-32 symbols per loop (4-8 symbols per stream) */
- for ( ; (endSignal) & (op4 < olimit); ) {
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit); ) {
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
- endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
- endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
- endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
- endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
#else
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
- endSignal = (U32)LIKELY((U32)
- (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
- & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
- & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
- & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = (U32)LIKELY((U32)
+ (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
#endif
+ }
}
/* check corruption */
@@ -915,67 +1497,286 @@
}
}
-HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
-HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+static
+size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
+
+#endif
+
+static HUF_FAST_BMI2_ATTRS
+void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
+{
+ U64 bits[4];
+ BYTE const* ip[4];
+ BYTE* op[4];
+ BYTE* oend[4];
+ HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt;
+ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local registers. */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
+
+ oend[0] = op[1];
+ oend[1] = op[2];
+ oend[2] = op[3];
+ oend[3] = args->oend;
+
+ assert(MEM_isLittleEndian());
+ assert(!MEM_32bits());
+
+ for (;;) {
+ BYTE* olimit;
+ int stream;
+
+ /* Assert loop preconditions */
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= oend[stream]);
+ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
+ {
+ /* Each loop does 5 table lookups for each of the 4 streams.
+ * Each table lookup consumes up to 11 bits of input, and produces
+ * up to 2 bytes of output.
+ */
+ /* We can consume up to 7 bytes of input per iteration per stream.
+ * We also know that each input pointer is >= ip[0]. So we can run
+ * iters loops before running out of input.
+ */
+ size_t iters = (size_t)(ip[0] - ilowest) / 7;
+ /* Each iteration can produce up to 10 bytes of output per stream.
+ * Each output stream my advance at different rates. So take the
+ * minimum number of safe iterations among all the output streams.
+ */
+ for (stream = 0; stream < 4; ++stream) {
+ size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10;
+ iters = MIN(iters, oiters);
+ }
-size_t HUF_decompress1X2_usingDTable(
+ /* Each iteration produces at least 5 output symbols. So until
+ * op[3] crosses olimit, we know we haven't executed iters
+ * iterations yet. This saves us maintaining an iters counter,
+ * at the expense of computing the remaining # of iterations
+ * more frequently.
+ */
+ olimit = op[3] + (iters * 5);
+
+ /* Exit the fast decoding loop once we reach the end. */
+ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
+ * previous one. This indicates corruption, and a precondition
+ * to our loop is that ip[i] >= ip[0].
+ */
+ for (stream = 1; stream < 4; ++stream) {
+ if (ip[stream] < ip[stream - 1])
+ goto _out;
+ }
+ }
+
+#ifndef NDEBUG
+ for (stream = 1; stream < 4; ++stream) {
+ assert(ip[stream] >= ip[stream - 1]);
+ }
+#endif
+
+#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \
+ do { \
+ if ((_decode3) || (_stream) != 3) { \
+ int const index = (int)(bits[(_stream)] >> 53); \
+ HUF_DEltX2 const entry = dtable[index]; \
+ MEM_write16(op[(_stream)], entry.sequence); \
+ bits[(_stream)] <<= (entry.nbBits) & 0x3F; \
+ op[(_stream)] += (entry.length); \
+ } \
+ } while (0)
+
+#define HUF_4X2_RELOAD_STREAM(_stream) \
+ do { \
+ HUF_4X2_DECODE_SYMBOL(3, 1); \
+ { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
+ int const nbBytes = ctz >> 3; \
+ ip[(_stream)] -= nbBytes; \
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
+ } \
+ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
+ */
+ do {
+ /* Decode 5 symbols from each of the first 3 streams.
+ * The final stream will be decoded during the reload phase
+ * to reduce register pressure.
+ */
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+
+ /* Decode one symbol from the final stream */
+ HUF_4X2_DECODE_SYMBOL(3, 1);
+
+ /* Decode 4 symbols from the final stream & reload bitstreams.
+ * The final stream is reloaded last, meaning that all 5 symbols
+ * are decoded from the final stream before it is reloaded.
+ */
+ HUF_4X_FOR_EACH_STREAM(HUF_4X2_RELOAD_STREAM);
+ } while (op[3] < olimit);
+ }
+
+#undef HUF_4X2_DECODE_SYMBOL
+#undef HUF_4X2_RELOAD_STREAM
+
+_out:
+
+ /* Save the final values of each of the state variables back to args. */
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
+}
+
+
+static HUF_FAST_BMI2_ATTRS size_t
+HUF_decompress4X2_usingDTable_internal_fast(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
+ const HUF_DTable* DTable,
+ HUF_DecompressFastLoopFn loopFn) {
+ void const* dt = DTable + 1;
+ const BYTE* const ilowest = (const BYTE*)cSrc;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
+ {
+ size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
+ if (ret == 0)
+ return 0;
+ }
+
+ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
+
+ /* note : op4 already verified within main loop */
+ assert(args.ip[0] >= ilowest);
+ assert(args.ip[1] >= ilowest);
+ assert(args.ip[2] >= ilowest);
+ assert(args.ip[3] >= ilowest);
+ assert(args.op[3] <= oend);
+
+ assert(ilowest == args.ilowest);
+ assert(ilowest + 6 == args.iend[0]);
+ (void)ilowest;
+
+ /* finish bitStreams one by one */
+ {
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd)
+ return ERROR(corruption_detected);
+ }
+ }
+
+ /* decoded size */
+ return dstSize;
+}
+
+static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1) return ERROR(GENERIC);
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default;
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop;
+
+#if DYNAMIC_BMI2
+ if (flags & HUF_flags_bmi2) {
+ fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2;
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
+ }
+# endif
+ } else {
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
+ }
+#endif
+
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
+ size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
+ if (ret != 0)
+ return ret;
+ }
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags);
}
-
-size_t HUF_decompress4X2_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1) return ERROR(GENERIC);
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
-
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
#endif /* HUF_FORCE_DECOMPRESS_X1 */
@@ -984,66 +1785,28 @@
/* Universal decompression selectors */
/* ***********************************/
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)dtd;
- assert(dtd.tableType == 0);
- return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)dtd;
- assert(dtd.tableType == 1);
- return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
- return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
- HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)dtd;
- assert(dtd.tableType == 0);
- return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)dtd;
- assert(dtd.tableType == 1);
- return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
- return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
- HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
{
/* single, double, quad */
- {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
- {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
- {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
- {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
- {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
- {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
- {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
- {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
- {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
- {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
- {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
- {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
- {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
- {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
- {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
- {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+ {{0,0}, {1,1}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}}, /* Q==1 : impossible */
+ {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
+ {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
+ {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
+ {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
+ {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
+ {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
+ {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
+ {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
+ {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
+ {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
+ {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
+ {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
+ {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
+ {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
};
#endif
@@ -1070,42 +1833,15 @@
U32 const D256 = (U32)(dstSize >> 8);
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
- DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */
+ DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
return DTime1 < DTime0;
}
#endif
}
-
-
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
- size_t dstSize, const void* cSrc,
- size_t cSrcSize, void* workSpace,
- size_t wkspSize)
-{
- /* validation checks */
- if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize == 0) return ERROR(corruption_detected);
-
- { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)algoNb;
- assert(algoNb == 0);
- return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)algoNb;
- assert(algoNb == 1);
- return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#else
- return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize):
- HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#endif
- }
-}
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+ void* workSpace, size_t wkspSize, int flags)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1118,71 +1854,71 @@
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#else
return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize):
+ cSrcSize, workSpace, wkspSize, flags):
HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#endif
}
}
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
- return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
- return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#else
- return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
- HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#endif
}
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
- return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
- return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#else
- return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
- HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#endif
}
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1192,15 +1928,14 @@
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
- return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
- return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#else
- return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
- HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) :
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#endif
}
}
-
Index: lib/zstd/decompress/zstd_ddict.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
--- a/lib/zstd/decompress/zstd_ddict.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_ddict.c (date 1740124333255)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,12 +15,12 @@
/*-*******************************************************
* Dependencies
*********************************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"
@@ -131,7 +132,7 @@
ZSTD_memcpy(internalBuffer, dict, dictSize);
}
ddict->dictSize = dictSize;
- ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
@@ -237,5 +238,5 @@
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0;
- return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+ return ddict->dictID;
}
Index: lib/zstd/decompress/zstd_decompress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
--- a/lib/zstd/decompress/zstd_decompress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress/zstd_decompress.c (date 1740124333270)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -53,14 +54,15 @@
* Dependencies
*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
-#include "../common/cpu.h" /* bmi2 */
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
+#include "../common/error_private.h"
+#include "../common/zstd_internal.h" /* blockProperties_t */
#include "../common/mem.h" /* low level memory routines */
+#include "../common/bits.h" /* ZSTD_highbit32 */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
-#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
@@ -73,11 +75,11 @@
*************************************/
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
-#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
- * Currently, that means a 0.75 load factor.
- * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
- * the load factor of the ddict hash set.
- */
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
+ * the load factor of the ddict hash set.
+ */
#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2
@@ -238,6 +240,8 @@
dctx->outBufferMode = ZSTD_bm_buffered;
dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+ dctx->disableHufAsm = 0;
+ dctx->maxBlockSizeParam = 0;
}
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
@@ -252,11 +256,12 @@
dctx->inBuffSize = 0;
dctx->outBuffSize = 0;
dctx->streamStage = zdss_init;
- dctx->legacyContext = NULL;
- dctx->previousLegacyVersion = 0;
dctx->noForwardProgress = 0;
dctx->oversizedDuration = 0;
- dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ dctx->isFrameDecompression = 1;
+#if DYNAMIC_BMI2
+ dctx->bmi2 = ZSTD_cpuSupportsBmi2();
+#endif
dctx->ddictSet = NULL;
ZSTD_DCtx_resetParameters(dctx);
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
@@ -277,8 +282,7 @@
return dctx;
}
-ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
-{
+static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
@@ -289,10 +293,15 @@
}
}
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_internal(customMem);
+}
+
ZSTD_DCtx* ZSTD_createDCtx(void)
{
DEBUGLOG(3, "ZSTD_createDCtx");
- return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
}
static void ZSTD_clearDict(ZSTD_DCtx* dctx)
@@ -369,6 +378,19 @@
}
return 0;
}
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ */
+unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
/* ZSTD_frameHeaderSize_internal() :
* srcSize must be large enough to reach header size fields.
@@ -405,16 +427,40 @@
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+** or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
size_t const minInputSize = ZSTD_startingInputLength(format);
- ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
- if (srcSize < minInputSize) return minInputSize;
- RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+ DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize);
+ if (srcSize > 0) {
+ /* note : technically could be considered an assert(), since it's an invalid entry */
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0");
+ }
+ if (srcSize < minInputSize) {
+ if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) {
+ /* when receiving less than @minInputSize bytes,
+ * control these bytes at least correspond to a supported magic number
+ * in order to error out early if they don't.
+ **/
+ size_t const toCopy = MIN(4, srcSize);
+ unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER);
+ assert(src != NULL);
+ ZSTD_memcpy(hbuf, src, toCopy);
+ if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) {
+ /* not a zstd frame : let's check if it's a skippable frame */
+ MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START);
+ ZSTD_memcpy(hbuf, src, toCopy);
+ if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) {
+ RETURN_ERROR(prefix_unknown,
+ "first bytes don't correspond to any supported magic number");
+ } } }
+ return minInputSize;
+ }
+
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@@ -422,8 +468,10 @@
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
- zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
zfhPtr->frameType = ZSTD_skippableFrame;
+ zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START;
+ zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE;
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
return 0;
}
RETURN_ERROR(prefix_unknown, "");
@@ -492,12 +540,11 @@
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize)
{
return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
-
/* ZSTD_getFrameContentSize() :
* compatible with legacy mode
* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
@@ -505,7 +552,7 @@
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
return ZSTD_CONTENTSIZE_ERROR;
if (zfh.frameType == ZSTD_skippableFrame) {
@@ -525,18 +572,52 @@
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported, "");
- {
- size_t const skippableSize = skippableHeaderSize + sizeU32;
+ { size_t const skippableSize = skippableHeaderSize + sizeU32;
RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
return skippableSize;
}
}
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves content of a skippable frame, and writes it to dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ unsigned* magicVariant, /* optional, can be NULL */
+ const void* src, size_t srcSize)
+{
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
+ size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
+
+ /* check input validity */
+ RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
+ RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+
+ /* deliver payload */
+ if (skippableContentSize > 0 && dst != NULL)
+ ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
+ if (magicVariant != NULL)
+ *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
+ return skippableContentSize;
+ }
+}
+
/* ZSTD_findDecompressedSize() :
- * compatible with legacy mode
* `srcSize` must be the exact length of some number of ZSTD compressed and/or
* skippable frames
- * @return : decompressed size of the frames contained */
+ * note: compatible with legacy mode
+ * @return : decompressed size of the frames contained */
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
@@ -546,9 +627,7 @@
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
- if (ZSTD_isError(skippableSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR;
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
@@ -556,17 +635,17 @@
continue;
}
- { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+ { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize);
+ if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs;
- /* check for overflow */
- if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
- totalDstSize += ret;
+ if (totalDstSize + fcs < totalDstSize)
+ return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */
+ totalDstSize += fcs;
}
+ /* skip to next frame */
{ size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
- if (ZSTD_isError(frameSrcSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR;
+ assert(frameSrcSize <= srcSize);
src = (const BYTE *)src + frameSrcSize;
srcSize -= frameSrcSize;
@@ -630,13 +709,13 @@
return frameSizeInfo;
}
-static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format)
{
ZSTD_frameSizeInfo frameSizeInfo;
ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
- if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
&& (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
@@ -647,10 +726,10 @@
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
/* Extract Frame Header */
- { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format);
if (ZSTD_isError(ret))
return ZSTD_errorFrameSizeInfo(ret);
if (ret > 0)
@@ -684,28 +763,31 @@
ip += 4;
}
+ frameSizeInfo.nbBlocks = nbBlocks;
frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
? zfh.frameContentSize
- : nbBlocks * zfh.blockSizeMax;
+ : (unsigned long long)nbBlocks * zfh.blockSizeMax;
return frameSizeInfo;
}
}
+static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format);
+ return frameSizeInfo.compressedSize;
+}
+
/* ZSTD_findFrameCompressedSize() :
- * compatible with legacy mode
- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- * `srcSize` must be at least as large as the frame contained
- * @return : the compressed size of the frame starting at `src` */
+ * See docs in zstd.h
+ * Note: compatible with legacy mode */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
- return frameSizeInfo.compressedSize;
+ return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1);
}
/* ZSTD_decompressBound() :
* compatible with legacy mode
- * `src` must point to the start of a ZSTD frame or a skippeable frame
+ * `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the maximum decompressed size of the compressed source
*/
@@ -714,7 +796,7 @@
unsigned long long bound = 0;
/* Iterate over each frame */
while (srcSize > 0) {
- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
@@ -727,6 +809,48 @@
return bound;
}
+size_t ZSTD_decompressionMargin(void const* src, size_t srcSize)
+{
+ size_t margin = 0;
+ unsigned maxBlockSize = 0;
+
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ ZSTD_FrameHeader zfh;
+
+ FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), "");
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ERROR(corruption_detected);
+
+ if (zfh.frameType == ZSTD_frame) {
+ /* Add the frame header to our margin */
+ margin += zfh.headerSize;
+ /* Add the checksum to our margin */
+ margin += zfh.checksumFlag ? 4 : 0;
+ /* Add 3 bytes per block */
+ margin += 3 * frameSizeInfo.nbBlocks;
+
+ /* Compute the max block size */
+ maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax);
+ } else {
+ assert(zfh.frameType == ZSTD_skippableFrame);
+ /* Add the entire skippable frame size to our margin. */
+ margin += compressedSize;
+ }
+
+ assert(srcSize >= compressedSize);
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ }
+
+ /* Add the max block size back to the margin. */
+ margin += maxBlockSize;
+
+ return margin;
+}
/*-*************************************************************
* Frame decoding
@@ -752,7 +876,7 @@
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
- ZSTD_memcpy(dst, src, srcSize);
+ ZSTD_memmove(dst, src, srcSize);
return srcSize;
}
@@ -769,7 +893,7 @@
return regenSize;
}
-static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming)
{
(void)dctx;
(void)uncompressedSize;
@@ -810,8 +934,13 @@
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
+ /* Shrink the blockSizeMax if enabled */
+ if (dctx->maxBlockSizeParam != 0)
+ dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam);
+
/* Loop on each block */
while (1) {
+ BYTE* oBlockEnd = oend;
size_t decodedSize;
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
@@ -821,27 +950,48 @@
remainingSrcSize -= ZSTD_blockHeaderSize;
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+ if (ip >= op && ip < oBlockEnd) {
+ /* We are decompressing in-place. Limit the output pointer so that we
+ * don't overwrite the block that we are currently reading. This will
+ * fail decompression if the input & output pointers aren't spaced
+ * far enough apart.
+ *
+ * This is important to set, even when the pointers are far enough
+ * apart, because ZSTD_decompressBlock_internal() can decide to store
+ * literals in the output buffer, after the block it is decompressing.
+ * Since we don't want anything to overwrite our input, we have to tell
+ * ZSTD_decompressBlock_internal to never write past ip.
+ *
+ * See ZSTD_allocateLiteralsBuffer() for reference.
+ */
+ oBlockEnd = op + (ip - op);
+ }
+
switch(blockProperties.blockType)
{
case bt_compressed:
- decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
+ assert(dctx->isFrameDecompression == 1);
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming);
break;
case bt_raw :
+ /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
break;
case bt_rle :
- decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
break;
case bt_reserved :
default:
RETURN_ERROR(corruption_detected, "invalid block type");
}
-
- if (ZSTD_isError(decodedSize)) return decodedSize;
- if (dctx->validateChecksum)
+ FORWARD_IF_ERROR(decodedSize, "Block decompression failure");
+ DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize);
+ if (dctx->validateChecksum) {
xxh64_update(&dctx->xxhState, op, decodedSize);
- if (decodedSize != 0)
+ }
+ if (decodedSize) /* support dst = NULL,0 */ {
op += decodedSize;
+ }
assert(ip != NULL);
ip += cBlockSize;
remainingSrcSize -= cBlockSize;
@@ -865,12 +1015,15 @@
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
+ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr));
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
}
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
@@ -890,17 +1043,18 @@
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
- { U32 const magicNumber = MEM_readLE32(src);
- DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
- (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) {
+ U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame detected : skip it */
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
- FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
+ FORWARD_IF_ERROR(skippableSize, "invalid skippable frame");
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
- continue;
+ continue; /* check next frame */
} }
if (ddict) {
@@ -976,7 +1130,7 @@
{
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
- ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
@@ -996,8 +1150,8 @@
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
/*
- * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed,
- * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
+ * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we
+ * allow taking a partial block as the input. Currently only raw uncompressed blocks can
* be streamed.
*
* For blocks that can be streamed, this allows us to reduce the latency until we produce
@@ -1010,7 +1164,7 @@
return dctx->expected;
if (dctx->bType != bt_raw)
return dctx->expected;
- return MIN(MAX(inputSize, 1), dctx->expected);
+ return BOUNDED(1, inputSize, dctx->expected);
}
ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
@@ -1116,7 +1270,8 @@
{
case bt_compressed:
DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
- rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+ assert(dctx->isFrameDecompression == 1);
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_raw :
@@ -1185,6 +1340,7 @@
case ZSTDds_decodeSkippableHeader:
assert(src != NULL);
assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ assert(dctx->format != ZSTD_f_zstd1_magicless);
ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
dctx->stage = ZSTDds_skipFrame;
@@ -1197,7 +1353,7 @@
default:
assert(0); /* impossible */
- RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
}
}
@@ -1238,11 +1394,11 @@
/* in minimal huffman, we always use X1 variants */
size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
dictPtr, dictEnd - dictPtr,
- workspace, workspaceSize);
+ workspace, workspaceSize, /* flags */ 0);
#else
size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
dictPtr, (size_t)(dictEnd - dictPtr),
- workspace, workspaceSize);
+ workspace, workspaceSize, /* flags */ 0);
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
dictPtr += hSize;
@@ -1338,10 +1494,11 @@
dctx->prefixStart = NULL;
dctx->virtualStart = NULL;
dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
dctx->bType = bt_reserved;
+ dctx->isFrameDecompression = 1;
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
dctx->LLTptr = dctx->entropy.LLTable;
@@ -1400,7 +1557,7 @@
* This could for one of the following reasons :
* - The frame does not require a dictionary (most common case).
* - The frame was built with dictID intentionally removed.
- * Needed dictionary is a hidden information.
+ * Needed dictionary is a hidden piece of information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, frame header could not be decoded.
* Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
@@ -1409,7 +1566,7 @@
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
- ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 };
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
@@ -1438,7 +1595,7 @@
ZSTD_DStream* ZSTD_createDStream(void)
{
DEBUGLOG(3, "ZSTD_createDStream");
- return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
}
ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
@@ -1448,7 +1605,7 @@
ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
{
- return ZSTD_createDCtx_advanced(customMem);
+ return ZSTD_createDCtx_internal(customMem);
}
size_t ZSTD_freeDStream(ZSTD_DStream* zds)
@@ -1516,7 +1673,9 @@
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
- return ZSTD_initDStream_usingDDict(zds, NULL);
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), "");
+ FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), "");
+ return ZSTD_startingInputLength(zds->format);
}
/* ZSTD_initDStream_usingDDict() :
@@ -1524,6 +1683,7 @@
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
@@ -1534,6 +1694,7 @@
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
+ DEBUGLOG(4, "ZSTD_resetDStream");
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
}
@@ -1605,6 +1766,15 @@
bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
return bounds;
+ case ZSTD_d_disableHuffmanAssembly:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+ case ZSTD_d_maxBlockSize:
+ bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
+ bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
+ return bounds;
+
default:;
}
bounds.error = ERROR(parameter_unsupported);
@@ -1644,6 +1814,12 @@
return 0;
case ZSTD_d_refMultipleDDicts:
*value = (int)dctx->refMultipleDDicts;
+ return 0;
+ case ZSTD_d_disableHuffmanAssembly:
+ *value = (int)dctx->disableHufAsm;
+ return 0;
+ case ZSTD_d_maxBlockSize:
+ *value = dctx->maxBlockSizeParam;
return 0;
default:;
}
@@ -1677,6 +1853,14 @@
RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
}
dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
+ return 0;
+ case ZSTD_d_disableHuffmanAssembly:
+ CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value);
+ dctx->disableHufAsm = value != 0;
+ return 0;
+ case ZSTD_d_maxBlockSize:
+ if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value);
+ dctx->maxBlockSizeParam = value;
return 0;
default:;
}
@@ -1689,6 +1873,7 @@
|| (reset == ZSTD_reset_session_and_parameters) ) {
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
+ dctx->isFrameDecompression = 1;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
@@ -1705,10 +1890,17 @@
return ZSTD_sizeof_DCtx(dctx);
}
-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax)
{
- size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
- unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax);
+ /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block
+ * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing
+ * the block at the beginning of the output buffer, and maintain a full window.
+ *
+ * We need another blockSize worth of buffer so that we can store split
+ * literals at the end of the block without overwriting the extDict window.
+ */
+ unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
@@ -1716,6 +1908,11 @@
return minRBSize;
}
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX);
+}
+
size_t ZSTD_estimateDStreamSize(size_t windowSize)
{
size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
@@ -1727,7 +1924,7 @@
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
@@ -1822,6 +2019,7 @@
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
+ assert(zds != NULL);
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
@@ -1842,7 +2040,6 @@
DEBUGLOG(5, "stage zdss_init => transparent reset ");
zds->streamStage = zdss_loadHeader;
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
- zds->legacyVersion = 0;
zds->hostageByte = 0;
zds->expectedOutBuffer = *output;
ZSTD_FALLTHROUGH;
@@ -1853,7 +2050,6 @@
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
- DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
return hSize; /* error */
}
@@ -1867,6 +2063,11 @@
zds->lhSize += remainingInput;
}
input->pos = input->size;
+ /* check first few bytes */
+ FORWARD_IF_ERROR(
+ ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format),
+ "First few bytes detected incorrect" );
+ /* return hint input size */
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
@@ -1878,14 +2079,15 @@
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
- size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
+ size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format);
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
- DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
+ assert(istart != NULL);
ip = istart + cSize;
- op += decompressedSize;
+ op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */
zds->expected = 0;
zds->streamStage = zdss_init;
someMoreWork = 0;
@@ -1904,7 +2106,8 @@
DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
- if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ if (zds->format == ZSTD_f_zstd1
+ && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
@@ -1920,11 +2123,13 @@
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge, "");
+ if (zds->maxBlockSizeParam != 0)
+ zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam);
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
- ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
+ ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax)
: 0;
ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
@@ -1969,6 +2174,7 @@
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
+ assert(ip != NULL);
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
@@ -1983,7 +2189,7 @@
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
- assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
+ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)));
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
@@ -1992,8 +2198,11 @@
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
- ip += loadedSize;
- zds->inPos += loadedSize;
+ if (loadedSize != 0) {
+ /* ip may be NULL */
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ }
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
@@ -2003,14 +2212,17 @@
break;
}
case zdss_flush:
- { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ {
+ size_t const toFlushSize = zds->outEnd - zds->outStart;
size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
- op += flushedSize;
+
+ op = op ? op + flushedSize : op;
+
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
zds->streamStage = zdss_read;
if ( (zds->outBuffSize < zds->fParams.frameContentSize)
- && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
(int)(zds->outBuffSize - zds->outStart),
(U32)zds->fParams.blockSizeMax);
@@ -2024,7 +2236,7 @@
default:
assert(0); /* impossible */
- RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
} }
/* result */
@@ -2037,8 +2249,8 @@
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
- RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
- RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
+ RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, "");
+ RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, "");
assert(0);
}
} else {
@@ -2075,11 +2287,17 @@
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos)
{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
- /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
- size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
+ ZSTD_outBuffer output;
+ ZSTD_inBuffer input;
+ output.dst = dst;
+ output.size = dstCapacity;
+ output.pos = *dstPos;
+ input.src = src;
+ input.size = srcSize;
+ input.pos = *srcPos;
+ { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+ }
}
Index: lib/zstd/common/allocations.h
===================================================================
diff --git a/lib/zstd/common/allocations.h b/lib/zstd/common/allocations.h
new file mode 100644
--- /dev/null (date 1740124241238)
+++ b/lib/zstd/common/allocations.h (date 1740124241238)
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This file provides custom allocation primitives
+ */
+
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+
+#include "compiler.h" /* MEM_STATIC */
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h> /* ZSTD_customMem */
+
+#ifndef ZSTD_ALLOCATIONS_H
+#define ZSTD_ALLOCATIONS_H
+
+/* custom memory allocation functions */
+
+MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return ZSTD_malloc(size);
+}
+
+MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ ZSTD_memset(ptr, 0, size);
+ return ptr;
+ }
+ return ZSTD_calloc(1, size);
+}
+
+MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ ZSTD_free(ptr);
+ }
+}
+
+#endif /* ZSTD_ALLOCATIONS_H */
Index: lib/zstd/common/bits.h
===================================================================
diff --git a/lib/zstd/common/bits.h b/lib/zstd/common/bits.h
new file mode 100644
--- /dev/null (date 1740124241245)
+++ b/lib/zstd/common/bits.h (date 1740124241245)
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_BITS_H
+#define ZSTD_BITS_H
+
+#include "mem.h"
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
+{
+ assert(val != 0);
+ {
+ static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
+ 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7,
+ 26, 12, 18, 6, 11, 5, 10, 9};
+ return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27];
+ }
+}
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)__builtin_ctz(val);
+#else
+ return ZSTD_countTrailingZeros32_fallback(val);
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val)
+{
+ assert(val != 0);
+ {
+ static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31};
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27];
+ }
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)__builtin_clz(val);
+#else
+ return ZSTD_countLeadingZeros32_fallback(val);
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4) && defined(__LP64__)
+ return (unsigned)__builtin_ctzll(val);
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (leastSignificantWord == 0) {
+ return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
+ } else {
+ return ZSTD_countTrailingZeros32(leastSignificantWord);
+ }
+ }
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)(__builtin_clzll(val));
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (mostSignificantWord == 0) {
+ return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
+ } else {
+ return ZSTD_countLeadingZeros32(mostSignificantWord);
+ }
+ }
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+ return ZSTD_countTrailingZeros64((U64)val) >> 3;
+ } else {
+ return ZSTD_countTrailingZeros32((U32)val) >> 3;
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+ return ZSTD_countLeadingZeros64((U64)val) >> 3;
+ } else {
+ return ZSTD_countLeadingZeros32((U32)val) >> 3;
+ }
+ }
+}
+
+MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ return 31 - ZSTD_countLeadingZeros32(val);
+}
+
+/* ZSTD_rotateRight_*():
+ * Rotates a bitfield to the right by "count" bits.
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
+ */
+MEM_STATIC
+U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
+ assert(count < 64);
+ count &= 0x3F; /* for fickle pattern recognition */
+ return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
+}
+
+MEM_STATIC
+U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
+ assert(count < 32);
+ count &= 0x1F; /* for fickle pattern recognition */
+ return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
+}
+
+MEM_STATIC
+U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
+ assert(count < 16);
+ count &= 0x0F; /* for fickle pattern recognition */
+ return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+}
+
+#endif /* ZSTD_BITS_H */
Index: lib/zstd/common/portability_macros.h
===================================================================
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
new file mode 100644
--- /dev/null (date 1740124241324)
+++ b/lib/zstd/common/portability_macros.h (date 1740124241324)
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PORTABILITY_MACROS_H
+#define ZSTD_PORTABILITY_MACROS_H
+
+/*
+ * This header file contains macro definitions to support portability.
+ * This header is shared between C and ASM code, so it MUST only
+ * contain macro definitions. It MUST not contain any C code.
+ *
+ * This header ONLY defines macros to detect platforms/feature support.
+ *
+ */
+
+
+/* compat. with non-clang compilers */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+/* detects whether we are being compiled under msan */
+
+/* detects whether we are being compiled under asan */
+
+/* detects whether we are being compiled under dfsan */
+
+/* Mark the internal assembly functions as hidden */
+#ifdef __ELF__
+# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#elif defined(__APPLE__)
+# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func
+#else
+# define ZSTD_HIDE_ASM_FUNCTION(func)
+#endif
+
+/* Compile time determination of BMI2 support */
+
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+# if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \
+ && !defined(__BMI2__)
+# define DYNAMIC_BMI2 1
+# else
+# define DYNAMIC_BMI2 0
+# endif
+#endif
+
+/*
+ * Only enable assembly for GNU C compatible compilers,
+ * because other platforms may not support GAS assembly syntax.
+ *
+ * Only enable assembly for Linux / MacOS / Win32, other platforms may
+ * work, but they haven't been tested. This could likely be
+ * extended to BSD systems.
+ *
+ * Disable assembly when MSAN is enabled, because MSAN requires
+ * 100% of code to be instrumented to work.
+ */
+#define ZSTD_ASM_SUPPORTED 1
+
+/*
+ * Determines whether we should enable assembly for x86-64
+ * with BMI2.
+ *
+ * Enable if all of the following conditions hold:
+ * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
+ * - Assembly is supported
+ * - We are compiling for x86-64 and either:
+ * - DYNAMIC_BMI2 is enabled
+ * - BMI2 is supported at compile time
+ */
+#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
+
+/*
+ * For x86 ELF targets, add .note.gnu.property section for Intel CET in
+ * assembly sources when CET is enabled.
+ *
+ * Additionally, any function that may be called indirectly must begin
+ * with ZSTD_CET_ENDBRANCH.
+ */
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
+ && defined(__has_include)
+# if __has_include(<cet.h>)
+# include <cet.h>
+# define ZSTD_CET_ENDBRANCH _CET_ENDBR
+# endif
+#endif
+
+#ifndef ZSTD_CET_ENDBRANCH
+# define ZSTD_CET_ENDBRANCH
+#endif
+
+#endif /* ZSTD_PORTABILITY_MACROS_H */
Index: lib/zstd/compress/clevels.h
===================================================================
diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h
new file mode 100644
--- /dev/null (date 1740124241346)
+++ b/lib/zstd/compress/clevels.h (date 1740124241346)
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CLEVELS_H
+#define ZSTD_CLEVELS_H
+
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
+#include <linux/zstd.h>
+
+/*-===== Pre-defined compression levels =====-*/
+
+#define ZSTD_MAX_CLEVEL 22
+
+__attribute__((__unused__))
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{ /* "default" - for any srcSize > 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 3, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6 */
+ { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 22, 23, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */
+ { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
+},
+{ /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, T, strat */
+ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, T, strat */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 16, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, T, strat */
+ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+};
+
+
+
+#endif /* ZSTD_CLEVELS_H */
Index: lib/zstd/compress/zstd_preSplit.c
===================================================================
diff --git a/lib/zstd/compress/zstd_preSplit.c b/lib/zstd/compress/zstd_preSplit.c
new file mode 100644
--- /dev/null (date 1740124333234)
+++ b/lib/zstd/compress/zstd_preSplit.c (date 1740124333234)
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "../common/compiler.h" /* ZSTD_ALIGNOF */
+#include "../common/mem.h" /* S64 */
+#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */
+#include "hist.h" /* HIST_add */
+#include "zstd_preSplit.h"
+
+
+#define BLOCKSIZE_MIN 3500
+#define THRESHOLD_PENALTY_RATE 16
+#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)
+#define THRESHOLD_PENALTY 3
+
+#define HASHLENGTH 2
+#define HASHLOG_MAX 10
+#define HASHTABLESIZE (1 << HASHLOG_MAX)
+#define HASHMASK (HASHTABLESIZE - 1)
+#define KNUTH 0x9e3779b9
+
+/* for hashLog > 8, hash 2 bytes.
+ * for hashLog == 8, just take the byte, no hashing.
+ * The speed of this method relies on compile-time constant propagation */
+FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog)
+{
+ assert(hashLog >= 8);
+ if (hashLog == 8) return (U32)((const BYTE*)p)[0];
+ assert(hashLog <= HASHLOG_MAX);
+ return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog);
+}
+
+
+typedef struct {
+ unsigned events[HASHTABLESIZE];
+ size_t nbEvents;
+} Fingerprint;
+typedef struct {
+ Fingerprint pastEvents;
+ Fingerprint newEvents;
+} FPStats;
+
+static void initStats(FPStats* fpstats)
+{
+ ZSTD_memset(fpstats, 0, sizeof(FPStats));
+}
+
+FORCE_INLINE_TEMPLATE void
+addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ const char* p = (const char*)src;
+ size_t limit = srcSize - HASHLENGTH + 1;
+ size_t n;
+ assert(srcSize >= HASHLENGTH);
+ for (n = 0; n < limit; n+=samplingRate) {
+ fp->events[hash2(p+n, hashLog)]++;
+ }
+ fp->nbEvents += limit/samplingRate;
+}
+
+FORCE_INLINE_TEMPLATE void
+recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog));
+ fp->nbEvents = 0;
+ addEvents_generic(fp, src, srcSize, samplingRate, hashLog);
+}
+
+typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize);
+
+#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate
+
+#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \
+ static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
+ { \
+ recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \
+ }
+
+ZSTD_GEN_RECORD_FINGERPRINT(1, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(5, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(11, 9)
+ZSTD_GEN_RECORD_FINGERPRINT(43, 8)
+
+
+static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); }
+
+static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog)
+{
+ U64 distance = 0;
+ size_t n;
+ assert(hashLog <= HASHLOG_MAX);
+ for (n = 0; n < ((size_t)1 << hashLog); n++) {
+ distance +=
+ abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents);
+ }
+ return distance;
+}
+
+/* Compare newEvents with pastEvents
+ * return 1 when considered "too different"
+ */
+static int compareFingerprints(const Fingerprint* ref,
+ const Fingerprint* newfp,
+ int penalty,
+ unsigned hashLog)
+{
+ assert(ref->nbEvents > 0);
+ assert(newfp->nbEvents > 0);
+ { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents;
+ U64 deviation = fpDistance(ref, newfp, hashLog);
+ U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE;
+ return deviation >= threshold;
+ }
+}
+
+static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ acc->events[n] += newfp->events[n];
+ }
+ acc->nbEvents += newfp->nbEvents;
+}
+
+static void flushEvents(FPStats* fpstats)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ fpstats->pastEvents.events[n] = fpstats->newEvents.events[n];
+ }
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents;
+ ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents));
+}
+
+static void removeEvents(Fingerprint* acc, const Fingerprint* slice)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ assert(acc->events[n] >= slice->events[n]);
+ acc->events[n] -= slice->events[n];
+ }
+ acc->nbEvents -= slice->nbEvents;
+}
+
+#define CHUNKSIZE (8 << 10)
+static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ static const RecordEvents_f records_fs[] = {
+ FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1)
+ };
+ static const unsigned hashParams[] = { 8, 9, 10, 10 };
+ const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]);
+ FPStats* const fpstats = (FPStats*)workspace;
+ const char* p = (const char*)blockStart;
+ int penalty = THRESHOLD_PENALTY;
+ size_t pos = 0;
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ record_f(&fpstats->pastEvents, p, CHUNKSIZE);
+ for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) {
+ record_f(&fpstats->newEvents, p + pos, CHUNKSIZE);
+ if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) {
+ return pos;
+ } else {
+ mergeEvents(&fpstats->pastEvents, &fpstats->newEvents);
+ if (penalty > 0) penalty--;
+ }
+ }
+ assert(pos == blockSize);
+ return blockSize;
+ (void)flushEvents; (void)removeEvents;
+}
+
+/* ZSTD_splitBlock_fromBorders(): very fast strategy :
+ * compare fingerprint from beginning and end of the block,
+ * derive from their difference if it's preferable to split in the middle,
+ * repeat the process a second time, for finer grained decision.
+ * Repeating 3 times did not bring improvements, so I stopped at 2.
+ * Benefits are good enough for a cheap heuristic.
+ * More accurate splitting saves more, but speed impact is also more perceptible.
+ * For better accuracy, use more elaborate variant *_byChunks.
+ */
+static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize,
+ void* workspace, size_t wkspSize)
+{
+#define SEGMENT_SIZE 512
+ FPStats* const fpstats = (FPStats*)workspace;
+ Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned));
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE);
+ HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE);
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE;
+ if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8))
+ return blockSize;
+
+ HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE);
+ middleEvents->nbEvents = SEGMENT_SIZE;
+ { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8);
+ U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8);
+ U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3;
+ if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance)
+ return 64 KB;
+ return (distFromBegin > distFromEnd) ? 32 KB : 96 KB;
+ }
+}
+
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level);
+ assert(0<=level && level<=4);
+ if (level == 0)
+ return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize);
+ /* level >= 1*/
+ return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize);
+}
Index: include/linux/zstd.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
--- a/include/linux/zstd.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/include/linux/zstd.h (date 1740124100073)
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -136,9 +136,20 @@
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size);
-/* ====== Single-pass Compression ====== */
-
typedef ZSTD_CCtx zstd_cctx;
+typedef ZSTD_cParameter zstd_cparameter;
+
+/**
+ * zstd_cctx_set_param() - sets a compression parameter
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @param: The parameter to set.
+ * @value: The value to set the parameter to.
+ *
+ * Return: Zero or an error, which can be checked using zstd_is_error().
+ */
+size_t zstd_cctx_set_param(zstd_cctx *cctx, zstd_cparameter param, int value);
+
+/* ====== Single-pass Compression ====== */
/**
* zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
@@ -153,6 +164,20 @@
*/
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
+/**
+ * zstd_cctx_workspace_bound_with_ext_seq_prod() - max memory needed to
+ * initialize a zstd_cctx when using the block-level external sequence
+ * producer API.
+ * @parameters: The compression parameters to be used.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * this function for each set of parameters and use the maximum size.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
+ */
+size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *parameters);
+
/**
* zstd_init_cctx() - initialize a zstd compression context
* @workspace: The workspace to emplace the context into. It must outlive
@@ -257,6 +282,16 @@
*/
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
+/**
+ * zstd_cstream_workspace_bound_with_ext_seq_prod() - memory needed to initialize
+ * a zstd_cstream when using the block-level external sequence producer API.
+ * @cparams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cstream().
+ */
+size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *cparams);
+
/**
* zstd_init_cstream() - initialize a zstd streaming compression context
* @parameters The zstd parameters to use for compression.
@@ -416,6 +451,18 @@
*/
size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
+/**
+ * zstd_register_sequence_producer() - exposes the zstd library function
+ * ZSTD_registerSequenceProducer(). This is used for the block-level external
+ * sequence producer API. See upstream zstd.h for detailed documentation.
+ */
+typedef ZSTD_sequenceProducer_F zstd_sequence_producer_f;
+void zstd_register_sequence_producer(
+ zstd_cctx *cctx,
+ void* sequence_producer_state,
+ zstd_sequence_producer_f sequence_producer
+);
+
/**
* struct zstd_frame_params - zstd frame parameters stored in the frame header
* @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
@@ -429,7 +476,7 @@
*
* See zstd_lib.h.
*/
-typedef ZSTD_frameHeader zstd_frame_header;
+typedef ZSTD_FrameHeader zstd_frame_header;
/**
* zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
@@ -444,7 +491,35 @@
size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
size_t src_size);
-#define DSLAB_OPTIMIZE_COMPRESS
-#define DSLAB_L1DCACHE_LOG 15 /* 32KB L1d cache */
+/**
+ * struct zstd_sequence - a sequence of literals or a match
+ *
+ * @offset: The offset of the match
+ * @litLength: The literal length of the sequence
+ * @matchLength: The match length of the sequence
+ * @rep: Represents which repeat offset is used
+ */
+typedef ZSTD_Sequence zstd_sequence;
+
+/**
+ * zstd_compress_sequences_and_literals() - compress an array of zstd_sequence and literals
+ *
+ * @cctx: The zstd compression context.
+ * @dst: The buffer to compress the data into.
+ * @dst_capacity: The size of the destination buffer.
+ * @in_seqs: The array of zstd_sequence to compress.
+ * @in_seqs_size: The number of sequences in in_seqs.
+ * @literals: The literals associated to the sequences to be compressed.
+ * @lit_size: The size of the literals in the literals buffer.
+ * @lit_capacity: The size of the literals buffer.
+ * @decompressed_size: The size of the input data
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity,
+ const zstd_sequence *in_seqs, size_t in_seqs_size,
+ const void* literals, size_t lit_size, size_t lit_capacity,
+ size_t decompressed_size);
#endif /* LINUX_ZSTD_H */
Index: lib/zstd/compress/zstd_preSplit.h
===================================================================
diff --git a/lib/zstd/compress/zstd_preSplit.h b/lib/zstd/compress/zstd_preSplit.h
new file mode 100644
--- /dev/null (date 1740124333240)
+++ b/lib/zstd/compress/zstd_preSplit.h (date 1740124333240)
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PRESPLIT_H
+#define ZSTD_PRESPLIT_H
+
+#include <linux/types.h>   /* size_t */
+
+#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208
+
+/* ZSTD_splitBlock():
+ * @level must be a value between 0 and 4.
+ * higher levels spend more energy to detect block boundaries.
+ * @workspace must be aligned for size_t.
+ * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE
+ * note:
+ * For the time being, this function only accepts full 128 KB blocks.
+ * Therefore, @blockSize must be == 128 KB.
+ * While this could be extended to smaller sizes in the future,
+ * it is not yet clear if this would be useful. TBD.
+ */
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize);
+
+#endif /* ZSTD_PRESPLIT_H */
Index: lib/zstd/zstd_common_module.c
===================================================================
diff --git a/lib/zstd/zstd_common_module.c b/lib/zstd/zstd_common_module.c
new file mode 100644
--- /dev/null (date 1740124333301)
+++ b/lib/zstd/zstd_common_module.c (date 1740124333301)
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/module.h>
+
+#include "common/huf.h"
+#include "common/fse.h"
+#include "common/zstd_internal.h"
+
+// Export symbols shared by compress and decompress into a common module
+
+#undef ZSTD_isError /* defined within zstd_internal.h */
+EXPORT_SYMBOL_GPL(FSE_readNCount);
+EXPORT_SYMBOL_GPL(HUF_readStats);
+EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
+EXPORT_SYMBOL_GPL(ZSTD_isError);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Common");
Index: MAINTAINERS
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/MAINTAINERS (date 1740124333344)
@@ -22857,7 +22857,7 @@
M: Nick Terrell
S: Maintained
B: https://github.com/facebook/zstd/issues
-T: git git://github.com/terrelln/linux.git
+T: git https://github.com/terrelln/linux.git
F: include/linux/zstd*
F: lib/zstd/
F: lib/decompress_unzstd.c
Index: include/linux/zstd_lib.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
--- a/include/linux/zstd_lib.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/include/linux/zstd_lib.h (date 1740124241097)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,14 +12,46 @@
#ifndef ZSTD_H_235446
#define ZSTD_H_235446
-/* ====== Dependency ======*/
+
+/* ====== Dependencies ======*/
+#include <linux/types.h>   /* size_t */
+
+#include <linux/zstd_errors.h>  /* list of errors */
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#include <linux/limits.h>   /* INT_MAX */
-#include <linux/types.h>   /* size_t */
+#endif /* ZSTD_STATIC_LINKING_ONLY */
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
-#define ZSTDLIB_VISIBILITY
-#define ZSTDLIB_API ZSTDLIB_VISIBILITY
+#define ZSTDLIB_VISIBLE
+
+#ifndef ZSTDLIB_HIDDEN
+# if (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
+# else
+# define ZSTDLIB_HIDDEN
+# endif
+#endif
+
+#define ZSTDLIB_API ZSTDLIB_VISIBLE
+
+/* Deprecation warnings :
+ * Should these warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
+ * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
+ */
+#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
+# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__)
+# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif (__GNUC__ >= 3)
+# define ZSTD_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
+# define ZSTD_DEPRECATED(message)
+# endif
+#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
/* *****************************************************************************
@@ -56,8 +89,8 @@
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
-#define ZSTD_VERSION_MINOR 4
-#define ZSTD_VERSION_RELEASE 10
+#define ZSTD_VERSION_MINOR 5
+#define ZSTD_VERSION_RELEASE 7
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
@@ -94,13 +127,13 @@
#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
/* *************************************
* Simple API
***************************************/
/*! ZSTD_compress() :
* Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ * enough space to successfully compress the data.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
@@ -108,65 +141,106 @@
int compressionLevel);
/*! ZSTD_decompress() :
- * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- * `dstCapacity` is an upper bound of originalSize to regenerate.
- * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * Multiple compressed frames can be decompressed at once with this method.
+ * The result will be the concatenation of all decompressed frames, back to back.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().
+ * If maximum upper bound isn't known, prefer using streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
+
+/*====== Decompression helper functions ======*/
+
/*! ZSTD_getFrameContentSize() : requires v1.3.0+
- * `src` should point to the start of a ZSTD encoded frame.
- * `srcSize` must be at least as large as the frame header.
- * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- * @return : - decompressed size of `src` frame content, if known
- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- * note 1 : a 0 return value means the frame is valid but "empty".
- * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- * In which case, it's necessary to use streaming mode to decompress data.
- * Optionally, application can rely on some implicit limit,
- * as ZSTD_decompress() only needs an upper bound of decompressed size.
- * (For example, data could be necessarily cut into blocks <= 16 KB).
- * note 3 : decompressed size is always present when compression is completed using single-pass functions,
- * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- * note 4 : decompressed size can be very large (64-bits value),
- * potentially larger than what local system can handle as a single memory segment.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- * Always ensure return value fits within application's authorized limits.
- * Each application can set its own limits.
- * note 6 : This function replaces ZSTD_getDecompressedSize() */
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * When invoking this method on a skippable frame, it will return 0.
+ * note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode).
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-/*! ZSTD_getDecompressedSize() :
- * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+/*! ZSTD_getDecompressedSize() (obsolete):
+ * This function is now obsolete, in favor of ZSTD_getFrameContentSize().
* Both functions work the same way, but ZSTD_getDecompressedSize() blends
* "empty", "unknown" and "error" results to the same return value (0),
* while ZSTD_getFrameContentSize() gives them separate return values.
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
-/*! ZSTD_findFrameCompressedSize() :
+/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
* `src` should point to the start of a ZSTD frame or skippable frame.
* `srcSize` must be >= first frame size
* @return : the compressed size of the first frame starting at `src`,
* suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- * or an error code if input is invalid */
+ * or an error code if input is invalid
+ * Note 1: this method is called _find*() because it's not enough to read the header,
+ * it may have to scan through the frame's content, to reach its end.
+ * Note 2: this method also works with Skippable Frames. In which case,
+ * it returns the size of the complete skippable frame,
+ * which is always equal to its content size + 8 bytes for headers. */
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-/*====== Helper functions ======*/
-#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
-ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
-ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
-ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+/*====== Compression helper functions ======*/
+
+/*! ZSTD_compressBound() :
+ * maximum compressed size in worst case single-pass scenario.
+ * When invoking `ZSTD_compress()`, or any other one-pass compression function,
+ * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
+ * as it eliminates one potential failure scenario,
+ * aka not enough room in dst buffer to write the compressed frame.
+ * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE .
+ * In which case, ZSTD_compressBound() will return an error code
+ * which can be tested using ZSTD_isError().
+ *
+ * ZSTD_COMPRESSBOUND() :
+ * same as ZSTD_compressBound(), but as a macro.
+ * It can be used to produce constants, which can be useful for static allocation,
+ * for example to size a static array on stack.
+ * Will produce constant value 0 if srcSize is too large.
+ */
+#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
+#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+
+
+/*====== Error helper functions ======*/
+/* ZSTD_isError() :
+ * Most ZSTD_* functions returning a size_t value can be tested for error,
+ * using ZSTD_isError().
+ * @return 1 if error, 0 otherwise
+ */
+ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
/* *************************************
@@ -174,25 +248,25 @@
***************************************/
/*= Compression context
* When compressing many times,
- * it is recommended to allocate a context just once,
- * and re-use it for each successive compression operation.
- * This will make workload friendlier for system's memory.
+ * it is recommended to allocate a compression context just once,
+ * and reuse it for each successive compression operation.
+ * This will make the workload easier for system's memory.
* Note : re-using context is just a speed / resource optimization.
* It doesn't change the compression ratio, which remains identical.
- * Note 2 : In multi-threaded environments,
- * use one different context per thread for parallel execution.
+ * Note 2: For parallel execution in multi-threaded environments,
+ * use one different context per thread .
*/
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
-ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
- * Important : in order to behave similarly to `ZSTD_compress()`,
- * this function compresses at requested compression level,
- * __ignoring any other parameter__ .
+ * Important : in order to mirror `ZSTD_compress()` behavior,
+ * this function compresses at the requested compression level,
+ * __ignoring any other advanced parameter__ .
* If any advanced parameter was set using the advanced API,
- * they will all be reset. Only `compressionLevel` remains.
+ * they will all be reset. Only @compressionLevel remains.
*/
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
@@ -202,7 +276,7 @@
/*= Decompression context
* When decompressing many times,
* it is recommended to allocate a context only once,
- * and re-use it for each successive compression operation.
+ * and reuse it for each successive compression operation.
* This will make workload friendlier for system's memory.
* Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
@@ -212,28 +286,28 @@
/*! ZSTD_decompressDCtx() :
* Same as ZSTD_decompress(),
* requires an allocated ZSTD_DCtx.
- * Compatible with sticky parameters.
+ * Compatible with sticky parameters (see below).
*/
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
-/* *************************************
-* Advanced compression API
-***************************************/
+/* *******************************************
+* Advanced compression API (Requires v1.4.0+)
+**********************************************/
/* API design :
* Parameters are pushed one by one into an existing context,
* using ZSTD_CCtx_set*() functions.
* Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
* "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
- * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ * __They do not apply to one-shot variants such as ZSTD_compressCCtx()__ .
*
* It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
*
- * This API supercedes all other "advanced" API entry points in the experimental section.
- * In the future, we expect to remove from experimental API entry points which are redundant with this API.
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove API entry points from experimental which are redundant with this API.
*/
@@ -251,7 +325,6 @@
Only the order (from fast to strong) is guaranteed */
} ZSTD_strategy;
-
typedef enum {
/* compression parameters
@@ -318,6 +391,18 @@
* resulting in stronger and slower compression.
* Special: value 0 means "use default strategy". */
+ ZSTD_c_targetCBlockSize=130, /* v1.5.6+
+ * Attempts to fit compressed block size into approximately targetCBlockSize.
+ * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
+ * Note that it's not a guarantee, just a convergence target (default:0).
+ * No target when targetCBlockSize == 0.
+ * This is helpful in low bandwidth streaming environments to improve end-to-end latency,
+ * when a client can make use of partial documents (a prominent example being Chrome).
+ * Note: this parameter is stable since v1.5.6.
+ * It was present as an experimental parameter in earlier versions,
+ * but it's not recommended using it with earlier library versions
+ * due to massive performance regressions.
+ */
/* LDM mode parameters */
ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
* This parameter is designed to improve compression ratio
@@ -374,7 +459,7 @@
ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
* 0 means default, which is dynamically determined based on compression parameters.
- * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
+ * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest.
* The minimum size is automatically and transparently enforced. */
ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
* The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
@@ -397,13 +482,18 @@
* ZSTD_c_forceMaxWindow
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
- * ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
* ZSTD_c_enableDedicatedDictSearch
* ZSTD_c_stableInBuffer
* ZSTD_c_stableOutBuffer
* ZSTD_c_blockDelimiters
* ZSTD_c_validateSequences
+ * ZSTD_c_blockSplitterLevel
+ * ZSTD_c_splitAfterSequences
+ * ZSTD_c_useRowMatchFinder
+ * ZSTD_c_prefetchCDictTables
+ * ZSTD_c_enableSeqProducerFallback
+ * ZSTD_c_maxBlockSize
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@@ -413,13 +503,21 @@
ZSTD_c_experimentalParam3=1000,
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
- ZSTD_c_experimentalParam6=1003,
+ /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */
ZSTD_c_experimentalParam7=1004,
ZSTD_c_experimentalParam8=1005,
ZSTD_c_experimentalParam9=1006,
ZSTD_c_experimentalParam10=1007,
ZSTD_c_experimentalParam11=1008,
- ZSTD_c_experimentalParam12=1009
+ ZSTD_c_experimentalParam12=1009,
+ ZSTD_c_experimentalParam13=1010,
+ ZSTD_c_experimentalParam14=1011,
+ ZSTD_c_experimentalParam15=1012,
+ ZSTD_c_experimentalParam16=1013,
+ ZSTD_c_experimentalParam17=1014,
+ ZSTD_c_experimentalParam18=1015,
+ ZSTD_c_experimentalParam19=1016,
+ ZSTD_c_experimentalParam20=1017
} ZSTD_cParameter;
typedef struct {
@@ -482,7 +580,7 @@
* They will be used to compress next frame.
* Resetting session never fails.
* - The parameters : changes all parameters back to "default".
- * This removes any reference to any dictionary too.
+ * This also removes any reference to any dictionary or external sequence producer.
* Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
* otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
* - Both : similar to resetting the session, followed by resetting parameters.
@@ -491,11 +589,13 @@
/*! ZSTD_compress2() :
* Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * (note that this entry point doesn't even expose a compression level parameter).
* ZSTD_compress2() always starts a new frame.
* Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - The function is always blocking, returns when compression is completed.
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ * enough space to successfully compress the data, though it is possible it fails for other reasons.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()).
*/
@@ -504,9 +604,9 @@
const void* src, size_t srcSize);
-/* *************************************
-* Advanced decompression API
-***************************************/
+/* *********************************************
+* Advanced decompression API (Requires v1.4.0+)
+************************************************/
/* The advanced API pushes parameters one by one into an existing DCtx context.
* Parameters are sticky, and remain valid for all following frames
@@ -532,13 +632,17 @@
* ZSTD_d_stableOutBuffer
* ZSTD_d_forceIgnoreChecksum
* ZSTD_d_refMultipleDDicts
+ * ZSTD_d_disableHuffmanAssembly
+ * ZSTD_d_maxBlockSize
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
ZSTD_d_experimentalParam1=1000,
ZSTD_d_experimentalParam2=1001,
ZSTD_d_experimentalParam3=1002,
- ZSTD_d_experimentalParam4=1003
+ ZSTD_d_experimentalParam4=1003,
+ ZSTD_d_experimentalParam5=1004,
+ ZSTD_d_experimentalParam6=1005
} ZSTD_dParameter;
@@ -593,14 +697,14 @@
* A ZSTD_CStream object is required to track streaming operation.
* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+* It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
*
* For parallel execution, use one separate ZSTD_CStream per thread.
*
* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
*
* Parameters are sticky : when starting a new compression on the same context,
-* it will re-use the same sticky parameters as previous compression session.
+* it will reuse the same sticky parameters as previous compression session.
* When in doubt, it's recommended to fully initialize the context before usage.
* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
@@ -668,7 +772,7 @@
: note : multithreaded compression will block to flush as much output as possible. */
} ZSTD_EndDirective;
-/*! ZSTD_compressStream2() :
+/*! ZSTD_compressStream2() : Requires v1.4.0+
* Behaves about the same as ZSTD_compressStream, with additional control on end directive.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
@@ -689,6 +793,11 @@
* only ZSTD_e_end or ZSTD_e_flush operations are allowed.
* Before starting a new compression job, or changing compression parameters,
* it is required to fully flush internal buffers.
+ * - note: if an operation ends with an error, it may leave @cctx in an undefined state.
+ * Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.
+ * In order to be re-employed after an error, a state must be reset,
+ * which can be done explicitly (ZSTD_CCtx_reset()),
+ * or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
*/
ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
@@ -714,11 +823,9 @@
/* *****************************************************************************
- * This following is a legacy streaming API.
+ * This following is a legacy streaming API, available since v1.0+ .
* It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
* It is redundant, but remains fully supported.
- * Advanced parameters and dictionary compression can only be used through the
- * new API.
******************************************************************************/
/*!
@@ -727,6 +834,9 @@
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *
+ * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
+ * to compress with a dictionary.
*/
ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
/*!
@@ -747,7 +857,7 @@
*
* A ZSTD_DStream object is required to track streaming operations.
* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-* ZSTD_DStream objects can be re-used multiple times.
+* ZSTD_DStream objects can be re-employed multiple times.
*
* Use ZSTD_initDStream() to start a new decompression operation.
* @return : recommended first input size
@@ -757,16 +867,21 @@
* The function will update both `pos` fields.
* If `input.pos < input.size`, some input has not been consumed.
* It's up to the caller to present again remaining data.
+*
* The function tries to flush all data decoded immediately, respecting output buffer size.
* If `output.pos < output.size`, decoder has flushed everything it could.
-* But if `output.pos == output.size`, there might be some data left within internal buffers.,
+*
+* However, when `output.pos == output.size`, it's more difficult to know.
+* If @return > 0, the frame is not complete, meaning
+* either there is still some data left to flush within internal buffers,
+* or there is more input to read to complete the frame (or both).
* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
* @return : 0 when a frame is completely decoded and fully flushed,
* or an error code, which can be tested using ZSTD_isError(),
* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
* the return value is a suggested next input size (just a hint for better latency)
-* that will never request more than the remaining frame size.
+* that will never request more than the remaining content of the compressed frame.
* *******************************************************************************/
typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */
@@ -777,13 +892,38 @@
/*===== Streaming decompression functions =====*/
-/* This function is redundant with the advanced API and equivalent to:
+/*! ZSTD_initDStream() :
+ * Initialize/reset DStream state for new decompression operation.
+ * Call before new decompression operation using same DStream.
*
+ * Note : This function is redundant with the advanced API and equivalent to:
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
* ZSTD_DCtx_refDDict(zds, NULL);
*/
ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+/*! ZSTD_decompressStream() :
+ * Streaming decompression function.
+ * Call repetitively to consume full input updating it as necessary.
+ * Function will update both input and output `pos` fields exposing current state via these fields:
+ * - `input.pos < input.size`, some input remaining and caller should provide remaining input
+ * on the next call.
+ * - `output.pos < output.size`, decoder flushed internal output buffer.
+ * - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+ * check ZSTD_decompressStream() @return value,
+ * if > 0, invoke it again to flush remaining data to output.
+ * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
+ *
+ * @return : 0 when a frame is completely decoded and fully flushed,
+ * or an error code, which can be tested using ZSTD_isError(),
+ * or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
+ *
+ * Note: when an operation returns with an error code, the @zds state may be left in undefined state.
+ * It's UB to invoke `ZSTD_decompressStream()` on such a state.
+ * In order to re-use such a state, it must be first reset,
+ * which can be done explicitly (`ZSTD_DCtx_reset()`),
+ * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)
+ */
ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
@@ -796,7 +936,7 @@
/*! ZSTD_compress_usingDict() :
* Compression at an explicit compression level using a Dictionary.
* A dictionary can be any arbitrary data segment (also called a prefix),
- * or a buffer with specified information (see dictBuilder/zdict.h).
+ * or a buffer with specified information (see zdict.h).
* Note : This function loads the dictionary, resulting in significant startup delay.
* It's intended for a dictionary used only once.
* Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
@@ -879,24 +1019,30 @@
* Dictionary helper functions
*******************************/
-/*! ZSTD_getDictID_fromDict() :
+/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+
* Provides the dictID stored within dictionary.
* if @return == 0, the dictionary is not conformant with Zstandard specification.
* It can still be loaded, but as a content-only dictionary. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
-/*! ZSTD_getDictID_fromDDict() :
+/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
-/*! ZSTD_getDictID_fromFrame() :
+/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+
* Provides the dictID required to decompressed the frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
* This could for one of the following reasons :
* - The frame does not require a dictionary to be decoded (most common case).
- * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
@@ -905,23 +1051,26 @@
/* *****************************************************************************
- * Advanced dictionary and prefix API
+ * Advanced dictionary and prefix API (Requires v1.4.0+)
*
* This API allows dictionaries to be used with ZSTD_compress2(),
- * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
- * only reset with the context is reset with ZSTD_reset_parameters or
- * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ * ZSTD_compressStream2(), and ZSTD_decompressDCtx().
+ * Dictionaries are sticky, they remain valid when same context is reused,
+ * they only reset when the context is reset
+ * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
+ * In contrast, Prefixes are single-use.
******************************************************************************/
-/*! ZSTD_CCtx_loadDictionary() :
+/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+
* Create an internal CDict from `dict` buffer.
* Decompression will have to use same dictionary.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
* meaning "return to no-dictionary mode".
- * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
- * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
+ * until parameters are reset, a new dictionary is loaded, or the dictionary
+ * is explicitly invalidated by loading a NULL dictionary.
* Note 2 : Loading a dictionary involves building tables.
* It's also a CPU consuming operation, with non-negligible impact on latency.
* Tables are dependent on compression parameters, and for this reason,
@@ -930,11 +1079,15 @@
* Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
* In such a case, dictionary buffer must outlive its users.
* Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
- * to precisely select how dictionary content must be interpreted. */
+ * to precisely select how dictionary content must be interpreted.
+ * Note 5 : This method does not benefit from LDM (long distance mode).
+ * If you want to employ LDM on some large dictionary content,
+ * prefer employing ZSTD_CCtx_refPrefix() described below.
+ */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-/*! ZSTD_CCtx_refCDict() :
- * Reference a prepared dictionary, to be used for all next compressed frames.
+/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
+ * Reference a prepared dictionary, to be used for all future compressed frames.
* Note that compression parameters are enforced from within CDict,
* and supersede any compression parameter previously set within CCtx.
* The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
@@ -947,12 +1100,13 @@
* Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-/*! ZSTD_CCtx_refPrefix() :
+/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+
* Reference a prefix (single-usage dictionary) for next compressed frame.
* A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
* Decompression will need same prefix to properly regenerate data.
* Compressing with a prefix is similar in outcome as performing a diff and compressing it,
* but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * This method is compatible with LDM (long distance mode).
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
* Note 1 : Prefix buffer is referenced. It **must** outlive compression.
@@ -968,10 +1122,10 @@
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
-/*! ZSTD_DCtx_loadDictionary() :
- * Create an internal DDict from dict buffer,
- * to be used to decompress next frames.
- * The dictionary remains valid for all future frames, until explicitly invalidated.
+/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
+ * Create an internal DDict from dict buffer, to be used to decompress all future frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated, or
+ * a new dictionary is loaded.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
* meaning "return to no-dictionary mode".
@@ -985,7 +1139,7 @@
*/
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-/*! ZSTD_DCtx_refDDict() :
+/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+
* Reference a prepared dictionary, to be used to decompress next frames.
* The dictionary remains active for decompression of future frames using same DCtx.
*
@@ -995,15 +1149,16 @@
* The memory for the table is allocated on the first call to refDDict, and can be
* freed with ZSTD_freeDCtx().
*
+ * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary
+ * will be managed, and referencing a dictionary effectively "discards" any previous one.
+ *
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
- * Note 1 : Currently, only one dictionary can be managed.
- * Referencing a new dictionary effectively "discards" any previous one.
* Special: referencing a NULL DDict means "return to no-dictionary mode".
* Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
*/
ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-/*! ZSTD_DCtx_refPrefix() :
+/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+
* Reference a prefix (single-usage dictionary) to decompress next frame.
* This is the reverse operation of ZSTD_CCtx_refPrefix(),
* and must use the same prefix as the one used during compression.
@@ -1024,7 +1179,7 @@
/* === Memory management === */
-/*! ZSTD_sizeof_*() :
+/*! ZSTD_sizeof_*() : Requires v1.4.0+
* These functions give the _current_ memory usage of selected object.
* Note that object memory usage can evolve (increase or decrease) over time. */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
@@ -1034,6 +1189,7 @@
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
#endif /* ZSTD_H_235446 */
@@ -1049,6 +1205,12 @@
#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* This can be overridden externally to hide static symbols. */
+#ifndef ZSTDLIB_STATIC_API
+#define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
+#endif
+
/* **************************************************************************************
* experimental API (static linking only)
****************************************************************************************
@@ -1083,6 +1245,7 @@
#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
#define ZSTD_STRATEGY_MIN ZSTD_fast
#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */
#define ZSTD_OVERLAPLOG_MIN 0
@@ -1106,14 +1269,11 @@
#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
/* Advanced parameter bounds */
-#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MIN 1340 /* suitable to fit into an ethernet / wifi / 4G transport frame */
#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
#define ZSTD_SRCSIZEHINT_MIN 0
#define ZSTD_SRCSIZEHINT_MAX INT_MAX
-/* internal */
-#define ZSTD_HASHLOG3_MAX 17
-
/* --- Advanced types --- */
@@ -1151,7 +1311,7 @@
*
* Note: This field is optional. ZSTD_generateSequences() will calculate the value of
* 'rep', but repeat offsets do not necessarily need to be calculated from an external
- * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * sequence provider perspective. For example, ZSTD_compressSequences() does not
* use this 'rep' field at all (as of now).
*/
} ZSTD_Sequence;
@@ -1255,9 +1415,19 @@
ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */
} ZSTD_literalCompressionMode_e;
+typedef enum {
+ /* Note: This enum controls features which are conditionally beneficial.
+ * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto),
+ * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force enable/disable the feature.
+ */
+ ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
+ ZSTD_ps_enable = 1, /* Force-enable the feature */
+ ZSTD_ps_disable = 2 /* Do not use the feature */
+} ZSTD_ParamSwitch_e;
+#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */
/* *************************************
-* Frame size functions
+* Frame header and size functions
***************************************/
/*! ZSTD_findDecompressedSize() :
@@ -1281,7 +1451,7 @@
* note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
* read each contained frame header. This is fast as most of the data is skipped,
* however it does mean that all frame data must be present and valid. */
-ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
/*! ZSTD_decompressBound() :
* `src` should point to the start of a series of ZSTD encoded and/or skippable frames
@@ -1296,41 +1466,137 @@
* note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
* upper-bound = # blocks * min(128 KB, Window_Size)
*/
-ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
/*! ZSTD_frameHeaderSize() :
- * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
-ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;
+#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */
+ unsigned checksumFlag;
+ unsigned _reserved1;
+ unsigned _reserved2;
+} ZSTD_FrameHeader;
+#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */
+
+/*! ZSTD_getFrameHeader() :
+ * decode Frame Header into `zfhPtr`, or requires larger `srcSize`.
+ * @return : 0 => header is complete, `zfhPtr` is correctly filled,
+ * >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize);
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+
+/*! ZSTD_decompressionMargin() :
+ * Zstd supports in-place decompression, where the input and output buffers overlap.
+ * In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
+ * and the input buffer must be at the end of the output buffer.
+ *
+ * _______________________ Output Buffer ________________________
+ * | |
+ * | ____ Input Buffer ____|
+ * | | |
+ * v v v
+ * |---------------------------------------|-----------|----------|
+ * ^ ^ ^
+ * |___________________ Output_Size ___________________|_ Margin _|
+ *
+ * NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
+ * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
+ * ZSTD_decompressDCtx().
+ * NOTE: This function supports multi-frame input.
+ *
+ * @param src The compressed frame(s)
+ * @param srcSize The size of the compressed frame(s)
+ * @returns The decompression margin or an error that can be checked with ZSTD_isError().
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
+
+/*! ZSTD_DECOMPRESS_MARGIN() :
+ * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
+ * the compressed frame, compute it from the original size and the blockSizeLog.
+ * See ZSTD_decompressionMargin() for details.
+ *
+ * WARNING: This macro does not support multi-frame input, the input must be a single
+ * zstd frame. If you need that support use the function, or implement it yourself.
+ *
+ * @param originalSize The original uncompressed size of the data.
+ * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
+ * Unless you explicitly set the windowLog smaller than
+ * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.
+ */
+#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \
+ ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \
+ 4 /* checksum */ + \
+ ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
+ (blockSize) /* One block of margin */ \
+ ))
typedef enum {
- ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
- ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
-} ZSTD_sequenceFormat_e;
+ ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */
+} ZSTD_SequenceFormat_e;
+#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */
+
+/*! ZSTD_sequenceBound() :
+ * `srcSize` : size of the input buffer
+ * @return : upper-bound for the number of sequences that can be generated
+ * from a buffer of srcSize bytes
+ *
+ * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
/*! ZSTD_generateSequences() :
- * Generate sequences using ZSTD_compress2, given a source buffer.
+ * WARNING: This function is meant for debugging and informational purposes ONLY!
+ * Its implementation is flawed, and it will be deleted in a future version.
+ * It is not guaranteed to succeed, as there are several cases where it will give
+ * up and fail. You should NOT use this function in production code.
+ *
+ * This function is deprecated, and will be removed in a future version.
+ *
+ * Generate sequences using ZSTD_compress2(), given a source buffer.
+ *
+ * @param zc The compression context to be used for ZSTD_compress2(). Set any
+ * compression parameters you need on this context.
+ * @param outSeqs The output sequences buffer of size @p outSeqsSize
+ * @param outSeqsCapacity The size of the output sequences buffer.
+ * ZSTD_sequenceBound(srcSize) is an upper bound on the number
+ * of sequences that can be generated.
+ * @param src The source buffer to generate sequences from of size @p srcSize.
+ * @param srcSize The size of the source buffer.
*
* Each block will end with a dummy sequence
* with offset == 0, matchLength == 0, and litLength == length of last literals.
* litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
* simply acts as a block delimiter.
*
- * zc can be used to insert custom compression params.
- * This function invokes ZSTD_compress2
- *
- * The output of this function can be fed into ZSTD_compressSequences() with CCtx
- * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
- * @return : number of sequences generated
+ * @returns The number of sequences generated, necessarily less than
+ * ZSTD_sequenceBound(srcSize), or an error code that can be checked
+ * with ZSTD_isError().
*/
-
-ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
+ZSTDLIB_STATIC_API size_t
+ZSTD_generateSequences(ZSTD_CCtx* zc,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize);
/*! ZSTD_mergeBlockDelimiters() :
* Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
- * by merging them into into the literals of the next sequence.
+ * by merging them into the literals of the next sequence.
*
* As such, the final generated result has no explicit representation of block boundaries,
* and the final last literals segment is not represented in the sequences.
@@ -1339,11 +1605,13 @@
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
* @return : number of sequences left after merging
*/
-ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
/*! ZSTD_compressSequences() :
- * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
- * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
+ * @src contains the entire input (not just the literals).
+ * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
+ * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
* The entire source is compressed into a single frame.
*
* The compression behavior changes based on cctx params. In particular:
@@ -1352,11 +1620,17 @@
* the block size derived from the cctx, and sequences may be split. This is the default setting.
*
* If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
- * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
*
- * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
- * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
- * specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+ * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes
+ * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit
+ * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
+ * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
+ * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
+ *
+ * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) and then bail out and return an error.
*
* In addition to the two adjustable experimental params, there are other important cctx params.
* - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
@@ -1364,31 +1638,81 @@
* - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
* is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
*
- * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
- * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
- * and cannot emit an RLE block that disagrees with the repcode history
- * @return : final compressed size or a ZSTD error.
+ * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.
+ * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history.
+ * @return : final compressed size, or a ZSTD error code.
*/
-ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
+/*! ZSTD_compressSequencesAndLiterals() :
+ * This is a variant of ZSTD_compressSequences() which,
+ * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
+ * aka all the literals, already extracted and laid out into a single continuous buffer.
+ * This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+ * thus skipping an extraction + caching stage.
+ * It's a speed optimization, useful when the right conditions are met,
+ * but it also features the following limitations:
+ * - Only supports explicit delimiter mode
+ * - Currently does not support Sequences validation (so input Sequences are trusted)
+ * - Not compatible with frame checksum, which must be disabled
+ * - If any block is incompressible, will fail and return an error
+ * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
+ * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
+ * @litBufCapacity must be at least 8 bytes larger than @litSize.
+ * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
+ * @return : final compressed size, or a ZSTD error code.
+ */
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t litBufCapacity,
+ size_t decompressedSize);
+
+
/*! ZSTD_writeSkippableFrame() :
* Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
*
- * Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number,
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
* ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
- * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
- * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used,
+ * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
*
* Returns an error if destination buffer is not large enough, if the source size is not representable
* with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
*
* @return : number of bytes written or a ZSTD error.
*/
-ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, unsigned magicVariant);
+ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned magicVariant);
+
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.
+ *
+ * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.
+ * This can be NULL if the caller is not interested in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ unsigned* magicVariant,
+ const void* src, size_t srcSize);
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ */
+ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+
/* *************************************
@@ -1398,58 +1722,69 @@
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
+ * This is useful in combination with ZSTD_initStatic(),
+ * which makes it possible to employ a static buffer for ZSTD_CCtx* state.
*
* ZSTD_estimateCCtxSize() will provide a memory budget large enough
- * for any compression level up to selected one.
- * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
- * does not include space for a window buffer.
- * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()
+ * associated with any compression level up to max specified one.
* The estimate will assume the input may be arbitrarily large,
* which is the worst case.
*
+ * Note that the size estimation is specific for one-shot compression,
+ * it is not valid for streaming (see ZSTD_estimateCStreamSize*())
+ * nor other potential ways of using a ZSTD_CCtx* state.
+ *
* When srcSize can be bound by a known and rather "small" value,
- * this fact can be used to provide a tighter estimation
- * because the CCtx compression context will need less memory.
- * This tighter estimation can be provided by more advanced functions
+ * this knowledge can be used to provide a tighter budget estimation
+ * because the ZSTD_CCtx* state will need less memory for small inputs.
+ * This tighter estimation can be provided by employing more advanced functions
* ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
* and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
* Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
*
- * Note 2 : only single-threaded compression is supported.
+ * Note : only single-threaded compression is supported.
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
*/
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
/*! ZSTD_estimateCStreamSize() :
- * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
- * It will also consider src size to be arbitrarily "large", which is worst case.
+ * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression
+ * using any compression level up to the max specified one.
+ * It will also consider src size to be arbitrarily "large", which is a worst case scenario.
* If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note : CStream size estimation is only correct for single-threaded compression.
- * ZSTD_DStream memory budget depends on window Size.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
+ * Size estimates assume that no external sequence producer is registered.
+ *
+ * ZSTD_DStream memory budget depends on frame's window Size.
* This information can be passed manually, using ZSTD_estimateDStreamSize,
* or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Any frame requesting a window size larger than max specified one will be rejected.
* Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
* an internal ?Dict will be created, which additional size is not estimated here.
- * In this case, get total size by adding ZSTD_estimate?DictSize */
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+ * In this case, get total size by adding ZSTD_estimate?DictSize
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
/*! ZSTD_estimate?DictSize() :
* ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
* ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
* Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
*/
-ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
-ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
/*! ZSTD_initStatic*() :
* Initialize an object using a pre-allocated fixed-size buffer.
@@ -1472,20 +1807,20 @@
* Limitation 2 : static cctx currently not compatible with multi-threading.
* Limitation 3 : static dctx is incompatible with legacy support.
*/
-ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
-ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
-ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams);
-ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
@@ -1502,46 +1837,54 @@
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
static
__attribute__((__unused__))
+
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
+#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic pop
+#endif
-ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem);
-/* ! Thread pool :
- * These prototypes make it possible to share a thread pool among multiple compression contexts.
- * This can limit resources for applications with multiple threads where each one uses
- * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
- * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
- * Note that the lifetime of such pool must exist while being used.
- * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
- * to use an internal thread pool).
- * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+/*! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that the lifetime of such pool must exist while being used.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
*/
typedef struct POOL_ctx_s ZSTD_threadPool;
-ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
-ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
-ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
+ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
/*
* This API is temporary and is expected to change or disappear in the future!
*/
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2(
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
const ZSTD_CCtx_params* cctxParams,
ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
@@ -1558,28 +1901,22 @@
* As a consequence, `dictBuffer` **must** outlive CDict,
* and its content must remain unmodified throughout the lifetime of CDict.
* note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
-
-/*! ZSTD_getDictID_fromCDict() :
- * Provides the dictID of the dictionary loaded into `cdict`.
- * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
* `estimatedSrcSize` value is optional, select 0 if not known */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
/*! ZSTD_getParams() :
* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
* All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
-ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
/*! ZSTD_checkCParams() :
* Ensure param values remain within authorized range.
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
/*! ZSTD_adjustCParams() :
* optimize params for a given `srcSize` and `dictSize`.
@@ -1587,23 +1924,48 @@
* `dictSize` must be `0` when there is no dictionary.
* cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
* This function never fails (wide contract) */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_CCtx_setCParams() :
+ * Set all parameters provided within @p cparams into the working @p cctx.
+ * Note : if modifying parameters during compression (MT mode only),
+ * note that changes to the .windowLog parameter will be ignored.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ * On failure, no parameters are updated.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
+
+/*! ZSTD_CCtx_setFParams() :
+ * Set all parameters provided within @p fparams into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
+
+/*! ZSTD_CCtx_setParams() :
+ * Set all parameters provided within @p params into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
/*! ZSTD_compress_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
- * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
-ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- ZSTD_parameters params);
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2")
+ZSTDLIB_STATIC_API
+size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
/*! ZSTD_compress_usingCDict_advanced() :
- * Note : this function is now REDUNDANT.
+ * Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
- * This prototype will be marked as deprecated and generate compilation warning in some future version */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ * This prototype will generate compilation warnings. */
+ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+ZSTDLIB_STATIC_API
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
@@ -1613,18 +1975,18 @@
/*! ZSTD_CCtx_loadDictionary_byReference() :
* Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
* It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
/*! ZSTD_CCtx_loadDictionary_advanced() :
* Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
* how to load the dictionary (by copy ? by reference ?)
* and how to interpret it (automatic ? force raw mode ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
/*! ZSTD_CCtx_refPrefix_advanced() :
* Same as ZSTD_CCtx_refPrefix(), but gives finer control over
* how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
/* === experimental parameters === */
/* these parameters can be used with ZSTD_setParameter()
@@ -1663,17 +2025,18 @@
* See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
-/* Controls how the literals are compressed (default is auto).
- * The value must be of type ZSTD_literalCompressionMode_e.
- * See ZSTD_literalCompressionMode_t enum definition for details.
+/* Controlled with ZSTD_ParamSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never compress literals.
+ * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
+ * may still be emitted if huffman is not beneficial to use.)
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * literals compression based on the compression parameters - specifically,
+ * negative compression levels do not use literal compression.
*/
#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
-/* Tries to fit compressed block size to be around targetCBlockSize.
- * No target when targetCBlockSize == 0.
- * There is no guarantee on compressed block size (default:0) */
-#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
-
/* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size,
@@ -1728,7 +2091,7 @@
*
* Note that this means that the CDict tables can no longer be copied into the
* CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
- * useable. The dictionary can only be attached or reloaded.
+ * usable. The dictionary can only be attached or reloaded.
*
* In general, you should expect compression to be faster--sometimes very much
* so--and CDict creation to be slightly slower. Eventually, we will probably
@@ -1740,13 +2103,16 @@
* Experimental parameter.
* Default is 0 == disabled. Set to 1 to enable.
*
- * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
- * between calls, except for the modifications that zstd makes to pos (the
- * caller must not modify pos). This is checked by the compressor, and
- * compression will fail if it ever changes. This means the only flush
- * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
- * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
- * MUST not be modified during compression or you will get data corruption.
+ * Tells the compressor that input data presented with ZSTD_inBuffer
+ * will ALWAYS be the same between calls.
+ * Technically, the @src pointer must never be changed,
+ * and the @pos field can only be updated by zstd.
+ * However, it's possible to increase the @size field,
+ * allowing scenarios where more data can be appended after compressions starts.
+ * These conditions are checked by the compressor,
+ * and compression will fail if they are not respected.
+ * Also, data in the ZSTD_inBuffer within the range [src, src + pos)
+ * MUST not be modified during compression or it will result in data corruption.
*
* When this flag is enabled zstd won't allocate an input window buffer,
* because the user guarantees it can reference the ZSTD_inBuffer until
@@ -1754,18 +2120,15 @@
* large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
* avoid the memcpy() from the input buffer to the input window buffer.
*
- * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
- * That means this flag cannot be used with ZSTD_compressStream().
- *
* NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
* this flag is ALWAYS memory safe, and will never access out-of-bounds
- * memory. However, compression WILL fail if you violate the preconditions.
+ * memory. However, compression WILL fail if conditions are not respected.
*
- * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST
- * not be modified during compression or you will get data corruption. This
- * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or it will result in data corruption.
+ * This is because zstd needs to reference data in the ZSTD_inBuffer to find
* matches. Normally zstd maintains its own window buffer for this purpose,
- * but passing this flag tells zstd to use the user provided buffer.
+ * but passing this flag tells zstd to rely on user provided buffer instead.
*/
#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
@@ -1803,26 +2166,167 @@
/* ZSTD_c_validateSequences
* Default is 0 == disabled. Set to 1 to enable sequence validation.
*
- * For use with sequence compression API: ZSTD_compressSequences().
- * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * For use with sequence compression API: ZSTD_compressSequences*().
+ * Designates whether or not provided sequences are validated within ZSTD_compressSequences*()
* during function execution.
*
- * Without validation, providing a sequence that does not conform to the zstd spec will cause
- * undefined behavior, and may produce a corrupted block.
+ * When Sequence validation is disabled (default), Sequences are compressed as-is,
+ * so they must correct, otherwise it would result in a corruption error.
*
- * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for
+ * Sequence validation adds some protection, by ensuring that all values respect boundary conditions.
+ * If a Sequence is detected invalid (see doc/zstd_compression_format.md for
* specifics regarding offset/matchlength requirements) then the function will bail out and
* return an error.
- *
*/
#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+/* ZSTD_c_blockSplitterLevel
+ * note: this parameter only influences the first splitter stage,
+ * which is active before producing the sequences.
+ * ZSTD_c_splitAfterSequences controls the next splitter stage,
+ * which is active after sequence production.
+ * Note that both can be combined.
+ * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included.
+ * 0 means "auto", which will select a value depending on current ZSTD_c_strategy.
+ * 1 means no splitting.
+ * Then, values from 2 to 6 are sorted in increasing cpu load order.
+ *
+ * Note that currently the first block is never split,
+ * to ensure expansion guarantees in presence of incompressible data.
+ */
+#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6
+#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20
+
+/* ZSTD_c_splitAfterSequences
+ * This is a stronger splitter algorithm,
+ * based on actual sequences previously produced by the selected parser.
+ * It's also slower, and as a consequence, mostly used for high compression levels.
+ * While the post-splitter does overlap with the pre-splitter,
+ * both can nonetheless be combined,
+ * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX,
+ * resulting in higher compression ratio than just one of them.
+ *
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use block splitter.
+ * Set to ZSTD_ps_enable to always use block splitter.
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * block splitting based on the compression parameters.
+ */
+#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13
+
+/* ZSTD_c_useRowMatchFinder
+ * Controlled with ZSTD_ParamSwitch_e enum.
+ * Default is ZSTD_ps_auto.
+ * Set to ZSTD_ps_disable to never use row-based matchfinder.
+ * Set to ZSTD_ps_enable to force usage of row-based matchfinder.
+ *
+ * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
+ * the row-based matchfinder based on support for SIMD instructions and the window log.
+ * Note that this only pertains to compression strategies: greedy, lazy, and lazy2
+ */
+#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14
+
+/* ZSTD_c_deterministicRefPrefix
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Zstd produces different results for prefix compression when the prefix is
+ * directly adjacent to the data about to be compressed vs. when it isn't.
+ * This is because zstd detects that the two buffers are contiguous and it can
+ * use a more efficient match finding algorithm. However, this produces different
+ * results than when the two buffers are non-contiguous. This flag forces zstd
+ * to always load the prefix in non-contiguous mode, even if it happens to be
+ * adjacent to the data, to guarantee determinism.
+ *
+ * If you really care about determinism when using a dictionary or prefix,
+ * like when doing delta compression, you should select this option. It comes
+ * at a speed penalty of about ~2.5% if the dictionary and data happened to be
+ * contiguous, and is free if they weren't contiguous. We don't expect that
+ * intentionally making the dictionary and data contiguous will be worth the
+ * cost to memcpy() the data.
+ */
+#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
+
+/* ZSTD_c_prefetchCDictTables
+ * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto.
+ *
+ * In some situations, zstd uses CDict tables in-place rather than copying them
+ * into the working context. (See docs on ZSTD_dictAttachPref_e above for details).
+ * In such situations, compression speed is seriously impacted when CDict tables are
+ * "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables
+ * when they are used in-place.
+ *
+ * For sufficiently small inputs, the cost of the prefetch will outweigh the benefit.
+ * For sufficiently large inputs, zstd will by default memcpy() CDict tables
+ * into the working context, so there is no need to prefetch. This parameter is
+ * targeted at a middle range of input sizes, where a prefetch is cheap enough to be
+ * useful but memcpy() is too expensive. The exact range of input sizes where this
+ * makes sense is best determined by careful experimentation.
+ *
+ * Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable,
+ * but in the future zstd may conditionally enable this feature via an auto-detection
+ * heuristic for cold CDicts.
+ * Use ZSTD_ps_disable to opt out of prefetching under any circumstances.
+ */
+#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16
+
+/* ZSTD_c_enableSeqProducerFallback
+ * Allowed values are 0 (disable) and 1 (enable). The default setting is 0.
+ *
+ * Controls whether zstd will fall back to an internal sequence producer if an
+ * external sequence producer is registered and returns an error code. This fallback
+ * is block-by-block: the internal sequence producer will only be called for blocks
+ * where the external sequence producer returns an error code. Fallback parsing will
+ * follow any other cParam settings, such as compression level, the same as in a
+ * normal (fully-internal) compression operation.
+ *
+ * The user is strongly encouraged to read the full Block-Level Sequence Producer API
+ * documentation (below) before setting this parameter. */
+#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17
+
+/* ZSTD_c_maxBlockSize
+ * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
+ * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
+ *
+ * This parameter can be used to set an upper bound on the blocksize
+ * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
+ * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make
+ * compressBound() inaccurate). Only currently meant to be used for testing.
+ */
+#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
+
+/* ZSTD_c_repcodeResolution
+ * This parameter only has an effect if ZSTD_c_blockDelimiters is
+ * set to ZSTD_sf_explicitBlockDelimiters (may change in the future).
+ *
+ * This parameter affects how zstd parses external sequences,
+ * provided via the ZSTD_compressSequences*() API
+ * or from an external block-level sequence producer.
+ *
+ * If set to ZSTD_ps_enable, the library will check for repeated offsets within
+ * external sequences, even if those repcodes are not explicitly indicated in
+ * the "rep" field. Note that this is the only way to exploit repcode matches
+ * while using compressSequences*() or an external sequence producer, since zstd
+ * currently ignores the "rep" field of external sequences.
+ *
+ * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in
+ * external sequences, regardless of whether the "rep" field has been set. This
+ * reduces sequence compression overhead by about 25% while sacrificing some
+ * compression ratio.
+ *
+ * The default value is ZSTD_ps_auto, for which the library will enable/disable
+ * based on compression level (currently: level<10 disables, level>=10 enables).
+ */
+#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19
+#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */
+
+
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
* @return : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
/*! ZSTD_CCtx_params :
@@ -1842,27 +2346,27 @@
* This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
* for static allocation of CCtx for single-threaded compression.
*/
-ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
+ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
/*! ZSTD_CCtxParams_reset() :
* Reset params to default values.
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
/*! ZSTD_CCtxParams_init() :
* Initializes the compression parameters of cctxParams according to
* compression level. All other parameters are reset to their default values.
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
/*! ZSTD_CCtxParams_init_advanced() :
* Initializes the compression and frame parameters of cctxParams according to
* params. All other parameters are reset to their default values.
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
-/*! ZSTD_CCtxParams_setParameter() :
+/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+
* Similar to ZSTD_CCtx_setParameter.
* Set one compression parameter, selected by enum ZSTD_cParameter.
* Parameters must be applied to a ZSTD_CCtx using
@@ -1870,14 +2374,14 @@
* @result : a code representing success or failure (which can be tested with
* ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
/*! ZSTD_CCtxParams_getParameter() :
* Similar to ZSTD_CCtx_getParameter.
* Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
* Apply a set of ZSTD_CCtx_params to the compression context.
@@ -1886,7 +2390,7 @@
* if nbWorkers>=1, new parameters will be picked up at next job,
* with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
*/
-ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
/*! ZSTD_compressStream2_simpleArgs() :
@@ -1895,7 +2399,7 @@
* This variant might be helpful for binders from dynamic languages
* which have troubles handling structures containing memory pointers.
*/
-ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
@@ -1911,33 +2415,33 @@
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
* Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
* Note 3 : Skippable Frame Identifiers are considered valid. */
-ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
/*! ZSTD_createDDict_byReference() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* Dictionary content is referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives DDict,
* it must remain read accessible throughout the lifetime of DDict */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
/*! ZSTD_DCtx_loadDictionary_byReference() :
* Same as ZSTD_DCtx_loadDictionary(),
* but references `dict` content instead of copying it into `dctx`.
* This saves memory if `dict` remains around.,
* However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
/*! ZSTD_DCtx_loadDictionary_advanced() :
* Same as ZSTD_DCtx_loadDictionary(),
* but gives direct control over
* how to load the dictionary (by copy ? by reference ?)
* and how to interpret it (automatic ? force raw mode ? full mode only ?). */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
/*! ZSTD_DCtx_refPrefix_advanced() :
* Same as ZSTD_DCtx_refPrefix(), but gives finer control over
* how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
/*! ZSTD_DCtx_setMaxWindowSize() :
* Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
@@ -1946,14 +2450,14 @@
* By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
* @return : 0, or an error code (which can be tested using ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
/*! ZSTD_DCtx_getParameter() :
* Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
* and store it into int* value.
* @return : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
/* ZSTD_d_format
* experimental parameter,
@@ -1973,7 +2477,7 @@
* in the range [dst, dst + pos) MUST not be modified during decompression
* or you will get data corruption.
*
- * When this flags is enabled zstd won't allocate an output buffer, because
+ * When this flag is enabled zstd won't allocate an output buffer, because
* it can write directly to the ZSTD_outBuffer, but it will still allocate
* an input buffer large enough to fit any compressed block. This will also
* avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
@@ -2026,13 +2530,43 @@
*/
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
+/* ZSTD_d_disableHuffmanAssembly
+ * Set to 1 to disable the Huffman assembly implementation.
+ * The default value is 0, which allows zstd to use the Huffman assembly
+ * implementation if available.
+ *
+ * This parameter can be used to disable Huffman assembly at runtime.
+ * If you want to disable it at compile time you can define the macro
+ * ZSTD_DISABLE_ASM.
+ */
+#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5
+
+/* ZSTD_d_maxBlockSize
+ * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
+ * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
+ *
+ * Forces the decompressor to reject blocks whose content size is
+ * larger than the configured maxBlockSize. When maxBlockSize is
+ * larger than the windowSize, the windowSize is used instead.
+ * This saves memory on the decoder when you know all blocks are small.
+ *
+ * This option is typically used in conjunction with ZSTD_c_maxBlockSize.
+ *
+ * WARNING: This causes the decoder to reject otherwise valid frames
+ * that have block sizes larger than the configured maxBlockSize.
+ */
+#define ZSTD_d_maxBlockSize ZSTD_d_experimentalParam6
+
/*! ZSTD_DCtx_setFormat() :
+ * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
* Instruct the decoder context about what kind of data to decode next.
* This instruction is mandatory to decode data without a fully-formed header,
* such ZSTD_f_zstd1_magicless for example.
* @return : 0, or an error code (which can be tested using ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+ZSTDLIB_STATIC_API
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
/*! ZSTD_decompressStream_simpleArgs() :
* Same as ZSTD_decompressStream(),
@@ -2040,7 +2574,7 @@
* This can be helpful for binders from dynamic languages
* which have troubles handling structures containing memory pointers.
*/
-ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos);
@@ -2056,7 +2590,7 @@
/*===== Advanced Streaming compression functions =====*/
/*! ZSTD_initCStream_srcSize() :
- * This function is deprecated, and equivalent to:
+ * This function is DEPRECATED, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
@@ -2065,15 +2599,16 @@
* pledgedSrcSize must be correct. If it is not known at init time, use
* ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
* "0" also disables frame content size field. It may be enabled in the future.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
/*! ZSTD_initCStream_usingDict() :
- * This function is deprecated, and is equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
@@ -2082,81 +2617,85 @@
* dict == NULL or dictSize < 8, in which case no dict is used.
* Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
* it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
int compressionLevel);
/*! ZSTD_initCStream_advanced() :
- * This function is deprecated, and is approximately equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd parameter and leave the rest as-is.
- * for ((param, value) : params) {
- * ZSTD_CCtx_setParameter(zcs, param, value);
- * }
+ * ZSTD_CCtx_setParams(zcs, params);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
* dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
* pledgedSrcSize must be correct.
* If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params,
unsigned long long pledgedSrcSize);
/*! ZSTD_initCStream_usingCDict() :
- * This function is deprecated, and equivalent to:
+ * This function is DEPRECATED, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* note : cdict will just be referenced, and must outlive compression session
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
/*! ZSTD_initCStream_usingCDict_advanced() :
- * This function is DEPRECATED, and is approximately equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
- * for ((fParam, value) : fParams) {
- * ZSTD_CCtx_setParameter(zcs, fParam, value);
- * }
+ * ZSTD_CCtx_setFParams(zcs, fParams);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN.
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t
-ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize);
/*! ZSTD_resetCStream() :
- * This function is deprecated, and is equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but
+ * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be
+ * explicitly specified.
*
* start a new frame, using same parameters from previous frame.
- * This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
+ * This is typically useful to skip dictionary loading stage, since it will reuse it in-place.
* Note that zcs must be init at least once before using ZSTD_resetCStream().
* If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
* If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
* For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
* but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
* @return : 0, or an error code (which can be tested using ZSTD_isError())
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * This prototype will generate compilation warnings.
*/
-ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
typedef struct {
@@ -2174,7 +2713,7 @@
* Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
* Aggregates progression inside active worker threads.
*/
-ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
/*! ZSTD_toFlushNow() :
* Tell how many bytes are ready to be flushed immediately.
@@ -2189,7 +2728,7 @@
* therefore flush speed is limited by production speed of oldest job
* irrespective of the speed of concurrent (and newer) jobs.
*/
-ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
/*===== Advanced Streaming decompression functions =====*/
@@ -2201,9 +2740,9 @@
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
/*!
* This function is deprecated, and is equivalent to:
@@ -2212,27 +2751,211 @@
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
/*!
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
- * re-use decompression parameters from previous init; saves dictionary loading
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * reuse decompression parameters from previous init; saves dictionary loading
*/
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API *********************
+ *
+ * *** OVERVIEW ***
+ * The Block-Level Sequence Producer API allows users to provide their own custom
+ * sequence producer which libzstd invokes to process each block. The produced list
+ * of sequences (literals and matches) is then post-processed by libzstd to produce
+ * valid compressed blocks.
+ *
+ * This block-level offload API is a more granular complement of the existing
+ * frame-level offload API compressSequences() (introduced in v1.5.1). It offers
+ * an easier migration story for applications already integrated with libzstd: the
+ * user application continues to invoke the same compression functions
+ * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits
+ * from the specific advantages of the external sequence producer. For example,
+ * the sequence producer could be tuned to take advantage of known characteristics
+ * of the input, to offer better speed / ratio, or could leverage hardware
+ * acceleration not available within libzstd itself.
+ *
+ * See contrib/externalSequenceProducer for an example program employing the
+ * Block-Level Sequence Producer API.
+ *
+ * *** USAGE ***
+ * The user is responsible for implementing a function of type
+ * ZSTD_sequenceProducer_F. For each block, zstd will pass the following
+ * arguments to the user-provided function:
+ *
+ * - sequenceProducerState: a pointer to a user-managed state for the sequence
+ * producer.
+ *
+ * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer.
+ * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory
+ * backing outSeqs is managed by the CCtx.
+ *
+ * - src, srcSize: an input buffer for the sequence producer to parse.
+ * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX.
+ *
+ * - dict, dictSize: a history buffer, which may be empty, which the sequence
+ * producer may reference as it parses the src buffer. Currently, zstd will
+ * always pass dictSize == 0 into external sequence producers, but this will
+ * change in the future.
+ *
+ * - compressionLevel: a signed integer representing the zstd compression level
+ * set by the user for the current operation. The sequence producer may choose
+ * to use this information to change its compression strategy and speed/ratio
+ * tradeoff. Note: the compression level does not reflect zstd parameters set
+ * through the advanced API.
+ *
+ * - windowSize: a size_t representing the maximum allowed offset for external
+ * sequences. Note that sequence offsets are sometimes allowed to exceed the
+ * windowSize if a dictionary is present, see doc/zstd_compression_format.md
+ * for details.
+ *
+ * The user-provided function shall return a size_t representing the number of
+ * sequences written to outSeqs. This return value will be treated as an error
+ * code if it is greater than outSeqsCapacity. The return value must be non-zero
+ * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided
+ * for convenience, but any value greater than outSeqsCapacity will be treated as
+ * an error code.
+ *
+ * If the user-provided function does not return an error code, the sequences
+ * written to outSeqs must be a valid parse of the src buffer. Data corruption may
+ * occur if the parse is not valid. A parse is defined to be valid if the
+ * following conditions hold:
+ * - The sum of matchLengths and literalLengths must equal srcSize.
+ * - All sequences in the parse, except for the final sequence, must have
+ * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have
+ * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0.
+ * - All offsets must respect the windowSize parameter as specified in
+ * doc/zstd_compression_format.md.
+ * - If the final sequence has matchLength == 0, it must also have offset == 0.
+ *
+ * zstd will only validate these conditions (and fail compression if they do not
+ * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence
+ * validation has a performance cost.
+ *
+ * If the user-provided function returns an error, zstd will either fall back
+ * to an internal sequence producer or fail the compression operation. The user can
+ * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback
+ * cParam. Fallback compression will follow any other cParam settings, such as
+ * compression level, the same as in a normal compression operation.
+ *
+ * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F
+ * function by calling
+ * ZSTD_registerSequenceProducer(cctx,
+ * sequenceProducerState,
+ * sequenceProducer)
+ * This setting will persist until the next parameter reset of the CCtx.
+ *
+ * The sequenceProducerState must be initialized by the user before calling
+ * ZSTD_registerSequenceProducer(). The user is responsible for destroying the
+ * sequenceProducerState.
+ *
+ * *** LIMITATIONS ***
+ * This API is compatible with all zstd compression APIs which respect advanced parameters.
+ * However, there are three limitations:
+ *
+ * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported.
+ * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level
+ * external sequence producer.
+ * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some
+ * cases (see its documentation for details). Users must explicitly set
+ * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external
+ * sequence producer is registered.
+ * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default
+ * whenever ZSTD_c_windowLog < 128MB, but that's subject to change. Users should
+ * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence
+ * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog).
+ *
+ * Second, history buffers are not currently supported. Concretely, zstd will always pass
+ * dictSize == 0 to the external sequence producer (for now). This has two implications:
+ * - Dictionaries are not currently supported. Compression will *not* fail if the user
+ * references a dictionary, but the dictionary won't have any effect.
+ * - Stream history is not currently supported. All advanced compression APIs, including
+ * streaming APIs, work with external sequence producers, but each block is treated as
+ * an independent chunk without history from previous blocks.
+ *
+ * Third, multi-threading within a single compression is not currently supported. In other words,
+ * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered.
+ * Multi-threading across compressions is fine: simply create one CCtx per thread.
+ *
+ * Long-term, we plan to overcome all three limitations. There is no technical blocker to
+ * overcoming them. It is purely a question of engineering effort.
+ */
+
+#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1))
+
+typedef size_t (*ZSTD_sequenceProducer_F) (
+ void* sequenceProducerState,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel,
+ size_t windowSize
+);
+
+/*! ZSTD_registerSequenceProducer() :
+ * Instruct zstd to use a block-level external sequence producer function.
+ *
+ * The sequenceProducerState must be initialized by the caller, and the caller is
+ * responsible for managing its lifetime. This parameter is sticky across
+ * compressions. It will remain set until the user explicitly resets compression
+ * parameters.
+ *
+ * Sequence producer registration is considered to be an "advanced parameter",
+ * part of the "advanced API". This means it will only have an effect on compression
+ * APIs which respect advanced parameters, such as compress2() and compressStream2().
+ * Older compression APIs such as compressCCtx(), which predate the introduction of
+ * "advanced parameters", will ignore any external sequence producer setting.
+ *
+ * The sequence producer can be "cleared" by registering a NULL function pointer. This
+ * removes all limitations described above in the "LIMITATIONS" section of the API docs.
+ *
+ * The user is strongly encouraged to read the full API documentation (above) before
+ * calling this function. */
+ZSTDLIB_STATIC_API void
+ZSTD_registerSequenceProducer(
+ ZSTD_CCtx* cctx,
+ void* sequenceProducerState,
+ ZSTD_sequenceProducer_F sequenceProducer
+);
+
+/*! ZSTD_CCtxParams_registerSequenceProducer() :
+ * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.
+ * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),
+ * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().
+ *
+ * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()
+ * is required, then this function is for you. Otherwise, you probably don't need it.
+ *
+ * See tests/zstreamtest.c for example usage. */
+ZSTDLIB_STATIC_API void
+ZSTD_CCtxParams_registerSequenceProducer(
+ ZSTD_CCtx_params* params,
+ void* sequenceProducerState,
+ ZSTD_sequenceProducer_F sequenceProducer
+);
/* *******************************************************************
-* Buffer-less and synchronous inner streaming functions
+* Buffer-less and synchronous inner streaming functions (DEPRECATED)
*
-* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
-* But it's also a complex one, with several restrictions, documented below.
-* Prefer normal streaming API for an easier experience.
+* This API is deprecated, and will be removed in a future version.
+* It allows streaming (de)compression with user allocated buffers.
+* However, it is hard to use, and not as well tested as the rest of
+* our API.
+*
+* Please use the normal streaming API instead: ZSTD_compressStream2,
+* and ZSTD_decompressStream.
+* If there is functionality that you need, but it doesn't provide,
+* please open an issue on our GitHub.
********************************************************************* */
/*
@@ -2240,12 +2963,10 @@
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
- ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+ ZSTD_CCtx object can be reused multiple times within successive compression operations.
Start by initializing a context.
- Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
- or ZSTD_compressBegin_advanced(), for finer parameter control.
- It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
@@ -2263,37 +2984,49 @@
It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+ `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
-ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
-ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
+ZSTDLIB_STATIC_API
+size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+ZSTDLIB_STATIC_API
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("use advanced API to access custom parameters")
+ZSTDLIB_STATIC_API
+size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
/*
Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
- A ZSTD_DCtx object can be re-used multiple times.
+ A ZSTD_DCtx object can be reused multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
- @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
- >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
@@ -2309,7 +3042,7 @@
The most memory efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
- which can @return an error code if required value is too large for current system (in 32-bits mode).
+ which can return an error code if required value is too large for current system (in 32-bits mode).
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
@@ -2329,7 +3062,7 @@
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
@@ -2352,49 +3085,42 @@
*/
/*===== Buffer-less streaming decompression functions =====*/
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
-typedef struct {
- unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
- unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
- unsigned blockSizeMax;
- ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
- unsigned headerSize;
- unsigned dictID;
- unsigned checksumFlag;
-} ZSTD_frameHeader;
-/*! ZSTD_getFrameHeader() :
- * decode Frame Header, or requires larger `srcSize`.
- * @return : 0, `zfhPtr` is correctly filled,
- * >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
-ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
-/*! ZSTD_getFrameHeader_advanced() :
- * same as ZSTD_getFrameHeader(),
- * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
-ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* misc */
-ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
+ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
-ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-/* ============================ */
-/* Block level API */
-/* ============================ */
+/* ========================================= */
+/* Block level API (DEPRECATED) */
+/* ========================================= */
/*!
+
+ This API is deprecated in favor of the regular compression API.
+ You can get the frame header down to 2 bytes by setting:
+ - ZSTD_c_format = ZSTD_f_zstd1_magicless
+ - ZSTD_c_contentSizeFlag = 0
+ - ZSTD_c_checksumFlag = 0
+ - ZSTD_c_dictIDFlag = 0
+
+ This API is not as well tested as our normal API, so we recommend not using it.
+ We will be removing it in a future version. If the normal API doesn't provide
+ the functionality you need, please open a GitHub issue.
+
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
@@ -2405,7 +3131,6 @@
- It is necessary to init context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
- + copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
@@ -2422,11 +3147,14 @@
*/
/*===== Raw zstd block functions =====*/
-ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
-
Index: crypto/zstd.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/crypto/zstd.c b/crypto/zstd.c
--- a/crypto/zstd.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/crypto/zstd.c (date 1740124100057)
@@ -15,7 +15,8 @@
#include
-#define ZSTD_DEF_LEVEL 1
+static uint __read_mostly compression_level = 1;
+module_param(compression_level, uint, 0644);
struct zstd_ctx {
zstd_cctx *cctx;
@@ -26,7 +27,11 @@
static zstd_parameters zstd_params(void)
{
- return zstd_get_params(ZSTD_DEF_LEVEL, 0);
+ if (compression_level == 0)
+ compression_level = 1;
+ if (compression_level > zstd_max_clevel())
+ compression_level = zstd_max_clevel();
+ return zstd_get_params(compression_level, PAGE_SIZE);
}
static int zstd_comp_init(struct zstd_ctx *ctx)
Index: include/linux/zstd_errors.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
--- a/include/linux/zstd_errors.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/include/linux/zstd_errors.h (date 1740124164582)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -12,13 +13,18 @@
#define ZSTD_ERRORS_H_398273423
-/*===== dependency =====*/
-#include /* size_t */
-
-
/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
-#define ZSTDERRORLIB_VISIBILITY
-#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_VISIBLE
+
+#ifndef ZSTDERRORLIB_HIDDEN
+# if (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden")))
+# else
+# define ZSTDERRORLIB_HIDDEN
+# endif
+#endif
+
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
/*-*********************************************
* Error codes list
@@ -43,14 +49,18 @@
ZSTD_error_frameParameter_windowTooLarge = 16,
ZSTD_error_corruption_detected = 20,
ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_literals_headerWrong = 24,
ZSTD_error_dictionary_corrupted = 30,
ZSTD_error_dictionary_wrong = 32,
ZSTD_error_dictionaryCreation_failed = 34,
ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_combination_unsupported = 41,
ZSTD_error_parameter_outOfBound = 42,
ZSTD_error_tableLog_tooLarge = 44,
ZSTD_error_maxSymbolValue_tooLarge = 46,
ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_cannotProduce_uncompressedBlock = 49,
+ ZSTD_error_stabilityCondition_notRespected = 50,
ZSTD_error_stage_wrong = 60,
ZSTD_error_init_missing = 62,
ZSTD_error_memory_allocation = 64,
@@ -58,18 +68,18 @@
ZSTD_error_dstSize_tooSmall = 70,
ZSTD_error_srcSize_wrong = 72,
ZSTD_error_dstBuffer_null = 74,
+ ZSTD_error_noForwardProgress_destFull = 80,
+ ZSTD_error_noForwardProgress_inputEmpty = 82,
/* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_dstBuffer_wrong = 104,
ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_sequenceProducer_failed = 106,
+ ZSTD_error_externalSequences_invalid = 107,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
-/*! ZSTD_getErrorCode() :
- convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
- which can be used to compare with enum list published above */
-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
Index: lib/zstd/zstd_compress_module.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
--- a/lib/zstd/zstd_compress_module.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/zstd_compress_module.c (date 1740124333305)
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,6 +16,7 @@
#include "common/zstd_deps.h"
#include "common/zstd_internal.h"
+#include "compress/zstd_compress_internal.h"
#define ZSTD_FORWARD_IF_ERR(ret) \
do { \
@@ -79,12 +80,64 @@
}
EXPORT_SYMBOL(zstd_get_params);
+size_t zstd_cctx_set_param(zstd_cctx *cctx, ZSTD_cParameter param, int value)
+{
+ return ZSTD_CCtx_setParameter(cctx, param, value);
+}
+EXPORT_SYMBOL(zstd_cctx_set_param);
+
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cctx_workspace_bound);
+// Used by zstd_cctx_workspace_bound_with_ext_seq_prod()
+static size_t dummy_external_sequence_producer(
+ void *sequenceProducerState,
+ ZSTD_Sequence *outSeqs, size_t outSeqsCapacity,
+ const void *src, size_t srcSize,
+ const void *dict, size_t dictSize,
+ int compressionLevel,
+ size_t windowSize)
+{
+ (void)sequenceProducerState;
+ (void)outSeqs; (void)outSeqsCapacity;
+ (void)src; (void)srcSize;
+ (void)dict; (void)dictSize;
+ (void)compressionLevel;
+ (void)windowSize;
+ return ZSTD_SEQUENCE_PRODUCER_ERROR;
+}
+
+static void init_cctx_params_from_compress_params(
+ ZSTD_CCtx_params *cctx_params,
+ const zstd_compression_parameters *compress_params)
+{
+ ZSTD_parameters zstd_params;
+ memset(&zstd_params, 0, sizeof(zstd_params));
+ zstd_params.cParams = *compress_params;
+ ZSTD_CCtxParams_init_advanced(cctx_params, zstd_params);
+}
+
+size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params)
+{
+ ZSTD_CCtx_params cctx_params;
+ init_cctx_params_from_compress_params(&cctx_params, compress_params);
+ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer);
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&cctx_params);
+}
+EXPORT_SYMBOL(zstd_cctx_workspace_bound_with_ext_seq_prod);
+
+size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params)
+{
+ ZSTD_CCtx_params cctx_params;
+ init_cctx_params_from_compress_params(&cctx_params, compress_params);
+ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer);
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&cctx_params);
+}
+EXPORT_SYMBOL(zstd_cstream_workspace_bound_with_ext_seq_prod);
+
zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
@@ -133,7 +186,11 @@
size_t zstd_reset_cstream(zstd_cstream *cstream,
unsigned long long pledged_src_size)
{
- return ZSTD_resetCStream(cstream, pledged_src_size);
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+ ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only) );
+ ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size) );
+ return 0;
}
EXPORT_SYMBOL(zstd_reset_cstream);
@@ -156,5 +213,25 @@
}
EXPORT_SYMBOL(zstd_end_stream);
+void zstd_register_sequence_producer(
+ zstd_cctx *cctx,
+ void* sequence_producer_state,
+ zstd_sequence_producer_f sequence_producer
+) {
+ ZSTD_registerSequenceProducer(cctx, sequence_producer_state, sequence_producer);
+}
+EXPORT_SYMBOL(zstd_register_sequence_producer);
+
+size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity,
+ const zstd_sequence *in_seqs, size_t in_seqs_size,
+ const void* literals, size_t lit_size, size_t lit_capacity,
+ size_t decompressed_size)
+{
+ return ZSTD_compressSequencesAndLiterals(cctx, dst, dst_capacity, in_seqs,
+ in_seqs_size, literals, lit_size,
+ lit_capacity, decompressed_size);
+}
+EXPORT_SYMBOL(zstd_compress_sequences_and_literals);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");
Index: lib/zstd/zstd_decompress_module.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
--- a/lib/zstd/zstd_decompress_module.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/zstd_decompress_module.c (date 1740124333309)
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -77,7 +77,7 @@
size_t zstd_reset_dstream(zstd_dstream *dstream)
{
- return ZSTD_resetDStream(dstream);
+ return ZSTD_DCtx_reset(dstream, ZSTD_reset_session_only);
}
EXPORT_SYMBOL(zstd_reset_dstream);
Index: lib/zstd/Makefile
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
--- a/lib/zstd/Makefile (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/Makefile (date 1740124333297)
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
# ################################################################
-# Copyright (c) Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
@@ -26,6 +26,7 @@
compress/zstd_lazy.o \
compress/zstd_ldm.o \
compress/zstd_opt.o \
+ compress/zstd_preSplit.o \
zstd_decompress-y := \
zstd_decompress_module.o \
@@ -35,6 +36,7 @@
decompress/zstd_decompress_block.o \
zstd_common-y := \
+ zstd_common_module.o \
common/debug.o \
common/entropy_common.o \
common/error_private.o \
Index: lib/zstd/decompress_sources.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
--- a/lib/zstd/decompress_sources.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/decompress_sources.h (date 1740124333294)
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,6 +16,12 @@
* decompression.
*/
+/*
+ * Disable the ASM Huffman implementation because we need to
+ * include all the sources.
+ */
+#define ZSTD_DISABLE_ASM 1
+
#include "common/debug.c"
#include "common/entropy_common.c"
#include "common/error_private.c"
Index: lib/zstd/common/debug.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
--- a/lib/zstd/common/debug.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/debug.h (date 1740124241273)
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -33,7 +34,6 @@
#define DEBUG_H_12987983217
-
/* static assert is triggered at compile time, leaving no runtime artefact.
* static assert only works with compile-time constants.
* Also, this variant can only be used inside a function. */
@@ -82,20 +82,27 @@
It's useful when enabling very verbose levels
on selective conditions (such as position in src) */
-# define RAWLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- ZSTD_DEBUG_PRINT(__VA_ARGS__); \
- } }
-# define DEBUGLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
- ZSTD_DEBUG_PRINT(" \n"); \
- } }
+# define RAWLOG(l, ...) \
+ do { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define LINE_AS_STRING TOSTRING(__LINE__)
+
+# define DEBUGLOG(l, ...) \
+ do { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
+ } \
+ } while (0)
#else
-# define RAWLOG(l, ...) {} /* disabled */
-# define DEBUGLOG(l, ...) {} /* disabled */
+# define RAWLOG(l, ...) do { } while (0) /* disabled */
+# define DEBUGLOG(l, ...) do { } while (0) /* disabled */
#endif
-
-
#endif /* DEBUG_H_12987983217 */
Index: lib/zstd/common/bitstream.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
--- a/lib/zstd/common/bitstream.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/bitstream.h (date 1740124241253)
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* bitstream
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -27,7 +28,7 @@
#include "compiler.h" /* UNLIKELY() */
#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
#include "error_private.h" /* error codes and messages */
-
+#include "bits.h" /* ZSTD_highbit32 */
/*=========================================
* Target specific
@@ -41,12 +42,13 @@
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
+typedef size_t BitContainerType;
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct {
- size_t bitContainer;
+ BitContainerType bitContainer;
unsigned bitPos;
char* startPtr;
char* ptr;
@@ -54,7 +56,7 @@
} BIT_CStream_t;
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
@@ -63,7 +65,7 @@
* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
*
* bits are first added to a local register.
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
* Writing data into memory is an explicit operation, performed by the flushBits function.
* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
* After a flushBits, a maximum of 7 bits might still be stored into local register.
@@ -80,28 +82,28 @@
* bitStream decoding API (read backward)
**********************************************/
typedef struct {
- size_t bitContainer;
+ BitContainerType bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
const char* limitPtr;
} BIT_DStream_t;
-typedef enum { BIT_DStream_unfinished = 0,
- BIT_DStream_endOfBuffer = 1,
- BIT_DStream_completed = 2,
- BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
- /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */
+ BIT_DStream_endOfBuffer = 1, /* still some bits left in bitstream */
+ BIT_DStream_completed = 2, /* bitstream entirely consumed, bit-exact */
+ BIT_DStream_overflow = 3 /* user requested more bits than present in bitstream */
+ } BIT_DStream_status; /* result of BIT_reloadDStream() */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/* Start by invoking BIT_initDStream().
* A chunk of the bitStream is then stored into a local register.
-* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (BitContainerType).
* You can then retrieve bitFields stored into the local register, **in reverse order**.
* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
@@ -113,7 +115,7 @@
/*-****************************************
* unsafe API
******************************************/
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
@@ -122,33 +124,6 @@
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
-
-
-/*-**************************************************************
-* Internal functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# else /* Software version */
- static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
- 11, 14, 16, 18, 22, 25, 3, 30,
- 8, 12, 20, 28, 15, 17, 24, 7,
- 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-# endif
- }
-}
-
/*===== Local Constants =====*/
static const unsigned BIT_mask[] = {
0, 1, 3, 7, 0xF, 0x1F,
@@ -178,16 +153,22 @@
return 0;
}
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
/*! BIT_addBits() :
* can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
- bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos;
bitC->bitPos += nbBits;
}
@@ -195,7 +176,7 @@
* works only if `value` is _clean_,
* meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
assert((value>>nbBits) == 0);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
@@ -242,7 +223,7 @@
BIT_addBitsFast(bitC, 1, 1); /* endMark */
BIT_flushBits(bitC);
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+ return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}
@@ -266,35 +247,35 @@
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
} else {
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ case 7: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
ZSTD_FALLTHROUGH;
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ case 6: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
ZSTD_FALLTHROUGH;
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ case 5: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
ZSTD_FALLTHROUGH;
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ case 4: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[3]) << 24;
ZSTD_FALLTHROUGH;
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ case 3: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[2]) << 16;
ZSTD_FALLTHROUGH;
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ case 2: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[1]) << 8;
ZSTD_FALLTHROUGH;
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
}
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
@@ -303,23 +284,26 @@
return srcSize;
}
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start)
{
return bitContainer >> start;
}
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits)
{
U32 const regMask = sizeof(bitContainer)*8 - 1;
/* if start > regMask, bitstream is corrupted, and result is undefined */
assert(nbBits < BIT_MASK_SIZE);
+ /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better
+ * than accessing memory. When bmi2 instruction is not present, we consider
+ * such cpus old (pre-Haswell, 2013) and their performance is not of that
+ * importance.
+ */
+#if defined(__x86_64__) || defined(_M_X64)
+ return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
+#else
return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
-}
-
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
-{
- assert(nbBits < BIT_MASK_SIZE);
- return bitContainer & BIT_mask[nbBits];
+#endif
}
/*! BIT_lookBits() :
@@ -328,7 +312,7 @@
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
/* arbitrate between double-shift and shift+mask */
#if 1
@@ -344,14 +328,14 @@
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
assert(nbBits >= 1);
return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}
-MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
@@ -360,23 +344,38 @@
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBits(bitD, nbBits);
+ BitContainerType const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_readBitsFast() :
- * unsafe version; only works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ BitContainerType const value = BIT_lookBitsFast(bitD, nbBits);
assert(nbBits >= 1);
BIT_skipBits(bitD, nbBits);
return value;
}
+/*! BIT_reloadDStream_internal() :
+ * Simple variant of BIT_reloadDStream(), with two conditions:
+ * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
+ * 2. look window is valid after shifted down : bitD->ptr >= bitD->start
+ */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
+{
+ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ assert(bitD->ptr >= bitD->start);
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+}
+
/*! BIT_reloadDStreamFast() :
* Similar to BIT_reloadDStream(), but with two differences:
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
@@ -387,31 +386,35 @@
{
if (UNLIKELY(bitD->ptr < bitD->limitPtr))
return BIT_DStream_overflow;
- assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
- bitD->ptr -= bitD->bitsConsumed >> 3;
- bitD->bitsConsumed &= 7;
- bitD->bitContainer = MEM_readLEST(bitD->ptr);
- return BIT_DStream_unfinished;
+ return BIT_reloadDStream_internal(bitD);
}
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
- * This function is safe, it guarantees it will not read beyond src buffer.
+ * This function is safe, it guarantees it will not never beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ /* note : once in overflow mode, a bitstream remains in this mode until it's reset */
+ if (UNLIKELY(bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))) {
+ static const BitContainerType zeroFilled = 0;
+ bitD->ptr = (const char*)&zeroFilled; /* aliasing is allowed for char */
+ /* overflow detected, erroneous scenario or end of stream: no update */
return BIT_DStream_overflow;
+ }
+
+ assert(bitD->ptr >= bitD->start);
if (bitD->ptr >= bitD->limitPtr) {
- return BIT_reloadDStreamFast(bitD);
+ return BIT_reloadDStream_internal(bitD);
}
if (bitD->ptr == bitD->start) {
+ /* reached end of bitStream => no update */
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
- /* start < ptr < limitPtr */
+ /* start < ptr < limitPtr => cautious update */
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
@@ -433,5 +436,4 @@
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
-
#endif /* BITSTREAM_H_MODULE */
Index: lib/zstd/common/cpu.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
--- a/lib/zstd/common/cpu.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/cpu.h (date 1740124241265)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
Index: lib/zstd/common/zstd_common.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
--- a/lib/zstd/common/zstd_common.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/zstd_common.c (date 1740124241329)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,9 +14,7 @@
/*-*************************************
* Dependencies
***************************************/
-#include
#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
#include "zstd_internal.h"
@@ -36,58 +35,15 @@
* tells if a return value is an error code
* symbol is required for external callers */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
-EXPORT_SYMBOL_GPL(ZSTD_isError);
/*! ZSTD_getErrorName() :
* provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
-EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
/*! ZSTD_getError() :
* convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
-EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
/*! ZSTD_getErrorString() :
* provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
-/*=**************************************************************
-* Custom allocator
-****************************************************************/
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
-{
- if (customMem.customAlloc)
- return customMem.customAlloc(customMem.opaque, size);
- return ZSTD_malloc(size);
-}
-EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
-
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
-{
- if (customMem.customAlloc) {
- /* calloc implemented as malloc+memset;
- * not as efficient as calloc, but next best guess for custom malloc */
- void* const ptr = customMem.customAlloc(customMem.opaque, size);
- ZSTD_memset(ptr, 0, size);
- return ptr;
- }
- return ZSTD_calloc(1, size);
-}
-EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
-
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
-{
- if (ptr!=NULL) {
- if (customMem.customFree)
- customMem.customFree(customMem.opaque, ptr);
- else
- ZSTD_free(ptr);
- }
-}
-EXPORT_SYMBOL_GPL(ZSTD_customFree);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Zstd Common");
Index: lib/zstd/common/compiler.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
--- a/lib/zstd/common/compiler.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/compiler.h (date 1740124241260)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,6 +12,10 @@
#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H
+#include
+
+#include "portability_macros.h"
+
/*-*******************************************************
* Compiler specifics
*********************************************************/
@@ -34,17 +39,20 @@
/*
On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
- This explictly marks such functions as __cdecl so that the code will still compile
+ This explicitly marks such functions as __cdecl so that the code will still compile
if a CC other than __cdecl has been made the default.
*/
#define WIN_CDECL
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#define UNUSED_ATTR __attribute__((unused))
+
/*
* FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
* parameters. They must be inlined for the compiler to eliminate the constant
* branches.
*/
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
/*
* HINT_INLINE is used to help the compiler generate better code. It is *not*
* used for "templates", so it can be tweaked based on the compilers
@@ -59,36 +67,34 @@
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
# define HINT_INLINE static INLINE_KEYWORD
#else
-# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+# define HINT_INLINE FORCE_INLINE_TEMPLATE
#endif
-/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
-#define UNUSED_ATTR __attribute__((unused))
+/* "soft" inline :
+ * The compiler is free to select if it's a good idea to inline or not.
+ * The main objective is to silence compiler warnings
+ * when a defined function in included but not used.
+ *
+ * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit.
+ * Updating the prefix is probably preferable, but requires a fairly large codemod,
+ * since this name is used everywhere.
+ */
+#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */
+#define MEM_STATIC static __inline UNUSED_ATTR
+#endif
/* force no inlining */
#define FORCE_NOINLINE static __attribute__((__noinline__))
/* target attribute */
-#ifndef __has_attribute
- #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
-#endif
#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
-/* Enable runtime BMI2 dispatch based on the CPU.
- * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+/* Target attribute for BMI2 dynamic dispatch.
+ * Enable lzcnt, bmi, and bmi2.
+ * We test for bmi1 & bmi2. lzcnt is included in bmi1.
*/
-#ifndef DYNAMIC_BMI2
- #if ((defined(__clang__) && __has_attribute(__target__)) \
- || (defined(__GNUC__) \
- && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
- && (defined(__x86_64__) || defined(_M_X86)) \
- && !defined(__BMI2__)
- # define DYNAMIC_BMI2 1
- #else
- # define DYNAMIC_BMI2 0
- #endif
-#endif
+#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
/* prefetch
* can be disabled, by declaring NO_PREFETCH build macro */
@@ -96,27 +102,29 @@
# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#elif defined(__aarch64__)
-# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
-# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0)
+# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0)
#else
-# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
-# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */
+# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */
#endif /* NO_PREFETCH */
#define CACHELINE_SIZE 64
-#define PREFETCH_AREA(p, s) { \
- const char* const _ptr = (const char*)(p); \
- size_t const _size = (size_t)(s); \
- size_t _pos; \
- for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
- PREFETCH_L2(_ptr + _pos); \
- } \
-}
+#define PREFETCH_AREA(p, s) \
+ do { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+ } while (0)
/* vectorization
- * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
-#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
+ * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
+#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
# else
@@ -134,20 +142,15 @@
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
+#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
+# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0)
+#else
+# define ZSTD_UNREACHABLE do { assert(0); } while (0)
+#endif
+
/* disable warnings */
-/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
-
-
-/* compat. with non-clang compilers */
-#ifndef __has_builtin
-# define __has_builtin(x) 0
-#endif
-
-/* compat. with non-clang compilers */
-#ifndef __has_feature
-# define __has_feature(x) 0
-#endif
+/* compile time determination of SIMD support */
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
@@ -168,10 +171,119 @@
*/
#define ZSTD_FALLTHROUGH fallthrough
-/* detects whether we are being compiled under msan */
+/*-**************************************************************
+* Alignment
+*****************************************************************/
+
+/* @return 1 if @u is a 2^n value, 0 otherwise
+ * useful to check a value is valid for alignment restrictions */
+MEM_STATIC int ZSTD_isPower2(size_t u) {
+ return (u & (u-1)) == 0;
+}
+
+/* this test was initially positioned in mem.h,
+ * but this file is removed (or replaced) for linux kernel
+ * so it's now hosted in compiler.h,
+ * which remains valid for both user & kernel spaces.
+ */
+
+#ifndef ZSTD_ALIGNOF
+/* covers gcc, clang & MSVC */
+/* note : this section must come first, before C11,
+ * due to a limitation in the kernel source generator */
+# define ZSTD_ALIGNOF(T) __alignof(T)
+
+#endif /* ZSTD_ALIGNOF */
+
+#ifndef ZSTD_ALIGNED
+/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */
+#define ZSTD_ALIGNED(a) __attribute__((aligned(a)))
+#endif /* ZSTD_ALIGNED */
+
+
+/*-**************************************************************
+* Sanitizer
+*****************************************************************/
+
+/*
+ * Zstd relies on pointer overflow in its decompressor.
+ * We add this attribute to functions that rely on pointer overflow.
+ */
+#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+# if __has_attribute(no_sanitize)
+# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
+ /* gcc < 8 only has signed-integer-overlow which triggers on pointer overflow */
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
+# else
+ /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
+# endif
+# else
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+# endif
+#endif
+
+/*
+ * Helper function to perform a wrapped pointer difference without triggering
+ * UBSAN.
+ *
+ * @returns lhs - rhs with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
+{
+ return lhs - rhs;
+}
+
+/*
+ * Helper function to perform a wrapped pointer add without triggering UBSAN.
+ *
+ * @return ptr + add with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
+{
+ return ptr + add;
+}
+/*
+ * Helper function to perform a wrapped pointer subtraction without triggering
+ * UBSAN.
+ *
+ * @return ptr - sub with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
+{
+ return ptr - sub;
+}
-/* detects whether we are being compiled under asan */
+/*
+ * Helper function to add to a pointer that works around C's undefined behavior
+ * of adding 0 to NULL.
+ *
+ * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
+ */
+MEM_STATIC
+unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
+{
+ return add > 0 ? ptr + add : ptr;
+}
+
+/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
+ * abundance of caution, disable our custom poisoning on mingw. */
+#ifdef __MINGW32__
+#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
+#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
+#endif
+#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
+#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
+#endif
+#endif
+
#endif /* ZSTD_COMPILER_H */
Index: lib/zstd/common/huf.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
--- a/lib/zstd/common/huf.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/huf.h (date 1740124241310)
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* huff0 huffman codec,
* part of Finite State Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -12,112 +13,33 @@
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-
#ifndef HUF_H_298734234
#define HUF_H_298734234
/* *** Dependencies *** */
#include "zstd_deps.h" /* size_t */
-
-
-/* *** library symbols visibility *** */
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- * HUF symbols remain "private" (internal symbols for library only).
- * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
-# define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-# define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* *** simple functions *** */
-/* ========================== */
-
-/* HUF_compress() :
- * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- * 'dst' buffer must be already allocated.
- * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
-
-/* HUF_decompress() :
- * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- * into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- * Note : in contrast with FSE, HUF_decompress can regenerate
- * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- * because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- * or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
- const void* cSrc, size_t cSrcSize);
-
-
-/* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
-HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
-
-/* Error Management */
-HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
-
-
-/* *** Advanced function *** */
-
-/* HUF_compress2() :
- * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog);
-
-/* HUF_compress4X_wksp() :
- * Same as HUF_compress2(), but uses externally allocated `workSpace`.
- * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
-#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
-#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog,
- void* workSpace, size_t wkspSize);
-
-#endif /* HUF_H_298734234 */
-
-/* ******************************************************************
- * WARNING !!
- * The following section contains advanced and experimental definitions
- * which shall never be used in the context of a dynamic library,
- * because they are not guaranteed to remain stable in the future.
- * Only consider them in association with static linking.
- * *****************************************************************/
-#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
-
-/* *** Dependencies *** */
-#include "mem.h" /* U32 */
+#include "mem.h" /* U32 */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
+size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
+
+/* Error Management */
+unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+
+
+#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
/* *** Constants *** */
-#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX 255
-#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
# error "HUF_TABLELOG_MAX is too large !"
#endif
@@ -133,15 +55,11 @@
/* static allocation of HUF's Compression Table */
/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
-struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
-}; /* typedef'd to HUF_CElt */
-typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */
-#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */
-#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+typedef size_t HUF_CElt; /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
- HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
+ HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
@@ -155,25 +73,49 @@
/* ****************************************
* Advanced decompression functions
******************************************/
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-#endif
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< decodes RLE and uncompressed */
-size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
-#endif
+/*
+ * Huffman flags bitset.
+ * For all flags, 0 is the default value.
+ */
+typedef enum {
+ /*
+ * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
+ * Otherwise: Ignored.
+ */
+ HUF_flags_bmi2 = (1 << 0),
+ /*
+ * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
+ * If unset: Use heuristic to find the table depth.
+ */
+ HUF_flags_optimalDepth = (1 << 1),
+ /*
+ * If set: If the previous table can encode the input, always reuse the previous table.
+ * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
+ */
+ HUF_flags_preferRepeat = (1 << 2),
+ /*
+ * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
+ * If unset: Always histogram the entire input.
+ */
+ HUF_flags_suspectUncompressible = (1 << 3),
+ /*
+ * If set: Don't use assembly implementations
+ * If unset: Allow using assembly implementations
+ */
+ HUF_flags_disableAsm = (1 << 4),
+ /*
+ * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
+ * If unset: Use the fast decoding loop when possible.
+ */
+ HUF_flags_disableFast = (1 << 5)
+} HUF_flags_e;
/* ****************************************
* HUF detailed API
* ****************************************/
+#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra
/*! HUF_compress() does the following:
* 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
@@ -186,11 +128,12 @@
* For example, it's possible to compress several blocks using the same 'CTable',
* or to save and regenerate 'CTable' using external methods.
*/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+unsigned HUF_minTableLog(unsigned symbolCardinality);
+unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
+ size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
@@ -199,22 +142,24 @@
HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
+
/* HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- * If preferRepeat then the old table will always be used if valid. */
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
/* HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
*/
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
@@ -240,17 +185,29 @@
U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
- int bmi2);
+ int flags);
/* HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
-/* HUF_getNbBits() :
+/* HUF_getNbBitsFromCTable() :
* Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
- * Note 1 : is not inlined, as HUF_CElt definition is private
- * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
-U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+ * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
+ * Note 2 : is not inlined, as HUF_CElt definition is private
+ */
+U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+
+typedef struct {
+ BYTE tableLog;
+ BYTE maxSymbolValue;
+ BYTE unused[sizeof(size_t) - 2];
+} HUF_CTableHeader;
+
+/* HUF_readCTableHeader() :
+ * @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
+ */
+HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable);
/*
* HUF_decompress() does the following:
@@ -279,78 +236,43 @@
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-
/* ====================== */
/* single stream variants */
/* ====================== */
-size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
/* HUF_compress1X_repeat() :
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- * If preferRepeat then the old table will always be used if valid. */
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /*< double-symbols decoder */
#endif
-size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
-#endif
-
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of sing or double symbol decoder, based on DTable */
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
-/* BMI2 variants.
- * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
- */
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
-#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
-#endif
-
-#endif /* HUF_STATIC_LINKING_ONLY */
-
+#endif /* HUF_H_298734234 */
Index: lib/zstd/common/mem.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
--- a/lib/zstd/common/mem.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/mem.h (date 1740124241317)
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -24,12 +24,15 @@
/*-****************************************
* Compiler specifics
******************************************/
+#undef MEM_STATIC /* may be already defined from common/compiler.h */
#define MEM_STATIC static inline
/*-**************************************************************
* Basic Types
*****************************************************************/
typedef uint8_t BYTE;
+typedef uint8_t U8;
+typedef int8_t S8;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
Index: lib/zstd/common/fse.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
--- a/lib/zstd/common/fse.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/fse.h (date 1740124241297)
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* FSE : Finite State Entropy codec
* Public Prototypes declaration
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -11,8 +12,6 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-
-
#ifndef FSE_H
#define FSE_H
@@ -22,7 +21,6 @@
******************************************/
#include "zstd_deps.h" /* size_t, ptrdiff_t */
-
/*-*****************************************
* FSE_PUBLIC_API : control library symbols visibility
******************************************/
@@ -50,34 +48,6 @@
FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */
-/*-****************************************
-* FSE simple functions
-******************************************/
-/*! FSE_compress() :
- Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
- 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize).
- @return : size of compressed data (<= dstCapacity).
- Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
- if FSE_isError(return), compression failed (more details using FSE_getErrorName())
-*/
-FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
-
-/*! FSE_decompress():
- Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
- into already allocated destination buffer 'dst', of size 'dstCapacity'.
- @return : size of regenerated data (<= maxDstSize),
- or an error code, which can be tested using FSE_isError() .
-
- ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
- Why ? : making this distinction requires a header.
- Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
- const void* cSrc, size_t cSrcSize);
-
-
/*-*****************************************
* Tool functions
******************************************/
@@ -88,20 +58,6 @@
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
-/*-*****************************************
-* FSE advanced functions
-******************************************/
-/*! FSE_compress2() :
- Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
- Both parameters can be defined as '0' to mean : use default value
- @return : size of compressed data
- Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
- if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
- if FSE_isError(return), it's an error code.
-*/
-FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-
-
/*-*****************************************
* FSE detailed API
******************************************/
@@ -161,8 +117,6 @@
/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
/*! FSE_buildCTable():
Builds `ct`, which must be already allocated, using FSE_createCTable().
@@ -238,23 +192,7 @@
unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
const void* rBuffer, size_t rBuffSize, int bmi2);
-/*! Constructor and Destructor of FSE_DTable.
- Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
-FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
-
-/*! FSE_buildDTable():
- Builds 'dt', which must be already allocated, using FSE_createDTable().
- return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_decompress_usingDTable():
- Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
- into `dst` which must be already allocated.
- @return : size of regenerated data (necessarily <= `dstCapacity`),
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
/*!
Tutorial :
@@ -286,13 +224,11 @@
#endif /* FSE_H */
+
#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY
-
-/* *** Dependency *** */
#include "bitstream.h"
-
/* *****************************************
* Static allocation
*******************************************/
@@ -317,24 +253,15 @@
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/*< same as FSE_optimalTableLog(), which used `minus==2` */
-/* FSE_compress_wksp() :
- * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
- * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
- */
-#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
-size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
-
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
-/*< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
-
size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
+ * See FSE_buildCTable_wksp() for breakdown of workspace usage.
*/
-#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
@@ -343,19 +270,11 @@
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/*< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/*< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
-
-size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/*< build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
-/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
-
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
-/*< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
+/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`.
+ * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */
typedef enum {
FSE_repeat_none, /*< Cannot use the previous table */
@@ -538,20 +457,20 @@
FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
- BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut);
statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
- BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog);
BIT_flushBits(bitC);
}
/* FSE_getMaxNbBits() :
* Approximate maximum cost of a symbol, in bits.
- * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
@@ -704,7 +623,4 @@
#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
-
#endif /* FSE_STATIC_LINKING_ONLY */
-
-
Index: lib/zstd/common/zstd_deps.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
--- a/lib/zstd/common/zstd_deps.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/zstd_deps.h (date 1740124241333)
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -115,11 +115,7 @@
#ifndef ZSTD_DEPS_STDINT
#define ZSTD_DEPS_STDINT
-/*
- * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
- * is an unsigned long.
- */
-typedef long intptr_t;
+/* intptr_t already provided by ZSTD_DEPS_COMMON */
#endif /* ZSTD_DEPS_STDINT */
#endif /* ZSTD_DEPS_NEED_STDINT */
Index: lib/zstd/compress/zstd_double_fast.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
--- a/lib/zstd/compress/zstd_double_fast.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_double_fast.c (date 1740124241447)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +12,49 @@
#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0) {
+ ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
+ }
+ if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
+ ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
+ }
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ } }
+}
+
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -43,24 +85,35 @@
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
- } }
+ } }
}
-#ifdef DSLAB_OPTIMIZE_COMPRESS
+
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
+{
+ if (tfp == ZSTD_tfp_forCDict) {
+ ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
+ } else {
+ ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
+ }
+}
+
+
FORCE_INLINE_TEMPLATE
-size_t DSLAB_compressBlock_doubleFast_generic_nodict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize,
- U32 const mls /* template */)
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
const U32 hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
- // 这个函数里面的hashLong和hashSmall
const U32 hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
/* presumes that, if there is a dictionary, it must be using Attach mode */
@@ -69,168 +122,214 @@
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
- BYTE loop_mode = 0;
- const U32 dictAndPrefixLength = (U32)(ip - prefixLowest);
-
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
- DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
-
- /* init */
- ip += (dictAndPrefixLength == 0);
- {
- U32 const curr = (U32)(ip - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
- U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t soffset;
- U32 offset;
- const BYTE* match;
+ size_t mLength;
+ U32 offset;
+ U32 curr;
+
+ /* how many positions to search before increasing step size */
+ const size_t kStepIncr = 1 << kSearchStrength;
+ /* the position at which to increment the step size if no match is found */
+ const BYTE* nextStep;
+ size_t step; /* the current step size */
+
+ size_t hl0; /* the long hash at ip */
+ size_t hl1; /* the long hash at ip1 */
+
+ U32 idxl0; /* the long match index for ip */
+ U32 idxl1; /* the long match index for ip1 */
+
+ const BYTE* matchl0; /* the long match for ip */
+ const BYTE* matchs0; /* the short match for ip */
+ const BYTE* matchl1; /* the long match for ip1 */
+ const BYTE* matchs0_safe; /* matchs0 or safe address */
+
+ const BYTE* ip = istart; /* the current position */
+ const BYTE* ip1; /* the next position */
+ /* Array of ~random data, should have low probability of matching data
+ * we load from here instead of from tables, if matchl0/matchl1 are
+ * invalid indices. Used to avoid unpredictable branches. */
+ const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4};
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
+
+ /* init */
+ ip += ((ip - prefixLowest) == 0);
+ {
+ U32 const current = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
+ U32 const maxRep = current - windowLow;
+ if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
+ }
+
+ /* Outer Loop: one iteration per match found and stored */
+ while (1) {
+ step = 1;
+ nextStep = ip + kStepIncr;
+ ip1 = ip + step;
+
+ if (ip1 > ilimit) {
+ goto _cleanup;
+ }
+
+ hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
+ idxl0 = hashLong[hl0];
+ matchl0 = base + idxl0;
+
+ /* Inner Loop: one iteration per search / position */
+ do {
+ const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 idxs0 = hashSmall[hs0];
+ curr = (U32)(ip-base);
+ matchs0 = base + idxs0;
+
+ hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */
+
+ /* check noDict repcode */
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
+ goto _match_stored;
+ }
+
+ hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
+
+ /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch.
+ * However expression below complies into conditional move. Since
+ * match is unlikely and we only *branch* on idxl0 > prefixLowestIndex
+ * if there is a match, all branches become predictable. */
+ { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]);
+
+ /* check prefix long match */
+ if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) {
+ mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
+ offset = (U32)(ip-matchl0);
+ while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
+ goto _match_found;
+ } }
- size_t const hL = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const hS = ZSTD_hashPtr(ip, hBitsS, mls);
- U32 const curr = (U32)(ip-base);
- U32 hLong_val = curr;
- // 预读取指令
- #if defined(__aarch64__)
- PREFETCH_L1(ip+256);
- #endif
- // 这是原版后面一大坨函数修改而来的,通过增加一个loop_mode变量将其优化成一堆较小的代码
- if (loop_mode == 1) { /* 49.68% true */
- if (offset_2 > 0){
- match = ip - offset_2;
- if (MEM_read32(ip) == MEM_read32(match)){
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- offset = 0;
- soffset = 4;
- goto _match_stored;
- }
- }
- loop_mode = 0;
- }
- //分支预测优化
- if (offset_1 > 0){
- match = ip + 1 - offset_1;
- if (MEM_read32(match) == MEM_read32(ip+1)) {
- ip++;
- offset = 0;
- soffset = 4;
- goto _match_stored;
- }
- }
-
- {
- // 这是两个很大的表,在原版当中多次查表会导致频繁cache miss,浪费很多时间,但有些时候是不需要查表的,所以我们把不需要的分支放上面,然后在结尾放一个goto语句,这样子就节约了时间。
- U32 const matchIndexL = hashLong[hL];
- U32 const matchIndexS = hashSmall[hS];
- if (matchIndexL > prefixLowestIndex) {
- match = base + matchIndexL;
- if (MEM_read64(match) == MEM_read64(ip)) {
- soffset = 8;
- goto _match_found;
- }
- }
-
- match = base + matchIndexS;
- if (matchIndexS <= prefixLowestIndex || MEM_read32(match) != MEM_read32(ip)){
- hashLong[hL] = hashSmall[hS] = curr;
- ip += ((ip-anchor) >> kSearchStrength) + 1;
- continue;
- }
- }
-
- {
- size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- U32 const matchIndexL3 = hl3 == hL ? curr : hashLong[hl3];
- if (hl3 == hL){
- hLong_val = curr + 1;
- } else {
- hashLong[hl3] = curr + 1;
- }
+ idxl1 = hashLong[hl1];
+ matchl1 = base + idxl1;
+
+ /* Same optimization as matchl0 above */
+ matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]);
+
+ /* check prefix short match */
+ if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) {
+ goto _search_next_long;
+ }
+
+ if (ip1 >= nextStep) {
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ step++;
+ nextStep += kStepIncr;
+ }
+ ip = ip1;
+ ip1 += step;
+
+ hl0 = hl1;
+ idxl0 = idxl1;
+ matchl0 = matchl1;
+ #if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+ #endif
+ } while (ip1 <= ilimit);
+
+_cleanup:
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_search_next_long:
+
+ /* short match found: let's check for a longer one */
+ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+ offset = (U32)(ip - matchs0);
+
+ /* check long match at +1 position */
+ if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) {
+ size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8;
+ if (l1len > mLength) {
+ /* use the long match instead */
+ ip = ip1;
+ mLength = l1len;
+ offset = (U32)(ip-matchl1);
+ matchs0 = matchl1;
+ }
+ }
+
+ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */
+
+ /* fall-through */
+
+_match_found: /* requires ip, offset, mLength */
+ offset_2 = offset_1;
+ offset_1 = offset;
- /* check prefix long +1 match */
- if (matchIndexL3 > prefixLowestIndex) {
- if (MEM_read64(base + matchIndexL3) == MEM_read64(ip+1)) {
- match = base + matchIndexL3;
- ip++;
- soffset = 8;
- }
- }
- soffset = 4;
+ if (step < 4) {
+ /* It is unsafe to write this value back to the hashtable when ip1 is
+ * greater than or equal to the new ip we will have after we're done
+ * processing this match. Rather than perform that test directly
+ * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
+ * more predictable test. The minmatch even if we take a short match is
+ * 4 bytes, so as long as step, the distance between ip and ip1
+ * (initially) is less than 4, we know ip1 < new ip. */
+ hashLong[hl1] = (U32)(ip1 - base);
}
-_match_found:
- {
- while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; soffset++; }
- offset_2 = offset_1;
- offset_1 = (U32)(ip-match);
- offset = offset_1 + ZSTD_REP_MOVE;
- }
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
_match_stored:
- {
- const size_t mLength = ZSTD_count(ip+soffset, match+soffset, iend)+soffset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset, mLength-MINMATCH);
-
- hashSmall[hS] = curr;
- hashLong[hL] = hLong_val;
- /* match found */
- ip += mLength;
- anchor = ip;
- }
+ /* match found */
+ ip += mLength;
+ anchor = ip;
- if (loop_mode == 0 && ip <= ilimit) {
+ if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
- U32 const indexToInsert = curr+2;
- loop_mode = 1;
- hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
- hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
- hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
- hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
- }
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
- } /* while (ip < ilimit) */
- //这一堆也是源自于原版后面部分的代码
- if (loop_mode == 1 && ip == ilimit && offset_2 > 0){
- const BYTE* match = ip - offset_2;
- if (MEM_read32(ip) == MEM_read32(match)){
- U32 const curr = (U32)(ip-base);
- size_t const hL = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const hS = ZSTD_hashPtr(ip, hBitsS, mls);
- const size_t mLength = ZSTD_count(ip+4, match+4, iend)+4;
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, mLength-MINMATCH);
- hashSmall[hS] = curr;
- hashLong[hL] = curr;
- ip += mLength;
- anchor = ip;
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
}
}
-
- /* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Return the last literals size */
- return (size_t)(iend - anchor);
}
-#endif
+
FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_doubleFast_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
+ U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
@@ -248,57 +347,39 @@
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const ZSTD_compressionParameters* const dictCParams =
- dictMode == ZSTD_dictMatchState ?
- &dms->cParams : NULL;
- const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ?
- dms->hashTable : NULL;
- const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
- dms->chainTable : NULL;
- const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
- dms->window.dictLimit : 0;
- const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
- dms->window.base : NULL;
- const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
- dictBase + dictStartIndex : NULL;
- const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
- dms->window.nextSrc : NULL;
- const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
- prefixLowestIndex - (U32)(dictEnd - dictBase) :
- 0;
- const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ?
- dictCParams->hashLog : hBitsL;
- const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
- dictCParams->chainLog : hBitsS;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+ const U32* const dictHashLong = dms->hashTable;
+ const U32* const dictHashSmall = dms->chainTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
+ const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
- DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
-
- assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
/* if a dictionary is attached, it must be within window range */
- if (dictMode == ZSTD_dictMatchState) {
- assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+ size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
+ PREFETCH_AREA(dictHashLong, hashTableBytes);
+ PREFETCH_AREA(dictHashSmall, chainTableBytes);
}
/* init */
ip += (dictAndPrefixLength == 0);
- if (dictMode == ZSTD_noDict) {
- U32 const curr = (U32)(ip - base);
- U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
- U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
- if (dictMode == ZSTD_dictMatchState) {
- /* dictMatchState repCode checks don't currently handle repCode == 0
- * disabling. */
- assert(offset_1 <= dictAndPrefixLength);
- assert(offset_2 <= dictAndPrefixLength);
- }
+
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
@@ -306,51 +387,42 @@
U32 offset;
size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
- size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
- size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
+ U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
+ int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
U32 const curr = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
const BYTE* matchLong = base + matchIndexL;
const BYTE* match = base + matchIndexS;
const U32 repIndex = curr + 1 - offset_1;
- const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
- && repIndex < prefixLowestIndex) ?
+ const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
- /* check dictMatchState repcode */
- if (dictMode == ZSTD_dictMatchState
- && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ /* check repcode */
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
goto _match_stored;
}
- /* check noDict repcode */
- if ( dictMode == ZSTD_noDict
- && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
- mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
- goto _match_stored;
- }
-
- if (matchIndexL > prefixLowestIndex) {
+ if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
/* check prefix long match */
- if (MEM_read64(matchLong) == MEM_read64(ip)) {
- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
- offset = (U32)(ip-matchLong);
- while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- goto _match_found;
- }
- } else if (dictMode == ZSTD_dictMatchState) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ } else if (dictTagsMatchL) {
/* check dictMatchState long match */
- U32 const dictMatchIndexL = dictHashLong[dictHL];
+ U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* dictMatchL = dictBase + dictMatchIndexL;
assert(dictMatchL < dictEnd);
@@ -362,13 +434,13 @@
} }
if (matchIndexS > prefixLowestIndex) {
- /* check prefix short match */
+ /* short match candidate */
if (MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
}
- } else if (dictMode == ZSTD_dictMatchState) {
+ } else if (dictTagsMatchS) {
/* check dictMatchState short match */
- U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
match = dictBase + dictMatchIndexS;
matchIndexS = dictMatchIndexS + dictIndexDelta;
@@ -383,25 +455,24 @@
continue;
_search_next_long:
-
{ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
U32 const matchIndexL3 = hashLong[hl3];
+ U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
const BYTE* matchL3 = base + matchIndexL3;
hashLong[hl3] = curr + 1;
/* check prefix long +1 match */
- if (matchIndexL3 > prefixLowestIndex) {
- if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
- ip++;
- offset = (U32)(ip-matchL3);
- while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
- goto _match_found;
- }
- } else if (dictMode == ZSTD_dictMatchState) {
+ if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ } else if (dictTagsMatchL3) {
/* check dict long +1 match */
- U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
assert(dictMatchL3 < dictEnd);
if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
@@ -413,7 +484,7 @@
} } }
/* if no long +1 match, explore the short match we found */
- if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+ if (matchIndexS < prefixLowestIndex) {
mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
offset = (U32)(curr - matchIndexS);
while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
@@ -427,7 +498,7 @@
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
_match_stored:
/* match found */
@@ -445,88 +516,78 @@
}
/* check immediate repcode */
- if (dictMode == ZSTD_dictMatchState) {
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
- && repIndex2 < prefixLowestIndex ?
- dictBase + repIndex2 - dictIndexDelta :
- base + repIndex2;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } }
-
- if (dictMode == ZSTD_noDict) {
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } }
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
+ dictBase + repIndex2 - dictIndexDelta :
+ base + repIndex2;
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
} /* while (ip < ilimit) */
/* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+ rep[0] = offset_1;
+ rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
+ static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
+ }
+
+ZSTD_GEN_DFAST_FN(noDict, 4)
+ZSTD_GEN_DFAST_FN(noDict, 5)
+ZSTD_GEN_DFAST_FN(noDict, 6)
+ZSTD_GEN_DFAST_FN(noDict, 7)
+
+ZSTD_GEN_DFAST_FN(dictMatchState, 4)
+ZSTD_GEN_DFAST_FN(dictMatchState, 5)
+ZSTD_GEN_DFAST_FN(dictMatchState, 6)
+ZSTD_GEN_DFAST_FN(dictMatchState, 7)
+
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
- #ifdef DSLAB_OPTIMIZE_COMPRESS
- // 劫持函数
- case 4 :
- return DSLAB_compressBlock_doubleFast_generic_nodict(ms, seqStore, rep, src, srcSize, 4);
- case 5 :
- return DSLAB_compressBlock_doubleFast_generic_nodict(ms, seqStore, rep, src, srcSize, 5);
- case 6 :
- return DSLAB_compressBlock_doubleFast_generic_nodict(ms, seqStore, rep, src, srcSize, 6);
- case 7 :
- return DSLAB_compressBlock_doubleFast_generic_nodict(ms, seqStore, rep, src, srcSize, 7);
- #else
- case 4 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
- case 5 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
- case 6 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
- case 7 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
- #endif
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
}
}
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
@@ -534,19 +595,21 @@
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
case 5 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
case 6 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
case 7 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
}
}
-static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
@@ -576,7 +639,7 @@
/* if extDict is invalidated due to maxDistance, switch to "regular" variant */
if (prefixStartIndex == dictStartIndex)
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
+ return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
@@ -597,13 +660,13 @@
size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
- if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
- & (repIndex > dictStartIndex))
+ if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
+ & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
} else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -614,7 +677,7 @@
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -639,7 +702,7 @@
}
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
} else {
ip += ((ip-anchor) >> kSearchStrength) + 1;
@@ -665,13 +728,13 @@
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
- & (repIndex2 > dictStartIndex))
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
+ & (offset_2 <= current2 - dictStartIndex))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
@@ -689,9 +752,13 @@
return (size_t)(iend - anchor);
}
+ZSTD_GEN_DFAST_FN(extDict, 4)
+ZSTD_GEN_DFAST_FN(extDict, 5)
+ZSTD_GEN_DFAST_FN(extDict, 6)
+ZSTD_GEN_DFAST_FN(extDict, 7)
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -699,12 +766,14 @@
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
case 5 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
case 6 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
case 7 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
}
}
+
+#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
Index: lib/zstd/common/error_private.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
--- a/lib/zstd/common/error_private.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/error_private.c (date 1740124241284)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -27,9 +28,11 @@
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
- case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(corruption_detected): return "Data corruption detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification";
case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters";
case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
@@ -38,17 +41,23 @@
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block";
+ case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full";
+ case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty";
/* following error codes are not stable and may be removed or changed in a future version */
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
+ case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code";
+ case PREFIX(externalSequences_invalid): return "External sequences are not valid";
case PREFIX(maxCode):
default: return notErrorCode;
}
Index: lib/zstd/common/error_private.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
--- a/lib/zstd/common/error_private.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/error_private.h (date 1740124241289)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,14 +14,13 @@
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
-
-
/* ****************************************
* Dependencies
******************************************/
-#include "zstd_deps.h" /* size_t */
#include /* enum list */
-
+#include "compiler.h"
+#include "debug.h"
+#include "zstd_deps.h" /* size_t */
/* ****************************************
* Compiler-specific
@@ -47,8 +47,13 @@
ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
/* check and forward error code */
-#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
-#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+#define CHECK_V_F(e, f) \
+ size_t const e = f; \
+ do { \
+ if (ERR_isError(e)) \
+ return e; \
+ } while (0)
+#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0)
/*-****************************************
@@ -62,5 +67,85 @@
return ERR_getErrorString(ERR_getErrorCode(code));
}
+/*
+ * Ignore: this is an internal helper.
+ *
+ * This is a helper function to help force C99-correctness during compilation.
+ * Under strict compilation modes, variadic macro arguments can't be empty.
+ * However, variadic function arguments can be. Using a function therefore lets
+ * us statically check that at least one (string) argument was passed,
+ * independent of the compilation flags.
+ */
+static INLINE_KEYWORD UNUSED_ATTR
+void _force_has_format_string(const char *format, ...) {
+ (void)format;
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+#define _FORCE_HAS_FORMAT_STRING(...) \
+ do { \
+ if (0) { \
+ _force_has_format_string(__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define ERR_QUOTE(str) #str
+
+/*
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ do { \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } \
+ } while (0)
+
+/*
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0)
+
+/*
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0)
#endif /* ERROR_H_MODULE */
Index: lib/zstd/common/entropy_common.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
--- a/lib/zstd/common/entropy_common.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/entropy_common.c (date 1740124241277)
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* Common functions of New Generation Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -15,13 +16,12 @@
/* *************************************
* Dependencies
***************************************/
-#include
#include "mem.h"
#include "error_private.h" /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"
+#include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */
/*=== Version ===*/
@@ -39,23 +39,6 @@
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
-static U32 FSE_ctz(U32 val)
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* GCC Intrinsic */
- return __builtin_ctz(val);
-# else /* Software version */
- U32 count = 0;
- while ((val & 1) == 0) {
- val >>= 1;
- ++count;
- }
- return count;
-# endif
- }
-}
-
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
@@ -103,7 +86,7 @@
* repeat.
* Avoid UB by setting the high bit to 1.
*/
- int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
while (repeats >= 12) {
charnum += 3 * 12;
if (LIKELY(ip <= iend-7)) {
@@ -114,7 +97,7 @@
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
- repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
}
charnum += 3 * repeats;
bitStream >>= 2 * repeats;
@@ -179,7 +162,7 @@
* know that threshold > 1.
*/
if (remaining <= 1) break;
- nbBits = BIT_highbit32(remaining) + 1;
+ nbBits = ZSTD_highbit32(remaining) + 1;
threshold = 1 << (nbBits - 1);
}
if (charnum >= maxSV1) break;
@@ -213,7 +196,7 @@
}
#if DYNAMIC_BMI2
-TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
+BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
@@ -240,7 +223,7 @@
{
return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
-EXPORT_SYMBOL_GPL(FSE_readNCount);
+
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
@@ -254,9 +237,8 @@
const void* src, size_t srcSize)
{
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
- return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
}
-EXPORT_SYMBOL_GPL(HUF_readStats);
FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
@@ -296,21 +278,21 @@
ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
} }
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
- { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ { U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
- U32 const verif = 1 << BIT_highbit32(rest);
- U32 const lastWeight = BIT_highbit32(rest) + 1;
+ U32 const verif = 1 << ZSTD_highbit32(rest);
+ U32 const lastWeight = ZSTD_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
@@ -334,7 +316,7 @@
}
#if DYNAMIC_BMI2
-static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
@@ -347,14 +329,13 @@
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
- int bmi2)
+ int flags)
{
#if DYNAMIC_BMI2
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
#endif
- (void)bmi2;
+ (void)flags;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
-EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
Index: lib/zstd/common/fse_decompress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
--- a/lib/zstd/common/fse_decompress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/fse_decompress.c (date 1740124241304)
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* FSE : Finite State Entropy decoder
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -22,8 +23,8 @@
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
-#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"
+#include "zstd_deps.h" /* ZSTD_memcpy */
+#include "bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -55,19 +56,6 @@
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-/* Function templates */
-FSE_DTable* FSE_createDTable (unsigned tableLog)
-{
- if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSE_freeDTable (FSE_DTable* dt)
-{
- ZSTD_free(dt);
-}
-
static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
@@ -96,7 +84,7 @@
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
- symbolNext[s] = normalizedCounter[s];
+ symbolNext[s] = (U16)normalizedCounter[s];
} } }
ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
@@ -111,8 +99,7 @@
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
- {
- U64 const add = 0x0101010101010101ull;
+ { U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
@@ -123,14 +110,13 @@
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
- pos += n;
- }
- }
+ pos += (size_t)n;
+ } }
/* Now we spread those positions across the table.
- * The benefit of doing it in two stages is that we avoid the the
+ * The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
- * We unroll the loop twice, since that is what emperically worked best.
+ * We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
@@ -166,7 +152,7 @@
for (u=0; utableLog = 0;
- DTableH->fastMode = 0;
-
- cell->newState = 0;
- cell->symbol = symbolValue;
- cell->nbBits = 0;
-
- return 0;
-}
-
-
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
- void* ptr = dt;
- FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
- void* dPtr = dt + 1;
- FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSV1 = tableMask+1;
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1) return ERROR(GENERIC); /* min size */
-
- /* Build Decoding Table */
- DTableH->tableLog = (U16)nbBits;
- DTableH->fastMode = 1;
- for (s=0; sfastMode;
-
- /* select fast mode (static) */
- if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
-{
- return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+ assert(op >= ostart);
+ return (size_t)(op-ostart);
}
typedef struct {
short ncount[FSE_MAX_SYMBOL_VALUE + 1];
- FSE_DTable dtable[]; /* Dynamically sized */
} FSE_DecompressWksp;
@@ -327,13 +252,18 @@
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+ size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
+ FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;
- DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+ /* correct offset to dtable depends on this property */
+ FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);
+
/* normal FSE decoding mode */
- {
- size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ { size_t const NCountLength =
+ FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
if (FSE_isError(NCountLength)) return NCountLength;
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
assert(NCountLength <= cSrcSize);
@@ -342,19 +272,20 @@
}
if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
- workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
+ workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
- CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+ CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
{
- const void* ptr = wksp->dtable;
+ const void* ptr = dtable;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
- if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
- return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
}
}
@@ -365,7 +296,7 @@
}
#if DYNAMIC_BMI2
-TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
@@ -382,9 +313,4 @@
return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
-
-typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
-
#endif /* FSE_COMMONDEFS_ONLY */
Index: lib/zstd/common/debug.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
--- a/lib/zstd/common/debug.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/debug.c (date 1740124241269)
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -21,4 +22,10 @@
#include "debug.h"
+#if (DEBUGLEVEL>=2)
+/* We only use this when DEBUGLEVEL>=2, but we get -Werror=pedantic errors if a
+ * translation unit is empty. So remove this from Linux kernel builds, but
+ * otherwise just leave it in.
+ */
int g_debuglevel = DEBUGLEVEL;
+#endif
Index: lib/zstd/common/zstd_internal.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
--- a/lib/zstd/common/zstd_internal.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/common/zstd_internal.h (date 1740124241339)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,6 +21,7 @@
* Dependencies
***************************************/
#include "compiler.h"
+#include "cpu.h"
#include "mem.h"
#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
@@ -27,12 +29,10 @@
#include
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include /* XXH_reset, update, digest */
#define ZSTD_TRACE 0
-
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError /* for inlining */
@@ -47,81 +47,7 @@
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-/*
- * Ignore: this is an internal helper.
- *
- * This is a helper function to help force C99-correctness during compilation.
- * Under strict compilation modes, variadic macro arguments can't be empty.
- * However, variadic function arguments can be. Using a function therefore lets
- * us statically check that at least one (string) argument was passed,
- * independent of the compilation flags.
- */
-static INLINE_KEYWORD UNUSED_ATTR
-void _force_has_format_string(const char *format, ...) {
- (void)format;
-}
-
-/*
- * Ignore: this is an internal helper.
- *
- * We want to force this function invocation to be syntactically correct, but
- * we don't want to force runtime evaluation of its arguments.
- */
-#define _FORCE_HAS_FORMAT_STRING(...) \
- if (0) { \
- _force_has_format_string(__VA_ARGS__); \
- }
-
-/*
- * Return the specified error if the condition evaluates to true.
- *
- * In debug modes, prints additional information.
- * In order to do that (particularly, printing the conditional that failed),
- * this can't just wrap RETURN_ERROR().
- */
-#define RETURN_ERROR_IF(cond, err, ...) \
- if (cond) { \
- RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
- __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return ERROR(err); \
- }
-
-/*
- * Unconditionally return the specified error.
- *
- * In debug modes, prints additional information.
- */
-#define RETURN_ERROR(err, ...) \
- do { \
- RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
- __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return ERROR(err); \
- } while(0);
-
-/*
- * If the provided expression evaluates to an error code, returns that error code.
- *
- * In debug modes, prints additional information.
- */
-#define FORWARD_IF_ERROR(err, ...) \
- do { \
- size_t const err_code = (err); \
- if (ERR_isError(err_code)) { \
- RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
- __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return err_code; \
- } \
- } while(0);
+#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
/*-*************************************
@@ -130,7 +56,6 @@
#define ZSTD_OPT_NUM (1<<12)
#define ZSTD_REP_NUM 3 /* number of repcodes */
-#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
#define KB *(1 <<10)
@@ -157,16 +82,17 @@
#define ZSTD_FRAMECHECKSUMSIZE 4
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
+#define MIN_LITERALS_FOR_4_STREAMS 6
-#define HufLog 12
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;
#define LONGNBSEQ 0x7F00
#define MINMATCH 3
#define Litbits 8
+#define LitHufLog 11
#define MaxLit ((1<= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
-
if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
/* Handle short offset copies. */
do {
- COPY8(op, ip)
+ COPY8(op, ip);
} while (op < oend);
} else {
assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
@@ -282,12 +227,6 @@
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
*/
-#ifdef __aarch64__
- do {
- COPY16(op, ip);
- }
- while (op < oend);
-#else
ZSTD_copy16(op, ip);
if (16 >= length) return;
op += 16;
@@ -297,7 +236,6 @@
COPY16(op, ip);
}
while (op < oend);
-#endif
}
}
@@ -330,55 +268,6 @@
/*-*******************************************
* Private declarations
*********************************************/
-typedef struct seqDef_s {
- U32 offset; /* Offset code of the sequence */
- U16 litLength;
- U16 matchLength;
-} seqDef;
-
-typedef struct {
- seqDef* sequencesStart;
- seqDef* sequences; /* ptr to end of sequences */
- BYTE* litStart;
- BYTE* lit; /* ptr to end of literals */
- BYTE* llCode;
- BYTE* mlCode;
- BYTE* ofCode;
- size_t maxNbSeq;
- size_t maxNbLit;
-
- /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength
- * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
- * the existing value of the litLength or matchLength by 0x10000.
- */
- U32 longLengthID; /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
- U32 longLengthPos; /* Index of the sequence to apply long length modification to */
-} seqStore_t;
-
-typedef struct {
- U32 litLength;
- U32 matchLength;
-} ZSTD_sequenceLength;
-
-/*
- * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
- * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
- */
-MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
-{
- ZSTD_sequenceLength seqLen;
- seqLen.litLength = seq->litLength;
- seqLen.matchLength = seq->matchLength + MINMATCH;
- if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
- if (seqStore->longLengthID == 1) {
- seqLen.litLength += 0xFFFF;
- }
- if (seqStore->longLengthID == 2) {
- seqLen.matchLength += 0xFFFF;
- }
- }
- return seqLen;
-}
/*
* Contains the compressed frame size and an upper-bound for the decompressed frame size.
@@ -387,39 +276,11 @@
* `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
*/
typedef struct {
+ size_t nbBlocks;
size_t compressedSize;
unsigned long long decompressedBound;
} ZSTD_frameSizeInfo; /* decompress & legacy */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
-
-/* custom memory allocation functions */
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
-
-
-MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# else /* Software version */
- static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
-# endif
- }
-}
-
-
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
@@ -435,16 +296,23 @@
/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
-/* Used by: decompress, fullbench (does not get its definition from here) */
+/* Used by: decompress, fullbench */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr);
/*! ZSTD_decodeSeqHeaders() :
* decode sequence header from src */
-/* Used by: decompress, fullbench (does not get its definition from here) */
+/* Used by: zstd_decompress_block, fullbench */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
const void* src, size_t srcSize);
-
+/*
+ * @returns true iff the CPU supports dynamic BMI2 dispatch.
+ */
+MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
+{
+ ZSTD_cpuid_t cpuid = ZSTD_cpuid();
+ return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
+}
#endif /* ZSTD_CCOMMON_H_MODULE */
Index: lib/zstd/compress/zstd_ldm.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
--- a/lib/zstd/compress/zstd_ldm.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_ldm.c (date 1740124241481)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,7 +17,7 @@
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"
-#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_BUCKET_SIZE_LOG 4
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
@@ -57,6 +58,33 @@
}
}
+/* ZSTD_ldm_gear_reset()
+ * Feeds [data, data + minMatchLength) into the hash without registering any
+ * splits. This effectively resets the hash state. This is used when skipping
+ * over data, either at the beginning of a block, or skipping sections.
+ */
+static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
+ BYTE const* data, size_t minMatchLength)
+{
+ U64 hash = state->rolling;
+ size_t n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ } while (0)
+ while (n + 3 < minMatchLength) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < minMatchLength) {
+ GEAR_ITER_ONCE();
+ }
+#undef GEAR_ITER_ONCE
+}
+
/* ZSTD_ldm_gear_feed():
*
* Registers in the splits array all the split points found in the first
@@ -106,21 +134,35 @@
}
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
- ZSTD_compressionParameters const* cParams)
+ const ZSTD_compressionParameters* cParams)
{
params->windowLog = cParams->windowLog;
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
- if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
- if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
- if (params->hashLog == 0) {
- params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
- assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ if (params->hashRateLog == 0) {
+ if (params->hashLog > 0) {
+ /* if params->hashLog is set, derive hashRateLog from it */
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ if (params->windowLog > params->hashLog) {
+ params->hashRateLog = params->windowLog - params->hashLog;
+ }
+ } else {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ /* mapping from [fast, rate7] to [btultra2, rate4] */
+ params->hashRateLog = 7 - (cParams->strategy/3);
+ }
+ }
+ if (params->hashLog == 0) {
+ params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX);
}
- if (params->hashRateLog == 0) {
- params->hashRateLog = params->windowLog < params->hashLog
- ? 0
- : params->windowLog - params->hashLog;
+ if (params->minMatchLength == 0) {
+ params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (cParams->strategy >= ZSTD_btultra)
+ params->minMatchLength /= 2;
+ }
+ if (params->bucketSizeLog==0) {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX);
}
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
@@ -132,33 +174,33 @@
size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
- return params.enableLdm ? totalSize : 0;
+ return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
- return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
+ return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
}
/* ZSTD_ldm_getBucket() :
* Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
- ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+ const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog)
{
- return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+ return ldmState->hashTable + (hash << bucketSizeLog);
}
/* ZSTD_ldm_insertEntry() :
* Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
size_t const hash, const ldmEntry_t entry,
- ldmParams_t const ldmParams)
+ U32 const bucketSizeLog)
{
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
- *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
- *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+ *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1));
}
@@ -207,7 +249,7 @@
*
* The tables for the other strategies are filled within their
* block compressors. */
-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
@@ -215,11 +257,15 @@
switch(ms->cParams.strategy)
{
case ZSTD_fast:
- ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
break;
case ZSTD_greedy:
@@ -242,7 +288,8 @@
const BYTE* iend, ldmParams_t const* params)
{
U32 const minMatchLength = params->minMatchLength;
- U32 const hBits = params->hashLog - params->bucketSizeLog;
+ U32 const bucketSizeLog = params->bucketSizeLog;
+ U32 const hBits = params->hashLog - bucketSizeLog;
BYTE const* const base = ldmState->window.base;
BYTE const* const istart = ip;
ldmRollingHashState_t hashState;
@@ -257,7 +304,7 @@
unsigned n;
numSplits = 0;
- hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits);
for (n = 0; n < numSplits; n++) {
if (ip + splits[n] >= istart + minMatchLength) {
@@ -268,7 +315,7 @@
entry.offset = (U32)(split - base);
entry.checksum = (U32)(xxhash >> 32);
- ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog);
}
}
@@ -282,7 +329,7 @@
* Sets cctx->nextToUpdate to a position corresponding closer to anchor
* if it is far way
* (after a long match, only update tables a limited amount). */
-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor)
{
U32 const curr = (U32)(anchor - ms->window.base);
if (curr > ms->nextToUpdate + 1024) {
@@ -291,8 +338,10 @@
}
}
-static size_t ZSTD_ldm_generateSequences_internal(
- ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, RawSeqStore_t* rawSeqStore,
ldmParams_t const* params, void const* src, size_t srcSize)
{
/* LDM parameters */
@@ -327,16 +376,8 @@
/* Initialize the rolling hash state with the first minMatchLength bytes */
ZSTD_ldm_gear_init(&hashState, params);
- {
- size_t n = 0;
-
- while (n < minMatchLength) {
- numSplits = 0;
- n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
- splits, &numSplits);
- }
- ip += minMatchLength;
- }
+ ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
+ ip += minMatchLength;
while (ip < ilimit) {
size_t hashed;
@@ -354,13 +395,14 @@
candidates[n].split = split;
candidates[n].hash = hash;
candidates[n].checksum = (U32)(xxhash >> 32);
- candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog);
PREFETCH_L1(candidates[n].bucket);
}
for (n = 0; n < numSplits; n++) {
size_t forwardMatchLength = 0, backwardMatchLength = 0,
bestMatchLength = 0, mLength;
+ U32 offset;
BYTE const* const split = candidates[n].split;
U32 const checksum = candidates[n].checksum;
U32 const hash = candidates[n].hash;
@@ -376,7 +418,7 @@
* the previous one, we merely register it in the hash table and
* move on */
if (split < anchor) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
@@ -423,14 +465,14 @@
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
/* Match found */
+ offset = (U32)(split - base) - bestEntry->offset;
mLength = forwardMatchLength + backwardMatchLength;
{
- U32 const offset = (U32)(split - base) - bestEntry->offset;
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
/* Out of sequence storage */
@@ -444,9 +486,24 @@
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
anchor = split + forwardMatchLength;
+
+ /* If we find a match that ends after the data that we've hashed
+ * then we have a repeating, overlapping, pattern. E.g. all zeros.
+ * If one repetition of the pattern matches our `stopMask` then all
+ * repetitions will. We don't need to insert them all into out table,
+ * only the first one. So skip over overlapping matches.
+ * This is a major speed boost (20x) for compressing a single byte
+ * repeated, when that byte ends up in the table.
+ */
+ if (anchor > ip + hashed) {
+ ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
+ /* Continue the outer loop at anchor (ip + hashed == anchor). */
+ ip = anchor - hashed;
+ break;
+ }
}
ip += hashed;
@@ -468,7 +525,7 @@
}
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmState_t* ldmState, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize)
{
U32 const maxDist = 1U << params->windowLog;
@@ -500,7 +557,7 @@
assert(chunkStart < iend);
/* 1. Perform overflow correction if necessary. */
- if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
U32 const ldmHSize = 1U << params->hashLog;
U32 const correction = ZSTD_window_correctOverflow(
&ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
@@ -514,7 +571,7 @@
* the window through early invalidation.
* TODO: * Test the chunk size.
* * Try invalidation after the sequence generation and test the
- * the offset against maxDist directly.
+ * offset against maxDist directly.
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
@@ -544,7 +601,9 @@
return 0;
}
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
+void
+ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
+{
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
if (srcSize <= seq->litLength) {
@@ -579,7 +638,7 @@
* Returns the current sequence to handle, or if the rest of the block should
* be literals, it returns a sequence with offset == 0.
*/
-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore,
U32 const remaining, U32 const minMatch)
{
rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
@@ -603,7 +662,7 @@
return sequence;
}
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) {
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
@@ -620,14 +679,15 @@
}
}
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
- ZSTD_blockCompressor const blockCompressor =
- ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
+ ZSTD_BlockCompressor_f const blockCompressor =
+ ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
@@ -651,7 +711,6 @@
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
- int i;
/* End signal */
if (sequence.offset == 0)
break;
@@ -664,6 +723,7 @@
/* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
+ int i;
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
@@ -673,8 +733,8 @@
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
- sequence.offset + ZSTD_REP_MOVE,
- sequence.matchLength - MINMATCH);
+ OFFSET_TO_OFFBASE(sequence.offset),
+ sequence.matchLength);
ip += sequence.matchLength;
}
}
Index: lib/zstd/compress/zstd_compress_sequences.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
--- a/lib/zstd/compress/zstd_compress_sequences.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_sequences.c (date 1740124241420)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -58,7 +59,7 @@
{
/* Heuristic: This should cover most blocks <= 16K and
* start to fade out after 16K to about 32K depending on
- * comprssibility.
+ * compressibility.
*/
return nbSeq >= 2048;
}
@@ -85,6 +86,8 @@
{
unsigned cost = 0;
unsigned s;
+
+ assert(total > 0);
for (s = 0; s <= max; ++s) {
unsigned norm = (unsigned)((256 * count[s]) / total);
if (count[s] != 0 && norm == 0)
@@ -151,20 +154,20 @@
return cost >> 8;
}
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy)
{
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
if (mostFrequent == nbSeq) {
*repeatMode = FSE_repeat_none;
if (isDefaultAllowed && nbSeq <= 2) {
- /* Prefer set_basic over set_rle when there are 2 or less symbols,
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
* since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
* If basic encoding isn't possible, always choose RLE.
*/
@@ -239,7 +242,7 @@
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -273,10 +276,11 @@
assert(nbSeq_1 > 1);
assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
(void)entropyWorkspaceSize;
- FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
- { size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog); /* overflow protected */
+ FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+ assert(oend >= op);
+ { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
- FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
return NCountSize;
}
}
@@ -290,7 +294,7 @@
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
@@ -310,19 +314,19 @@
FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
BIT_flushBits(&blockStream);
}
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
ofBits - extraBits);
} else {
- BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
}
BIT_flushBits(&blockStream);
@@ -336,8 +340,8 @@
U32 const mlBits = ML_bits[mlCode];
DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
(unsigned)sequences[n].litLength,
- (unsigned)sequences[n].matchLength + MINMATCH,
- (unsigned)sequences[n].offset);
+ (unsigned)sequences[n].mlBase + MINMATCH,
+ (unsigned)sequences[n].offBase);
/* 32b*/ /* 64b*/
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
@@ -348,18 +352,18 @@
BIT_flushBits(&blockStream); /* (7)*/
BIT_addBits(&blockStream, sequences[n].litLength, llBits);
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+ BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
if (longOffsets) {
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
- BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+ BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
BIT_flushBits(&blockStream); /* (7)*/
}
- BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+ BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
ofBits - extraBits); /* 31 */
} else {
- BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+ BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
@@ -384,7 +388,7 @@
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -396,13 +400,13 @@
#if DYNAMIC_BMI2
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_encodeSequences_bmi2(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -418,7 +422,7 @@
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
Index: lib/zstd/compress/zstd_compress_sequences.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
--- a/lib/zstd/compress/zstd_compress_sequences.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_sequences.h (date 1740124241425)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,26 +12,27 @@
#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H
+#include "zstd_compress_internal.h" /* SeqDef */
#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
-#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */
typedef enum {
ZSTD_defaultDisallowed = 0,
ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
+} ZSTD_DefaultPolicy_e;
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy);
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -42,7 +44,7 @@
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
Index: lib/zstd/compress/zstd_compress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
--- a/lib/zstd/compress/zstd_compress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress.c (date 1740124241392)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,13 +12,13 @@
/*-*************************************
* Dependencies
***************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
-#include "../common/cpu.h"
#include "../common/mem.h"
+#include "../common/error_private.h"
#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
@@ -28,6 +29,7 @@
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */
/* ***************************************************************
* Tuning parameters
@@ -39,19 +41,34 @@
* Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
*/
+/*!
+ * ZSTD_HASHLOG3_MAX :
+ * Maximum size of the hash table dedicated to find 3-bytes matches,
+ * in log format, aka 17 => 1 << 17 == 128Ki positions.
+ * This structure is only used in zstd_opt.
+ * Since allocation is centralized for all strategies, it has to be known here.
+ * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3,
+ * so that zstd_opt.c doesn't need to know about this constant.
+ */
+#ifndef ZSTD_HASHLOG3_MAX
+# define ZSTD_HASHLOG3_MAX 17
+#endif
/*-*************************************
* Helper functions
***************************************/
/* ZSTD_compressBound()
- * Note that the result from this function is only compatible with the "normal"
- * full-block strategy.
- * When there are a lot of small blocks due to frequent flush in streaming mode
- * the overhead of headers can make the compressed data to be larger than the
- * return value of ZSTD_compressBound().
+ * Note that the result from this function is only valid for
+ * the one-pass compression functions.
+ * When employing the streaming mode,
+ * if flushes are frequently altering the size of blocks,
+ * the overhead from block headers can make the compressed data larger
+ * than the return value of ZSTD_compressBound().
*/
size_t ZSTD_compressBound(size_t srcSize) {
- return ZSTD_COMPRESSBOUND(srcSize);
+ size_t const r = ZSTD_COMPRESSBOUND(srcSize);
+ if (r==0) return ERROR(srcSize_wrong);
+ return r;
}
@@ -64,11 +81,15 @@
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+ ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ * row-based matchfinder. Unless the cdict is reloaded, we will use
+ * the same greedy/lazy matchfinder at compression time.
+ */
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
ZSTD_CCtx* ZSTD_createCCtx(void)
@@ -81,7 +102,7 @@
assert(cctx != NULL);
ZSTD_memset(cctx, 0, sizeof(*cctx));
cctx->customMem = memManager;
- cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ cctx->bmi2 = ZSTD_cpuSupportsBmi2();
{ size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
assert(!ZSTD_isError(err));
(void)err;
@@ -115,11 +136,12 @@
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
- /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
- if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ /* statically sized space. tmpWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
- cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE);
+ cctx->tmpWkspSize = TMP_WORKSPACE_SIZE;
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
@@ -153,15 +175,13 @@
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
+ DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
- {
- int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
- if (!cctxInWorkspace) {
- ZSTD_customFree(cctx, cctx->customMem);
- }
+ if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem);
}
return 0;
}
@@ -190,14 +210,85 @@
}
/* private API call, for dictBuilder only */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+/* Returns true if the strategy supports using a row based matchfinder */
+static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
+ return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
+}
-/* Returns 1 if compression parameters are such that we should
+/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
+ * for this compression.
+ */
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) {
+ assert(mode != ZSTD_ps_auto);
+ return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
+}
+
+/* Returns row matchfinder usage given an initial mode and cParams */
+static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */
+ mode = ZSTD_ps_disable;
+ if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
+ if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
+ return mode;
+}
+
+/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
+static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
+static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
+ const U32 forDDSDict) {
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
+ * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
+ */
+ return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
+}
+
+/* Returns ZSTD_ps_enable if compression parameters are such that we should
* enable long distance matching (wlog >= 27, strategy >= btopt).
- * Returns 0 otherwise.
+ * Returns ZSTD_ps_disable otherwise.
*/
-static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
- return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
+static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+static int ZSTD_resolveExternalSequenceValidation(int mode) {
+ return mode;
+}
+
+/* Resolves maxBlockSize to the default if no value is present. */
+static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
+ if (maxBlockSize == 0) {
+ return ZSTD_BLOCKSIZE_MAX;
+ } else {
+ return maxBlockSize;
+ }
+}
+
+static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) {
+ if (value != ZSTD_ps_auto) return value;
+ if (cLevel < 10) {
+ return ZSTD_ps_disable;
+ } else {
+ return ZSTD_ps_enable;
+ }
+}
+
+/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged.
+ * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */
+static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) {
+ return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast;
}
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
@@ -208,15 +299,19 @@
ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
cctxParams.cParams = cParams;
- if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
- DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
- cctxParams.ldmParams.enableLdm = 1;
- /* LDM is enabled by default for optimal parser and window size >= 128MB */
+ /* Adjust advanced params according to cParams */
+ cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
+ if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
assert(cctxParams.ldmParams.hashRateLog < 32);
}
-
+ cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams);
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+ cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
+ cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
+ cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes,
+ cctxParams.compressionLevel);
assert(!ZSTD_checkCParams(cParams));
return cctxParams;
}
@@ -262,10 +357,13 @@
#define ZSTD_NO_CLEVEL 0
/*
- * Initializes the cctxParams from params and compressionLevel.
+ * Initializes `cctxParams` from `params` and `compressionLevel`.
* @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
*/
-static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+static void
+ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
+ const ZSTD_parameters* params,
+ int compressionLevel)
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
@@ -275,6 +373,14 @@
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = compressionLevel;
+ cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams);
+ cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, ¶ms->cParams);
+ cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams);
+ cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
+ cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
+ cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
+ DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
+ cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm);
}
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
@@ -287,7 +393,7 @@
/*
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
- * @param param Validated zstd parameters.
+ * @param params Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
@@ -383,8 +489,8 @@
return bounds;
case ZSTD_c_enableLongDistanceMatching:
- bounds.lowerBound = 0;
- bounds.upperBound = 1;
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_ldmHashLog:
@@ -431,9 +537,9 @@
return bounds;
case ZSTD_c_literalCompressionMode:
- ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
- bounds.lowerBound = ZSTD_lcm_auto;
- bounds.upperBound = ZSTD_lcm_uncompressed;
+ ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_targetCBlockSize:
@@ -462,6 +568,46 @@
bounds.upperBound = 1;
return bounds;
+ case ZSTD_c_splitAfterSequences:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_blockSplitterLevel:
+ bounds.lowerBound = 0;
+ bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX;
+ return bounds;
+
+ case ZSTD_c_useRowMatchFinder:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_deterministicRefPrefix:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_prefetchCDictTables:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_enableSeqProducerFallback:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_maxBlockSize:
+ bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
+ bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
+ return bounds;
+
+ case ZSTD_c_repcodeResolution:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
@@ -480,10 +626,11 @@
return 0;
}
-#define BOUNDCHECK(cParam, val) { \
- RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
- parameter_outOfBound, "Param out of bounds"); \
-}
+#define BOUNDCHECK(cParam, val) \
+ do { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound, "Param out of bounds"); \
+ } while (0)
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
@@ -497,6 +644,7 @@
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
+ case ZSTD_c_blockSplitterLevel:
return 1;
case ZSTD_c_format:
@@ -523,6 +671,13 @@
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
+ case ZSTD_c_splitAfterSequences:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ case ZSTD_c_prefetchCDictTables:
+ case ZSTD_c_enableSeqProducerFallback:
+ case ZSTD_c_maxBlockSize:
+ case ZSTD_c_repcodeResolution:
default:
return 0;
}
@@ -535,7 +690,7 @@
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
- RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
+ RETURN_ERROR(stage_wrong, "can only set params in cctx init stage");
} }
switch(param)
@@ -575,6 +730,14 @@
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
+ case ZSTD_c_splitAfterSequences:
+ case ZSTD_c_blockSplitterLevel:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ case ZSTD_c_prefetchCDictTables:
+ case ZSTD_c_enableSeqProducerFallback:
+ case ZSTD_c_maxBlockSize:
+ case ZSTD_c_repcodeResolution:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -630,12 +793,12 @@
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
- CCtxParams->cParams.minMatch = value;
+ CCtxParams->cParams.minMatch = (U32)value;
return CCtxParams->cParams.minMatch;
case ZSTD_c_targetLength :
BOUNDCHECK(ZSTD_c_targetLength, value);
- CCtxParams->cParams.targetLength = value;
+ CCtxParams->cParams.targetLength = (U32)value;
return CCtxParams->cParams.targetLength;
case ZSTD_c_strategy :
@@ -648,12 +811,12 @@
/* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
- return CCtxParams->fParams.contentSizeFlag;
+ return (size_t)CCtxParams->fParams.contentSizeFlag;
case ZSTD_c_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
CCtxParams->fParams.checksumFlag = value != 0;
- return CCtxParams->fParams.checksumFlag;
+ return (size_t)CCtxParams->fParams.checksumFlag;
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
@@ -662,18 +825,18 @@
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
- return CCtxParams->forceWindow;
+ return (size_t)CCtxParams->forceWindow;
case ZSTD_c_forceAttachDict : {
const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
- BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref);
CCtxParams->attachDictPref = pref;
return CCtxParams->attachDictPref;
}
case ZSTD_c_literalCompressionMode : {
- const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
- BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
}
@@ -696,47 +859,50 @@
case ZSTD_c_enableDedicatedDictSearch :
CCtxParams->enableDedicatedDictSearch = (value!=0);
- return CCtxParams->enableDedicatedDictSearch;
+ return (size_t)CCtxParams->enableDedicatedDictSearch;
case ZSTD_c_enableLongDistanceMatching :
- CCtxParams->ldmParams.enableLdm = (value!=0);
+ BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value);
+ CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value;
return CCtxParams->ldmParams.enableLdm;
case ZSTD_c_ldmHashLog :
if (value!=0) /* 0 ==> auto */
BOUNDCHECK(ZSTD_c_ldmHashLog, value);
- CCtxParams->ldmParams.hashLog = value;
+ CCtxParams->ldmParams.hashLog = (U32)value;
return CCtxParams->ldmParams.hashLog;
case ZSTD_c_ldmMinMatch :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
- CCtxParams->ldmParams.minMatchLength = value;
+ CCtxParams->ldmParams.minMatchLength = (U32)value;
return CCtxParams->ldmParams.minMatchLength;
case ZSTD_c_ldmBucketSizeLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
- CCtxParams->ldmParams.bucketSizeLog = value;
+ CCtxParams->ldmParams.bucketSizeLog = (U32)value;
return CCtxParams->ldmParams.bucketSizeLog;
case ZSTD_c_ldmHashRateLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
- CCtxParams->ldmParams.hashRateLog = value;
+ CCtxParams->ldmParams.hashRateLog = (U32)value;
return CCtxParams->ldmParams.hashRateLog;
case ZSTD_c_targetCBlockSize :
- if (value!=0) /* 0 ==> default */
+ if (value!=0) { /* 0 ==> default */
+ value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
- CCtxParams->targetCBlockSize = value;
+ }
+ CCtxParams->targetCBlockSize = (U32)value;
return CCtxParams->targetCBlockSize;
case ZSTD_c_srcSizeHint :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_srcSizeHint, value);
CCtxParams->srcSizeHint = value;
- return CCtxParams->srcSizeHint;
+ return (size_t)CCtxParams->srcSizeHint;
case ZSTD_c_stableInBuffer:
BOUNDCHECK(ZSTD_c_stableInBuffer, value);
@@ -750,13 +916,55 @@
case ZSTD_c_blockDelimiters:
BOUNDCHECK(ZSTD_c_blockDelimiters, value);
- CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value;
return CCtxParams->blockDelimiters;
case ZSTD_c_validateSequences:
BOUNDCHECK(ZSTD_c_validateSequences, value);
CCtxParams->validateSequences = value;
- return CCtxParams->validateSequences;
+ return (size_t)CCtxParams->validateSequences;
+
+ case ZSTD_c_splitAfterSequences:
+ BOUNDCHECK(ZSTD_c_splitAfterSequences, value);
+ CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->postBlockSplitter;
+
+ case ZSTD_c_blockSplitterLevel:
+ BOUNDCHECK(ZSTD_c_blockSplitterLevel, value);
+ CCtxParams->preBlockSplitter_level = value;
+ return (size_t)CCtxParams->preBlockSplitter_level;
+
+ case ZSTD_c_useRowMatchFinder:
+ BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
+ CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->useRowMatchFinder;
+
+ case ZSTD_c_deterministicRefPrefix:
+ BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
+ CCtxParams->deterministicRefPrefix = !!value;
+ return (size_t)CCtxParams->deterministicRefPrefix;
+
+ case ZSTD_c_prefetchCDictTables:
+ BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
+ CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->prefetchCDictTables;
+
+ case ZSTD_c_enableSeqProducerFallback:
+ BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
+ CCtxParams->enableMatchFinderFallback = value;
+ return (size_t)CCtxParams->enableMatchFinderFallback;
+
+ case ZSTD_c_maxBlockSize:
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_maxBlockSize, value);
+ assert(value>=0);
+ CCtxParams->maxBlockSize = (size_t)value;
+ return CCtxParams->maxBlockSize;
+
+ case ZSTD_c_repcodeResolution:
+ BOUNDCHECK(ZSTD_c_repcodeResolution, value);
+ CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->searchForExternalRepcodes;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
@@ -773,7 +981,7 @@
switch(param)
{
case ZSTD_c_format :
- *value = CCtxParams->format;
+ *value = (int)CCtxParams->format;
break;
case ZSTD_c_compressionLevel :
*value = CCtxParams->compressionLevel;
@@ -788,16 +996,16 @@
*value = (int)CCtxParams->cParams.chainLog;
break;
case ZSTD_c_searchLog :
- *value = CCtxParams->cParams.searchLog;
+ *value = (int)CCtxParams->cParams.searchLog;
break;
case ZSTD_c_minMatch :
- *value = CCtxParams->cParams.minMatch;
+ *value = (int)CCtxParams->cParams.minMatch;
break;
case ZSTD_c_targetLength :
- *value = CCtxParams->cParams.targetLength;
+ *value = (int)CCtxParams->cParams.targetLength;
break;
case ZSTD_c_strategy :
- *value = (unsigned)CCtxParams->cParams.strategy;
+ *value = (int)CCtxParams->cParams.strategy;
break;
case ZSTD_c_contentSizeFlag :
*value = CCtxParams->fParams.contentSizeFlag;
@@ -812,10 +1020,10 @@
*value = CCtxParams->forceWindow;
break;
case ZSTD_c_forceAttachDict :
- *value = CCtxParams->attachDictPref;
+ *value = (int)CCtxParams->attachDictPref;
break;
case ZSTD_c_literalCompressionMode :
- *value = CCtxParams->literalCompressionMode;
+ *value = (int)CCtxParams->literalCompressionMode;
break;
case ZSTD_c_nbWorkers :
assert(CCtxParams->nbWorkers == 0);
@@ -831,19 +1039,19 @@
*value = CCtxParams->enableDedicatedDictSearch;
break;
case ZSTD_c_enableLongDistanceMatching :
- *value = CCtxParams->ldmParams.enableLdm;
+ *value = (int)CCtxParams->ldmParams.enableLdm;
break;
case ZSTD_c_ldmHashLog :
- *value = CCtxParams->ldmParams.hashLog;
+ *value = (int)CCtxParams->ldmParams.hashLog;
break;
case ZSTD_c_ldmMinMatch :
- *value = CCtxParams->ldmParams.minMatchLength;
+ *value = (int)CCtxParams->ldmParams.minMatchLength;
break;
case ZSTD_c_ldmBucketSizeLog :
- *value = CCtxParams->ldmParams.bucketSizeLog;
+ *value = (int)CCtxParams->ldmParams.bucketSizeLog;
break;
case ZSTD_c_ldmHashRateLog :
- *value = CCtxParams->ldmParams.hashRateLog;
+ *value = (int)CCtxParams->ldmParams.hashRateLog;
break;
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
@@ -862,6 +1070,30 @@
break;
case ZSTD_c_validateSequences :
*value = (int)CCtxParams->validateSequences;
+ break;
+ case ZSTD_c_splitAfterSequences :
+ *value = (int)CCtxParams->postBlockSplitter;
+ break;
+ case ZSTD_c_blockSplitterLevel :
+ *value = CCtxParams->preBlockSplitter_level;
+ break;
+ case ZSTD_c_useRowMatchFinder :
+ *value = (int)CCtxParams->useRowMatchFinder;
+ break;
+ case ZSTD_c_deterministicRefPrefix:
+ *value = (int)CCtxParams->deterministicRefPrefix;
+ break;
+ case ZSTD_c_prefetchCDictTables:
+ *value = (int)CCtxParams->prefetchCDictTables;
+ break;
+ case ZSTD_c_enableSeqProducerFallback:
+ *value = CCtxParams->enableMatchFinderFallback;
+ break;
+ case ZSTD_c_maxBlockSize:
+ *value = (int)CCtxParams->maxBlockSize;
+ break;
+ case ZSTD_c_repcodeResolution:
+ *value = (int)CCtxParams->searchForExternalRepcodes;
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
@@ -889,9 +1121,47 @@
return 0;
}
-ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
+{
+ ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */);
+ DEBUGLOG(4, "ZSTD_CCtx_setCParams");
+ /* only update if all parameters are valid */
+ FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), "");
+ return 0;
+}
+
+size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams)
+{
+ ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */);
+ DEBUGLOG(4, "ZSTD_CCtx_setFParams");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), "");
+ return 0;
+}
+
+size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParams");
+ /* First check cParams, because we want to update all or none. */
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
+ /* Next set fParams, because this could fail if the cctx isn't in init stage. */
+ FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), "");
+ /* Finally set cParams, which should succeed. */
+ FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), "");
+ return 0;
+}
+
+size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize);
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
@@ -907,9 +1177,9 @@
ZSTD_compressionParameters* cParams);
/*
- * Initializes the local dict using the requested parameters.
- * NOTE: This does not use the pledged src size, because it may be used for more
- * than one compression.
+ * Initializes the local dictionary using requested parameters.
+ * NOTE: Initialization does not employ the pledged src size,
+ * because the dictionary may be used for multiple compressions.
*/
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
@@ -922,8 +1192,8 @@
return 0;
}
if (dl->cdict != NULL) {
- assert(cctx->cdict == dl->cdict);
/* Local dictionary already initialized. */
+ assert(cctx->cdict == dl->cdict);
return 0;
}
assert(dl->dictSize > 0);
@@ -943,40 +1213,44 @@
}
size_t ZSTD_CCtx_loadDictionary_advanced(
- ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+ ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
{
- RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
- "Can't load a dictionary when ctx is not in init stage.");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
- ZSTD_clearAllDicts(cctx); /* in case one already exists */
- if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't load a dictionary when cctx is not in init stage.");
+ ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */
+ if (dict == NULL || dictSize == 0) /* no dictionary */
return 0;
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
} else {
+ /* copy dictionary content inside CCtx to own its lifetime */
void* dictBuffer;
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
- "no malloc for static CCtx");
+ "static CCtx can't allocate for an internal copy of dictionary");
dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
- RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
+ RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation,
+ "allocation failed for dictionary content");
ZSTD_memcpy(dictBuffer, dict, dictSize);
- cctx->localDict.dictBuffer = dictBuffer;
- cctx->localDict.dict = dictBuffer;
+ cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */
+ cctx->localDict.dict = dictBuffer; /* read-only reference */
}
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
return 0;
}
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
+size_t ZSTD_CCtx_loadDictionary_byReference(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
@@ -1032,7 +1306,7 @@
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
- "Can't reset parameters only when not in init stage.");
+ "Reset parameters is only possible during init stage.");
ZSTD_clearAllDicts(cctx);
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
@@ -1051,7 +1325,7 @@
BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
- BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy);
return 0;
}
@@ -1061,11 +1335,12 @@
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
-# define CLAMP_TYPE(cParam, val, type) { \
- ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
- if ((int)valbounds.upperBound) val=(type)bounds.upperBound; \
- }
+# define CLAMP_TYPE(cParam, val, type) \
+ do { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)valbounds.upperBound) val=(type)bounds.upperBound; \
+ } while (0)
# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
CLAMP(ZSTD_c_windowLog, cParams.windowLog);
CLAMP(ZSTD_c_chainLog, cParams.chainLog);
@@ -1123,19 +1398,62 @@
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
- * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
- ZSTD_cParamMode_e mode)
+ ZSTD_CParamMode_e mode,
+ ZSTD_ParamSwitch_e useRowMatchFinder)
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
+ /* Cascade the selected strategy down to the next-highest one built into
+ * this binary. */
+#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btultra2) {
+ cPar.strategy = ZSTD_btultra;
+ }
+ if (cPar.strategy == ZSTD_btultra) {
+ cPar.strategy = ZSTD_btopt;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btopt) {
+ cPar.strategy = ZSTD_btlazy2;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btlazy2) {
+ cPar.strategy = ZSTD_lazy2;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_lazy2) {
+ cPar.strategy = ZSTD_lazy;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_lazy) {
+ cPar.strategy = ZSTD_greedy;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_greedy) {
+ cPar.strategy = ZSTD_dfast;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_dfast) {
+ cPar.strategy = ZSTD_fast;
+ cPar.targetLength = 0;
+ }
+#endif
+
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
@@ -1146,7 +1464,7 @@
break;
case ZSTD_cpm_createCDict:
/* Assume a small source size when creating a dictionary
- * with an unkown source size.
+ * with an unknown source size.
*/
if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
srcSize = minSrcSize;
@@ -1164,8 +1482,8 @@
}
/* resize windowLog if input is small enough, to use less memory */
- if ( (srcSize < maxWindowResize)
- && (dictSize < maxWindowResize) ) {
+ if ( (srcSize <= maxWindowResize)
+ && (dictSize <= maxWindowResize) ) {
U32 const tSize = (U32)(srcSize + dictSize);
static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
@@ -1183,6 +1501,42 @@
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+ /* We can't use more than 32 bits of hash in total, so that means that we require:
+ * (hashLog + 8) <= 32 && (chainLog + 8) <= 32
+ */
+ if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) {
+ U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS;
+ if (cPar.hashLog > maxShortCacheHashLog) {
+ cPar.hashLog = maxShortCacheHashLog;
+ }
+ if (cPar.chainLog > maxShortCacheHashLog) {
+ cPar.chainLog = maxShortCacheHashLog;
+ }
+ }
+
+
+ /* At this point, we aren't 100% sure if we are using the row match finder.
+ * Unless it is explicitly disabled, conservatively assume that it is enabled.
+ * In this case it will only be disabled for small sources, so shrinking the
+ * hash log a little bit shouldn't result in any ratio loss.
+ */
+ if (useRowMatchFinder == ZSTD_ps_auto)
+ useRowMatchFinder = ZSTD_ps_enable;
+
+ /* We can't hash more than 32-bits in total. So that means that we require:
+ * (hashLog - rowLog + 8) <= 32
+ */
+ if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) {
+ /* Switch to 32-entry rows if searchLog is 5 (or more) */
+ U32 const rowLog = BOUNDED(4, cPar.searchLog, 6);
+ U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS;
+ U32 const maxHashLog = maxRowHashLog + rowLog;
+ assert(cPar.hashLog >= rowLog);
+ if (cPar.hashLog > maxHashLog) {
+ cPar.hashLog = maxHashLog;
+ }
+ }
+
return cPar;
}
@@ -1193,11 +1547,11 @@
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);
}
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
static void ZSTD_overrideCParams(
ZSTD_compressionParameters* cParams,
@@ -1213,25 +1567,31 @@
}
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
- srcSizeHint = CCtxParams->srcSizeHint;
+ assert(CCtxParams->srcSizeHint>=0);
+ srcSizeHint = (U64)CCtxParams->srcSizeHint;
}
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
- if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
- return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
}
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
+ const int enableDedicatedDictSearch,
const U32 forCCtx)
{
- size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ /* chain table size should be 0 for fast or row-hash strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
@@ -1241,43 +1601,61 @@
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
- ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
- + ZSTD_cwksp_alloc_size((1<strategy, useRowMatchFinder)
+ ? ZSTD_cwksp_aligned64_alloc_size(hSize)
+ : 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
+ size_t const slackSpace = ZSTD_cwksp_slack_space_required();
+
+ /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
+ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+
DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
(U32)chainSize, (U32)hSize, (U32)h3Size);
- return tableSpace + optSpace;
+ return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
+}
+
+/* Helper function for calculating memory requirements.
+ * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
+static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
+ U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
+ return blockSize / divider;
}
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const size_t buffInSize,
const size_t buffOutSize,
- const U64 pledgedSrcSize)
+ const U64 pledgedSrcSize,
+ int useSequenceProducer,
+ size_t maxBlockSize)
{
- size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (cParams->minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
+ size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
+ size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
+ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
- + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+ + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
- size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
- size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
+ size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
- size_t const ldmSeqSpace = ldmParams->enableLdm ?
- ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+ size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
+ ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
@@ -1285,15 +1663,21 @@
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+ size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
+ size_t const externalSeqSpace = useSequenceProducer
+ ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
+ : 0;
+
size_t const neededSpace =
cctxSpace +
- entropySpace +
+ tmpWorkSpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
matchStateSize +
tokenSpace +
- bufferSpace;
+ bufferSpace +
+ externalSeqSpace;
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
@@ -1303,19 +1687,32 @@
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ &cParams);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
/* estimateCCtxSize is for one-shot compression. So no buffers should
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
- &cParams, ¶ms->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
+ &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
- ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
- return ZSTD_estimateCCtxSize_usingCCtxParams(¶ms);
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ }
}
static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
@@ -1348,24 +1745,36 @@
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);
size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
? ((size_t)1 << cParams.windowLog) + blockSize
: 0;
size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
- &cParams, ¶ms->ldmParams, 1, inBuffSize, outBuffSize,
- ZSTD_CONTENTSIZE_UNKNOWN);
+ &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
+ ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
}
size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
- ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
- return ZSTD_estimateCStreamSize_usingCCtxParams(¶ms);
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ }
}
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
@@ -1443,7 +1852,7 @@
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (can be NULL).
*/
-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms)
{
ZSTD_window_clear(&ms->window);
@@ -1480,26 +1889,47 @@
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
+/* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */
+static U64 ZSTD_bitmix(U64 val, U64 len) {
+ val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24);
+ val *= 0x9FB21C651E98DF25ULL;
+ val ^= (val >> 35) + len ;
+ val *= 0x9FB21C651E98DF25ULL;
+ return val ^ (val >> 28);
+}
+
+/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
+static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) {
+ ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
+}
+
static size_t
-ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ZSTD_reset_matchState(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
{
- size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ /* disable chain table allocation for fast or row-based strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
+ ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
}
ms->hashLog3 = hashLog3;
+ ms->lazySkipping = 0;
ZSTD_invalidateMatchState(ms);
@@ -1521,22 +1951,42 @@
ZSTD_cwksp_clean_tables(ws);
}
+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+ /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize;
+ /* We want to generate a new salt in case we reset a Cctx, but we always want to use
+ * 0 when we reset a Cdict */
+ if(forWho == ZSTD_resetTarget_CCtx) {
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
+ ZSTD_advanceHashSalt(ms);
+ } else {
+ /* When we are not salting we want to always memset the memory */
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ms->hashSalt = 0;
+ }
+ { /* Switch to 32-entry rows if searchLog is 5 (or more) */
+ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+ assert(cParams->hashLog >= rowLog);
+ ms->rowHashLog = cParams->hashLog - rowLog;
+ }
+ }
+
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
- ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
- ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
- ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
- ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
}
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
-
return 0;
}
@@ -1553,61 +2003,86 @@
return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
+/* ZSTD_dictTooBig():
+ * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
+ * one go generically. So we ensure that in that case we reset the tables to zero,
+ * so that we can load as much of the dictionary as possible.
+ */
+static int ZSTD_dictTooBig(size_t const loadedDictSize)
+{
+ return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
+}
+
/*! ZSTD_resetCCtx_internal() :
- note : `params` are assumed fully validated at this stage */
+ * @param loadedDictSize The size of the dictionary to be loaded
+ * into the context, if any. If no dictionary is used, or the
+ * dictionary is being attached / copied, then pass 0.
+ * note : `params` are assumed fully validated at this stage.
+ */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
- ZSTD_CCtx_params params,
+ ZSTD_CCtx_params const* params,
U64 const pledgedSrcSize,
+ size_t const loadedDictSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
ZSTD_cwksp* const ws = &zc->workspace;
- DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
- (U32)pledgedSrcSize, params.cParams.windowLog);
- assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zc->isFirstBlock = 1;
- if (params.ldmParams.enableLdm) {
+ /* Set applied params early so we can modify them for LDM,
+ * and point params at the applied params.
+ */
+ zc->appliedParams = *params;
+ params = &zc->appliedParams;
+
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ assert(params->postBlockSplitter != ZSTD_ps_auto);
+ assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
+ assert(params->maxBlockSize != 0);
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* Adjust long distance matching parameters */
- ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams);
- assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
- assert(params.ldmParams.hashRateLog < 32);
+ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams);
+ assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
+ assert(params->ldmParams.hashRateLog < 32);
}
- { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(params->maxBlockSize, windowSize);
+ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
+ size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
- size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
+ size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
? windowSize + blockSize
: 0;
- size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
+ int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
ZSTD_indexResetPolicy_e needsIndexReset =
- (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
+ (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
- ¶ms.cParams, ¶ms.ldmParams, zc->staticSize != 0,
- buffInSize, buffOutSize, pledgedSrcSize);
+ ¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
+ buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
+
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
- /* Check if workspace is large enough, alloc a new one if needed */
- {
+ { /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
-
+ int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
- if (workspaceTooSmall || workspaceWasteful) {
+ if (resizeWorkspace) {
DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
@@ -1621,22 +2096,23 @@
DEBUGLOG(5, "reserving object space");
/* Statically sized space.
- * entropyWorkspace never moves,
+ * tmpWorkspace never moves,
* though prev/next block swap places */
assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
- zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
- RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace");
+ zc->tmpWkspSize = TMP_WORKSPACE_SIZE;
} }
ZSTD_cwksp_clear(ws);
/* init params */
- zc->appliedParams = params;
- zc->blockState.matchState.cParams = params.cParams;
+ zc->blockState.matchState.cParams = params->cParams;
+ zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
@@ -1644,7 +2120,7 @@
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
- zc->blockSize = blockSize;
+ zc->blockSizeMax = blockSize;
xxh64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
@@ -1653,13 +2129,46 @@
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
+ ¶ms->cParams,
+ params->useRowMatchFinder,
+ crp,
+ needsIndexReset,
+ ZSTD_resetTarget_CCtx), "");
+
+ zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef));
+
+ /* ldm hash table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ ZSTD_window_init(&zc->ldmState.window);
+ zc->ldmState.loadedDictEnd = 0;
+ }
+
+ /* reserve space for block-level external sequences */
+ if (ZSTD_hasExtSeqProd(params)) {
+ size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
+ zc->extSeqBufCapacity = maxNbExternalSeq;
+ zc->extSeqBuf =
+ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
+ }
+
+ /* buffers */
+
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
- /* buffers */
zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
@@ -1667,11 +2176,11 @@
zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
/* ldm bucketOffsets table */
- if (params.ldmParams.enableLdm) {
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
size_t const numBuckets =
- ((size_t)1) << (params.ldmParams.hashLog -
- params.ldmParams.bucketSizeLog);
+ ((size_t)1) << (params->ldmParams.hashLog -
+ params->ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
}
@@ -1682,37 +2191,10 @@
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
- zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
-
- FORWARD_IF_ERROR(ZSTD_reset_matchState(
- &zc->blockState.matchState,
- ws,
- ¶ms.cParams,
- crp,
- needsIndexReset,
- ZSTD_resetTarget_CCtx), "");
-
- /* ldm hash table */
- if (params.ldmParams.enableLdm) {
- /* TODO: avoid memset? */
- size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
- zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
- ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
- zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
- zc->maxNbLdmSequences = maxNbLdmSeq;
-
- ZSTD_window_init(&zc->ldmState.window);
- ZSTD_window_clear(&zc->ldmState.window);
- zc->ldmState.loadedDictEnd = 0;
- }
-
- /* Due to alignment, when reusing a workspace, we can actually consume
- * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
- */
- assert(ZSTD_cwksp_used(ws) >= neededSpace &&
- ZSTD_cwksp_used(ws) <= neededSpace + 3);
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
+ assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace));
+
zc->initialized = 1;
return 0;
@@ -1768,6 +2250,8 @@
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
+ DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
{
ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
unsigned const windowLog = params.cParams.windowLog;
@@ -1781,9 +2265,12 @@
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
- cdict->dictContentSize, ZSTD_cpm_attachDict);
+ cdict->dictContentSize, ZSTD_cpm_attachDict,
+ params.useRowMatchFinder);
params.cParams.windowLog = windowLog;
- FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_makeClean, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
}
@@ -1818,6 +2305,22 @@
return 0;
}
+static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize,
+ ZSTD_compressionParameters const* cParams) {
+ if (ZSTD_CDictIndicesAreTagged(cParams)){
+ /* Remove tags from the CDict table if they are present.
+ * See docs on "short cache" in zstd_compress_internal.h for context. */
+ size_t i;
+ for (i = 0; i < tableSize; i++) {
+ U32 const taggedIndex = src[i];
+ U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS;
+ dst[i] = index;
+ }
+ } else {
+ ZSTD_memcpy(dst, src, tableSize * sizeof(U32));
+ }
+}
+
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
@@ -1827,15 +2330,17 @@
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
assert(!cdict->matchState.dedicatedDictSearch);
-
- DEBUGLOG(4, "copying dictionary into context");
+ DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
- FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ params.useRowMatchFinder = cdict->useRowMatchFinder;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
@@ -1843,21 +2348,37 @@
}
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+ assert(params.useRowMatchFinder != ZSTD_ps_auto);
/* copy tables */
- { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
+ ? ((size_t)1 << cdict_cParams->chainLog)
+ : 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
- ZSTD_memcpy(cctx->blockState.matchState.hashTable,
- cdict->matchState.hashTable,
- hSize * sizeof(U32));
- ZSTD_memcpy(cctx->blockState.matchState.chainTable,
- cdict->matchState.chainTable,
- chainSize * sizeof(U32));
+ ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable,
+ cdict->matchState.hashTable,
+ hSize, cdict_cParams);
+
+ /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
+ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
+ ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable,
+ cdict->matchState.chainTable,
+ chainSize, cdict_cParams);
+ }
+ /* copy tag table */
+ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
+ size_t const tagTableSize = hSize;
+ ZSTD_memcpy(cctx->blockState.matchState.tagTable,
+ cdict->matchState.tagTable,
+ tagTableSize);
+ cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;
+ }
}
/* Zero the hashTable3, since the cdict never fills it */
- { int const h3log = cctx->blockState.matchState.hashLog3;
+ assert(cctx->blockState.matchState.hashLog3 <= 31);
+ { U32 const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
@@ -1866,8 +2387,8 @@
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
- { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
- ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -1917,16 +2438,23 @@
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
- DEBUGLOG(5, "ZSTD_copyCCtx_internal");
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
"Can't copy a ctx that's not in init stage.");
-
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
+ assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
+ params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
+ params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;
+ params.ldmParams = srcCCtx->appliedParams.ldmParams;
params.fParams = fParams;
- ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
+ params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
+ ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize,
+ /* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
@@ -1938,9 +2466,13 @@
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
/* copy tables */
- { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
+ { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
+ srcCCtx->appliedParams.useRowMatchFinder,
+ 0 /* forDDSDict */)
+ ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
+ : 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
- int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ U32 const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
@@ -1958,8 +2490,8 @@
/* copy dictionary offsets */
{
- const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
- ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -2005,19 +2537,26 @@
int const nbRows = (int)size / ZSTD_ROWSIZE;
int cellNb = 0;
int rowNb;
+ /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
+ U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
- assert(size < (1U<<31)); /* can be casted to int */
+ assert(size < (1U<<31)); /* can be cast to int */
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
int column;
for (column=0; columncParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
- if (params->cParams.strategy != ZSTD_fast) {
+ if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
@@ -2061,26 +2600,32 @@
/* See doc/zstd_compression_format.md for detailed format description */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr)
{
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
BYTE* const ofCodeTable = seqStorePtr->ofCode;
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
+ int longOffsets = 0;
assert(nbSeq <= seqStorePtr->maxNbSeq);
for (u=0; u= STREAM_ACCUMULATOR_MIN));
+ if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN)
+ longOffsets = 1;
}
- if (seqStorePtr->longLengthID==1)
+ if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
- if (seqStorePtr->longLengthID==2)
+ if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+ return longOffsets;
}
/* ZSTD_useTargetCBlockSize():
@@ -2093,53 +2638,209 @@
return (cctxParams->targetCBlockSize != 0);
}
-/* ZSTD_entropyCompressSequences_internal():
- * actually compresses both literals and sequences */
+/* ZSTD_blockSplitterEnabled():
+ * Returns if block splitting param is being used
+ * If used, compression will do best effort to split a block in order to improve compression ratio.
+ * At the time this function is called, the parameter must be finalized.
+ * Returns 1 if true, 0 otherwise. */
+static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter);
+ assert(cctxParams->postBlockSplitter != ZSTD_ps_auto);
+ return (cctxParams->postBlockSplitter == ZSTD_ps_enable);
+}
+
+/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
+ * and size of the sequences statistics
+ */
+typedef struct {
+ U32 LLtype;
+ U32 Offtype;
+ U32 MLtype;
+ size_t size;
+ size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+ int longOffsets;
+} ZSTD_symbolEncodingTypeStats_t;
+
+/* ZSTD_buildSequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
+ * Modifies `nextEntropy` to have the appropriate values as a side effect.
+ * nbSeq must be greater than 0.
+ *
+ * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildSequencesStatistics(
+ const SeqStore_t* seqStorePtr, size_t nbSeq,
+ const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+ BYTE* dst, const BYTE* const dstEnd,
+ ZSTD_strategy strategy, unsigned* countWorkspace,
+ void* entropyWorkspace, size_t entropyWkspSize)
+{
+ BYTE* const ostart = dst;
+ const BYTE* const oend = dstEnd;
+ BYTE* op = ostart;
+ FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ stats.lastCountSize = 0;
+ /* convert length/distances into codes */
+ stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
+ assert(op <= oend);
+ assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+ stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype,
+ countWorkspace, max, llCodeTable, nbSeq,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->litlengthCTable,
+ sizeof(prevEntropy->litlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.LLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+ stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype,
+ countWorkspace, max, ofCodeTable, nbSeq,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->offcodeCTable,
+ sizeof(prevEntropy->offcodeCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.Offtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+ stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype,
+ countWorkspace, max, mlCodeTable, nbSeq,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->matchlengthCTable,
+ sizeof(prevEntropy->matchlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.MLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ stats.size = (size_t)(op-ostart);
+ return stats;
+}
+
+/* ZSTD_entropyCompressSeqStore_internal():
+ * compresses both literals and sequences
+ * Returns compressed size of block, or a zstd error.
+ */
+#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
-ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- void* entropyWorkspace, size_t entropyWkspSize,
- const int bmi2)
+ZSTD_entropyCompressSeqStore_internal(
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ const int bmi2)
{
- const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
- U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
+ const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
- size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- BYTE* seqHead;
- BYTE* lastNCount = NULL;
+ size_t lastCountSize;
+ int longOffsets = 0;
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
- DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
+ DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<= HUF_WORKSPACE_SIZE);
/* Compress literals */
- { const BYTE* const literals = seqStorePtr->litStart;
- size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ /* Base suspicion of uncompressibility on ratio of literals to sequences */
+ int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
+
size_t const cSize = ZSTD_compressLiterals(
- &prevEntropy->huf, &nextEntropy->huf,
- cctxParams->cParams.strategy,
- ZSTD_disableLiteralsCompression(cctxParams),
op, dstCapacity,
literals, litSize,
entropyWorkspace, entropyWkspSize,
- bmi2);
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ suspectUncompressible, bmi2);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
op += cSize;
@@ -2165,95 +2866,20 @@
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
-
- /* seqHead : flags for FSE encoding type */
- seqHead = op++;
- assert(op <= oend);
-
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
- /* build CTable for Literal Lengths */
- { unsigned max = MaxLL;
- size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- DEBUGLOG(5, "Building LL table");
- nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
- LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
- count, max, mostFrequent, nbSeq,
- LLFSELog, prevEntropy->fse.litlengthCTable,
- LL_defaultNorm, LL_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(set_basic < set_compressed && set_rle < set_compressed);
- assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
- count, max, llCodeTable, nbSeq,
- LL_defaultNorm, LL_defaultNormLog, MaxLL,
- prevEntropy->fse.litlengthCTable,
- sizeof(prevEntropy->fse.litlengthCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
- if (LLtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
- /* build CTable for Offsets */
- { unsigned max = MaxOff;
- size_t const mostFrequent = HIST_countFast_wksp(
- count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
- DEBUGLOG(5, "Building OF table");
- nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
- Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
- count, max, mostFrequent, nbSeq,
- OffFSELog, prevEntropy->fse.offcodeCTable,
- OF_defaultNorm, OF_defaultNormLog,
- defaultPolicy, strategy);
- assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- count, max, ofCodeTable, nbSeq,
- OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- prevEntropy->fse.offcodeCTable,
- sizeof(prevEntropy->fse.offcodeCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
- if (Offtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
- /* build CTable for MatchLengths */
- { unsigned max = MaxML;
- size_t const mostFrequent = HIST_countFast_wksp(
- count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
- DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
- nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
- MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
- count, max, mostFrequent, nbSeq,
- MLFSELog, prevEntropy->fse.matchlengthCTable,
- ML_defaultNorm, ML_defaultNormLog,
- ZSTD_defaultAllowed, strategy);
- assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
- { size_t const countSize = ZSTD_buildCTable(
- op, (size_t)(oend - op),
- CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
- count, max, mlCodeTable, nbSeq,
- ML_defaultNorm, ML_defaultNormLog, MaxML,
- prevEntropy->fse.matchlengthCTable,
- sizeof(prevEntropy->fse.matchlengthCTable),
- entropyWorkspace, entropyWkspSize);
- FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
- if (MLtype == set_compressed)
- lastNCount = op;
- op += countSize;
- assert(op <= oend);
- } }
-
- *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ { BYTE* const seqHead = op++;
+ /* build stats for sequences */
+ const ZSTD_symbolEncodingTypeStats_t stats =
+ ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ &prevEntropy->fse, &nextEntropy->fse,
+ op, oend,
+ strategy, count,
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
+ lastCountSize = stats.lastCountSize;
+ op += stats.size;
+ longOffsets = stats.longOffsets;
+ }
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
@@ -2273,9 +2899,9 @@
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
- if (lastNCount && (op - lastNCount) < 4) {
- /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
- assert(op - lastNCount == 3);
+ if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
+ /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(lastCountSize + bitstreamSize == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
@@ -2286,116 +2912,275 @@
return (size_t)(op - ostart);
}
-MEM_STATIC size_t
-ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- size_t srcSize,
- void* entropyWorkspace, size_t entropyWkspSize,
- int bmi2)
+static size_t
+ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ size_t blockSize,
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
{
- size_t const cSize = ZSTD_entropyCompressSequences_internal(
+ size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
+ dst, dstCapacity,
+ literals, litSize,
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
- dst, dstCapacity,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
*/
- if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) {
+ DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);
return 0; /* block not compressed */
- FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
+ }
+ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
/* Check compressibility */
- { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
- DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
+ DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
+ /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly.
+ * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above.
+ */
+ assert(cSize < ZSTD_BLOCKSIZE_MAX);
return cSize;
}
+static size_t
+ZSTD_entropyCompressSeqStore(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ return ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ dst, dstCapacity,
+ seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart),
+ srcSize,
+ seqStorePtr,
+ prevEntropy, nextEntropy,
+ cctxParams,
+ entropyWorkspace, entropyWkspSize,
+ bmi2);
+}
+
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
- static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
- ZSTD_compressBlock_doubleFast,
- ZSTD_compressBlock_greedy,
- ZSTD_compressBlock_lazy,
- ZSTD_compressBlock_lazy2,
- ZSTD_compressBlock_btlazy2,
- ZSTD_compressBlock_btopt,
- ZSTD_compressBlock_btultra,
- ZSTD_compressBlock_btultra2 },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST,
+ ZSTD_COMPRESSBLOCK_GREEDY,
+ ZSTD_COMPRESSBLOCK_LAZY,
+ ZSTD_COMPRESSBLOCK_LAZY2,
+ ZSTD_COMPRESSBLOCK_BTLAZY2,
+ ZSTD_COMPRESSBLOCK_BTOPT,
+ ZSTD_COMPRESSBLOCK_BTULTRA,
+ ZSTD_COMPRESSBLOCK_BTULTRA2
+ },
{ ZSTD_compressBlock_fast_extDict /* default for 0 */,
ZSTD_compressBlock_fast_extDict,
- ZSTD_compressBlock_doubleFast_extDict,
- ZSTD_compressBlock_greedy_extDict,
- ZSTD_compressBlock_lazy_extDict,
- ZSTD_compressBlock_lazy2_extDict,
- ZSTD_compressBlock_btlazy2_extDict,
- ZSTD_compressBlock_btopt_extDict,
- ZSTD_compressBlock_btultra_extDict,
- ZSTD_compressBlock_btultra_extDict },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT,
+ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT,
+ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT,
+ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT
+ },
{ ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
ZSTD_compressBlock_fast_dictMatchState,
- ZSTD_compressBlock_doubleFast_dictMatchState,
- ZSTD_compressBlock_greedy_dictMatchState,
- ZSTD_compressBlock_lazy_dictMatchState,
- ZSTD_compressBlock_lazy2_dictMatchState,
- ZSTD_compressBlock_btlazy2_dictMatchState,
- ZSTD_compressBlock_btopt_dictMatchState,
- ZSTD_compressBlock_btultra_dictMatchState,
- ZSTD_compressBlock_btultra_dictMatchState },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE
+ },
{ NULL /* default for 0 */,
NULL,
NULL,
- ZSTD_compressBlock_greedy_dedicatedDictSearch,
- ZSTD_compressBlock_lazy_dedicatedDictSearch,
- ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH,
+ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH,
+ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH,
NULL,
NULL,
NULL,
NULL }
};
- ZSTD_blockCompressor selectedCompressor;
+ ZSTD_BlockCompressor_f selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
- selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
+ DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
+ static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = {
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW
+ }
+ };
+ DEBUGLOG(5, "Selecting a row-based matchfinder");
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
+ } else {
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ }
assert(selectedCompressor != NULL);
return selectedCompressor;
}
-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
-void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
- ssPtr->longLengthID = 0;
+ ssPtr->longLengthType = ZSTD_llt_none;
+}
+
+/* ZSTD_postProcessSequenceProducerResult() :
+ * Validates and post-processes sequences obtained through the external matchfinder API:
+ * - Checks whether nbExternalSeqs represents an error condition.
+ * - Appends a block delimiter to outSeqs if one is not already present.
+ * See zstd.h for context regarding block delimiters.
+ * Returns the number of sequences after post-processing, or an error code. */
+static size_t ZSTD_postProcessSequenceProducerResult(
+ ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize
+) {
+ RETURN_ERROR_IF(
+ nbExternalSeqs > outSeqsCapacity,
+ sequenceProducer_failed,
+ "External sequence producer returned error code %lu",
+ (unsigned long)nbExternalSeqs
+ );
+
+ RETURN_ERROR_IF(
+ nbExternalSeqs == 0 && srcSize > 0,
+ sequenceProducer_failed,
+ "Got zero sequences from external sequence producer for a non-empty src buffer!"
+ );
+
+ if (srcSize == 0) {
+ ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence));
+ return 1;
+ }
+
+ {
+ ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1];
+
+ /* We can return early if lastSeq is already a block delimiter. */
+ if (lastSeq.offset == 0 && lastSeq.matchLength == 0) {
+ return nbExternalSeqs;
+ }
+
+ /* This error condition is only possible if the external matchfinder
+ * produced an invalid parse, by definition of ZSTD_sequenceBound(). */
+ RETURN_ERROR_IF(
+ nbExternalSeqs == outSeqsCapacity,
+ sequenceProducer_failed,
+ "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!"
+ );
+
+ /* lastSeq is not a block delimiter, so we need to append one. */
+ ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence));
+ return nbExternalSeqs + 1;
+ }
}
-typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+/* ZSTD_fastSequenceLengthSum() :
+ * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*.
+ * Similar to another function in zstd_compress.c (determine_blockSize),
+ * except it doesn't check for a block delimiter to end summation.
+ * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P).
+ * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */
+static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) {
+ size_t matchLenSum, litLenSum, i;
+ matchLenSum = 0;
+ litLenSum = 0;
+ for (i = 0; i < seqBufSize; i++) {
+ litLenSum += seqBuf[i].litLength;
+ matchLenSum += seqBuf[i].matchLength;
+ }
+ return litLenSum + matchLenSum;
+}
+
+/*
+ * Function to validate sequences produced by a block compressor.
+ */
+static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams)
+{
+#if DEBUGLEVEL >= 1
+ const SeqDef* seq = seqStore->sequencesStart;
+ const SeqDef* const seqEnd = seqStore->sequences;
+ size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4;
+ for (; seq < seqEnd; ++seq) {
+ const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq);
+ assert(seqLength.matchLength >= matchLenLowerBound);
+ (void)seqLength;
+ (void)matchLenLowerBound;
+ }
+#else
+ (void)seqStore;
+ (void)cParams;
+#endif
+}
+
+static size_t
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
- ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
- if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
+ * additional 1. We need to revisit and change this logic to be more consistent */
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
@@ -2430,16 +3215,34 @@
zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
- assert(!zc->appliedParams.ldmParams.enableLdm);
+ assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
+
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
+ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
+
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
- } else if (zc->appliedParams.ldmParams.enableLdm) {
- rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+ } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ RawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
+ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
@@ -2452,90 +3255,196 @@
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
- } else { /* not long range mode */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+ } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) {
+ assert(
+ zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)
+ );
+ assert(zc->appliedParams.extSeqProdFunc != NULL);
+
+ { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
+
+ size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)(
+ zc->appliedParams.extSeqProdState,
+ zc->extSeqBuf,
+ zc->extSeqBufCapacity,
+ src, srcSize,
+ NULL, 0, /* dict and dictSize, currently not supported */
+ zc->appliedParams.compressionLevel,
+ windowSize
+ );
+
+ size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
+ zc->extSeqBuf,
+ nbExternalSeqs,
+ zc->extSeqBufCapacity,
+ srcSize
+ );
+
+ /* Return early if there is no error, since we don't need to worry about last literals */
+ if (!ZSTD_isError(nbPostProcessedSeqs)) {
+ ZSTD_SequencePosition seqPos = {0,0,0};
+ size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs);
+ RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
+ FORWARD_IF_ERROR(
+ ZSTD_transferSequences_wBlockDelim(
+ zc, &seqPos,
+ zc->extSeqBuf, nbPostProcessedSeqs,
+ src, srcSize,
+ zc->appliedParams.searchForExternalRepcodes
+ ),
+ "Failed to copy external sequences to seqStore!"
+ );
+ ms->ldmSeqStore = NULL;
+ DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs);
+ return ZSTDbss_compress;
+ }
+
+ /* Propagate the error if fallback is disabled */
+ if (!zc->appliedParams.enableMatchFinderFallback) {
+ return nbPostProcessedSeqs;
+ }
+
+ /* Fallback to software matchfinder */
+ { ZSTD_BlockCompressor_f const blockCompressor =
+ ZSTD_selectBlockCompressor(
+ zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
+ ms->ldmSeqStore = NULL;
+ DEBUGLOG(
+ 5,
+ "External sequence producer returned error code %lu. Falling back to internal parser.",
+ (unsigned long)nbExternalSeqs
+ );
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ } }
+ } else { /* not long range mode and no external matchfinder */
+ ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor(
+ zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
+ ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams);
return ZSTDbss_compress;
}
-static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
{
- const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
- const seqDef* seqStoreSeqs = seqStore->sequencesStart;
- size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
- size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
- size_t literalsRead = 0;
- size_t lastLLSize;
+ const SeqDef* inSeqs = seqStore->sequencesStart;
+ const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs);
+ const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart);
- ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+ ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex;
+ const size_t nbOutSequences = nbInSequences + 1;
+ size_t nbOutLiterals = 0;
+ Repcodes_t repcodes;
size_t i;
- repcodes_t updatedRepcodes;
- assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
- /* Ensure we have enough space for last literals "sequence" */
- assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
- ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- for (i = 0; i < seqStoreSeqSize; ++i) {
- U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
- outSeqs[i].litLength = seqStoreSeqs[i].litLength;
- outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
+ /* Bounds check that we have enough space for every input sequence
+ * and the block delimiter
+ */
+ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+ RETURN_ERROR_IF(
+ nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex),
+ dstSize_tooSmall,
+ "Not enough space to copy sequences");
+
+ ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes));
+ for (i = 0; i < nbInSequences; ++i) {
+ U32 rawOffset;
+ outSeqs[i].litLength = inSeqs[i].litLength;
+ outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH;
outSeqs[i].rep = 0;
+ /* Handle the possible single length >= 64K
+ * There can only be one because we add MINMATCH to every match length,
+ * and blocks are at most 128K.
+ */
if (i == seqStore->longLengthPos) {
- if (seqStore->longLengthID == 1) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
outSeqs[i].litLength += 0x10000;
- } else if (seqStore->longLengthID == 2) {
+ } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
outSeqs[i].matchLength += 0x10000;
}
}
- if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
- /* Derive the correct offset corresponding to a repcode */
- outSeqs[i].rep = seqStoreSeqs[i].offset;
+ /* Determine the raw offset given the offBase, which may be a repcode. */
+ if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) {
+ const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase);
+ assert(repcode > 0);
+ outSeqs[i].rep = repcode;
if (outSeqs[i].litLength != 0) {
- rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ rawOffset = repcodes.rep[repcode - 1];
} else {
- if (outSeqs[i].rep == 3) {
- rawOffset = updatedRepcodes.rep[0] - 1;
+ if (repcode == 3) {
+ assert(repcodes.rep[0] > 1);
+ rawOffset = repcodes.rep[0] - 1;
} else {
- rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
+ rawOffset = repcodes.rep[repcode];
}
}
+ } else {
+ rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase);
}
outSeqs[i].offset = rawOffset;
- /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
- so we provide seqStoreSeqs[i].offset - 1 */
- updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
- seqStoreSeqs[i].offset - 1,
- seqStoreSeqs[i].litLength == 0);
- literalsRead += outSeqs[i].litLength;
+
+ /* Update repcode history for the sequence */
+ ZSTD_updateRep(repcodes.rep,
+ inSeqs[i].offBase,
+ inSeqs[i].litLength == 0);
+
+ nbOutLiterals += outSeqs[i].litLength;
}
/* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
* If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
* for the block boundary, according to the API.
*/
- assert(seqStoreLiteralsSize >= literalsRead);
- lastLLSize = seqStoreLiteralsSize - literalsRead;
- outSeqs[i].litLength = (U32)lastLLSize;
- outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
- seqStoreSeqSize++;
- zc->seqCollector.seqIndex += seqStoreSeqSize;
+ assert(nbInLiterals >= nbOutLiterals);
+ {
+ const size_t lastLLSize = nbInLiterals - nbOutLiterals;
+ outSeqs[nbInSequences].litLength = (U32)lastLLSize;
+ outSeqs[nbInSequences].matchLength = 0;
+ outSeqs[nbInSequences].offset = 0;
+ assert(nbOutSequences == nbInSequences + 1);
+ }
+ seqCollector->seqIndex += nbOutSequences;
+ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+
+ return 0;
+}
+
+size_t ZSTD_sequenceBound(size_t srcSize) {
+ const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1;
+ const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1;
+ return maxNbSeq + maxNbDelims;
}
size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
- void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ void* dst; /* Make C90 happy. */
SeqCollector seqCollector;
+ {
+ int targetCBlockSize;
+ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), "");
+ RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0");
+ }
+ {
+ int nbWorkers;
+ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), "");
+ RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
+ }
+ dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
seqCollector.collectSequences = 1;
@@ -2544,8 +3453,12 @@
seqCollector.maxSequences = outSeqsSize;
zc->seqCollector = seqCollector;
- ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
- ZSTD_customFree(dst, ZSTD_defaultCMem);
+ {
+ const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
+ FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
+ }
+ assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));
return zc->seqCollector.seqIndex;
}
@@ -2574,19 +3487,17 @@
const size_t unrollMask = unrollSize - 1;
const size_t prefixLength = length & unrollMask;
size_t i;
- size_t u;
if (length == 1) return 1;
/* Check if prefix is RLE first before using unrolled loop */
if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
return 0;
}
for (i = prefixLength; i != length; i += unrollSize) {
+ size_t u;
for (u = 0; u < unrollSize; u += sizeof(size_t)) {
if (MEM_readST(ip + i + u) != valueST) {
return 0;
- }
- }
- }
+ } } }
return 1;
}
@@ -2594,7 +3505,7 @@
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
*/
-static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+static int ZSTD_maybeRLE(SeqStore_t const* seqStore)
{
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
@@ -2602,20 +3513,790 @@
return nbSeqs < 4 && nbLits < 10;
}
-static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
+static void
+ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
+{
+ ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
+ bs->prevCBlock = bs->nextCBlock;
+ bs->nextCBlock = tmp;
+}
+
+/* Writes the block header */
+static void
+writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
+{
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+}
+
+/* ZSTD_buildBlockEntropyStats_literals() :
+ * Builds entropy for the literals.
+ * Stores literals block type (raw, rle, compressed, repeat) and
+ * huffman description table to hufMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE workspace
+ * @return : size of huffman description table, or an error code
+ */
+static size_t
+ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int literalsCompressionIsDisabled,
+ void* workspace, size_t wkspSize,
+ int hufFlags)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
+ BYTE* const nodeWksp = countWkspStart + countWkspSize;
+ const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ unsigned huffLog = LitHufLog;
+ HUF_repeat repeat = prevHuf->repeatMode;
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (literalsCompressionIsDisabled) {
+ DEBUGLOG(5, "set_basic - disabled");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+
+ /* small ? don't even attempt compression (speed opt) */
+#ifndef COMPRESS_LITERALS_SIZE_MIN
+# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */
+#endif
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) {
+ DEBUGLOG(5, "set_basic - too small");
+ hufMetadata->hType = set_basic;
+ return 0;
+ } }
+
+ /* Scan input and build symbol stats */
+ { size_t const largest =
+ HIST_count_wksp (countWksp, &maxSymbolValue,
+ (const BYTE*)src, srcSize,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
+ if (largest == srcSize) {
+ /* only one literal symbol */
+ DEBUGLOG(5, "set_rle");
+ hufMetadata->hType = set_rle;
+ return 0;
+ }
+ if (largest <= (srcSize >> 7)+4) {
+ /* heuristic: likely not compressible */
+ DEBUGLOG(5, "set_basic - no gain");
+ hufMetadata->hType = set_basic;
+ return 0;
+ } }
+
+ /* Validate the previous Huffman table */
+ if (repeat == HUF_repeat_check
+ && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ repeat = HUF_repeat_none;
+ }
+
+ /* Build Huffman Tree */
+ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags);
+ assert(huffLog <= LitHufLog);
+ { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
+ maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
+ huffLog = (U32)maxBits;
+ }
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_repeat;
+ return 0;
+ } }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
+ }
+}
+
+
+/* ZSTD_buildDummySequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
+ * and updates nextEntropy to the appropriate repeatMode.
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
+{
+ ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0};
+ nextEntropy->litlength_repeatMode = FSE_repeat_none;
+ nextEntropy->offcode_repeatMode = FSE_repeat_none;
+ nextEntropy->matchlength_repeatMode = FSE_repeat_none;
+ return stats;
+}
+
+/* ZSTD_buildBlockEntropyStats_sequences() :
+ * Builds entropy for the sequences.
+ * Stores symbol compression modes and fse table to fseMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE wksp.
+ * @return : size of fse tables or error code */
+static size_t
+ZSTD_buildBlockEntropyStats_sequences(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
+{
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ BYTE* const ostart = fseMetadata->fseTablesBuffer;
+ BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
+ BYTE* op = ostart;
+ unsigned* countWorkspace = (unsigned*)workspace;
+ unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
+ size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
+ stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ prevEntropy, nextEntropy, op, oend,
+ strategy, countWorkspace,
+ entropyWorkspace, entropyWorkspaceSize)
+ : ZSTD_buildDummySequencesStatistics(nextEntropy);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype;
+ fseMetadata->lastCountSize = stats.lastCountSize;
+ return stats.size;
+}
+
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * Requires workspace size ENTROPY_WORKSPACE_SIZE
+ * @return : 0 on success, or an error code
+ * Note : also employed in superblock
+ */
+size_t ZSTD_buildBlockEntropyStats(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
+ int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD);
+ int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0;
+
+ entropyMetadata->hufMetadata.hufDesSize =
+ ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
+ &prevEntropy->huf, &nextEntropy->huf,
+ &entropyMetadata->hufMetadata,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ workspace, wkspSize, hufFlags);
+
+ FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
+ entropyMetadata->fseMetadata.fseTablesSize =
+ ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
+ &prevEntropy->fse, &nextEntropy->fse,
+ cctxParams,
+ &entropyMetadata->fseMetadata,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
+ return 0;
+}
+
+/* Returns the size estimate for the literals section (header + content) of a block */
+static size_t
+ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
+ U32 singleStream = litSize < 256;
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
+/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
+static size_t
+ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type,
+ const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
+ const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ (void)defaultMax;
+ cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
+ return nbSeq * 10;
+ }
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits >> 3;
+}
+
+/* Returns the size estimate for the sequences section (header + content) of a block */
+static size_t
+ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
+ size_t cSeqSizeEstimate = 0;
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
+ fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
+ fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
+ fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+/* Returns the size estimate for a given stream of literals, of, ll, ml */
+static size_t
+ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy)
+{
+ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return seqSize + literalsSize + ZSTD_blockHeaderSize;
+}
+
+/* Builds entropy statistics and uses them for blocksize estimation.
+ *
+ * @return: estimated compressed size of the seqStore, or a zstd error.
+ */
+static size_t
+ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc)
+{
+ ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
+ DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ entropyMetadata,
+ zc->tmpWorkspace, zc->tmpWkspSize), "");
+ return ZSTD_estimateBlockSize(
+ seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
+ seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
+ (size_t)(seqStore->sequences - seqStore->sequencesStart),
+ &zc->blockState.nextCBlock->entropy,
+ entropyMetadata,
+ zc->tmpWorkspace, zc->tmpWkspSize,
+ (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
+}
+
+/* Returns literals bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore)
+{
+ size_t literalsBytes = 0;
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ SeqDef const seq = seqStore->sequencesStart[i];
+ literalsBytes += seq.litLength;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
+ literalsBytes += 0x10000;
+ } }
+ return literalsBytes;
+}
+
+/* Returns match bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore)
{
- ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
- zc->blockState.prevCBlock = zc->blockState.nextCBlock;
- zc->blockState.nextCBlock = tmp;
+ size_t matchBytes = 0;
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ SeqDef seq = seqStore->sequencesStart[i];
+ matchBytes += seq.mlBase + MINMATCH;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
+ matchBytes += 0x10000;
+ } }
+ return matchBytes;
+}
+
+/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
+ * Stores the result in resultSeqStore.
+ */
+static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore,
+ const SeqStore_t* originalSeqStore,
+ size_t startIdx, size_t endIdx)
+{
+ *resultSeqStore = *originalSeqStore;
+ if (startIdx > 0) {
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
+ resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ }
+
+ /* Move longLengthPos into the correct position if necessary */
+ if (originalSeqStore->longLengthType != ZSTD_llt_none) {
+ if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
+ resultSeqStore->longLengthType = ZSTD_llt_none;
+ } else {
+ resultSeqStore->longLengthPos -= (U32)startIdx;
+ }
+ }
+ resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
+ if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
+ /* This accounts for possible last literals if the derived chunk reaches the end of the block */
+ assert(resultSeqStore->lit == originalSeqStore->lit);
+ } else {
+ size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
+ }
+ resultSeqStore->llCode += startIdx;
+ resultSeqStore->mlCode += startIdx;
+ resultSeqStore->ofCode += startIdx;
}
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, U32 frame)
+/*
+ * Returns the raw offset represented by the combination of offBase, ll0, and repcode history.
+ * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
+ */
+static U32
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0)
+{
+ U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */
+ assert(OFFBASE_IS_REPCODE(offBase));
+ if (adjustedRepCode == ZSTD_REP_NUM) {
+ assert(ll0);
+ /* litlength == 0 and offCode == 2 implies selection of first repcode - 1
+ * This is only valid if it results in a valid offset value, aka > 0.
+ * Note : it may happen that `rep[0]==1` in exceptional circumstances.
+ * In which case this function will return 0, which is an invalid offset.
+ * It's not an issue though, since this value will be
+ * compared and discarded within ZSTD_seqStore_resolveOffCodes().
+ */
+ return rep[0] - 1;
+ }
+ return rep[adjustedRepCode];
+}
+
+/*
+ * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
+ * due to emission of RLE/raw blocks that disturb the offset history,
+ * and replaces any repcodes within the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side.
+ * cRepcodes are updated exactly in accordance with the seqStore.
+ *
+ * Note : this function assumes seq->offBase respects the following numbering scheme :
+ * 0 : invalid
+ * 1-3 : repcode 1-3
+ * 4+ : real_offset+3
+ */
+static void
+ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes,
+ const SeqStore_t* const seqStore, U32 const nbSeq)
+{
+ U32 idx = 0;
+ U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;
+ for (; idx < nbSeq; ++idx) {
+ SeqDef* const seq = seqStore->sequencesStart + idx;
+ U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx);
+ U32 const offBase = seq->offBase;
+ assert(offBase > 0);
+ if (OFFBASE_IS_REPCODE(offBase)) {
+ U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0);
+ U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);
+ /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
+ * the repcode with the offset it actually references, determined by the compression
+ * repcode history.
+ */
+ if (dRawOffset != cRawOffset) {
+ seq->offBase = OFFSET_TO_OFFBASE(cRawOffset);
+ }
+ }
+ /* Compression repcode history is always updated with values directly from the unmodified seqStore.
+ * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
+ */
+ ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0);
+ ZSTD_updateRep(cRepcodes->rep, offBase, ll0);
+ }
+}
+
+/* ZSTD_compressSeqStore_singleBlock():
+ * Compresses a seqStore into a block with a block header, into the buffer dst.
+ *
+ * Returns the total size of that block (including header) or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
+ const SeqStore_t* const seqStore,
+ Repcodes_t* const dRep, Repcodes_t* const cRep,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock, U32 isPartition)
+{
+ const U32 rleMaxLength = 25;
+ BYTE* op = (BYTE*)dst;
+ const BYTE* ip = (const BYTE*)src;
+ size_t cSize;
+ size_t cSeqsSize;
+
+ /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
+ Repcodes_t const dRepOriginal = *dRep;
+ DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
+ if (isPartition)
+ ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
+ cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
+ srcSize,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
+ zc->bmi2);
+ FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
+
+ if (!zc->isFirstBlock &&
+ cSeqsSize < rleMaxLength &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+ /* We don't want to emit our first block as a RLE even if it qualifies because
+ * doing so will cause the decoder (cli only) to throw a "should consume all input error."
+ * This is only an issue for zstd <= v1.4.3
+ */
+ cSeqsSize = 1;
+ }
+
+ /* Sequence collection not supported when block splitting */
+ if (zc->seqCollector.collectSequences) {
+ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed");
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ if (cSeqsSize == 0) {
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "Nocompress block failed");
+ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else if (cSeqsSize == 1) {
+ cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "RLE compress block failed");
+ DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
+ cSize = ZSTD_blockHeaderSize + cSeqsSize;
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize);
+ }
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+/* Struct to keep track of where we are in our recursive calls. */
+typedef struct {
+ U32* splitLocations; /* Array of split indices */
+ size_t idx; /* The current index within splitLocations being worked on */
+} seqStoreSplits;
+
+#define MIN_SEQUENCES_BLOCK_SPLITTING 300
+
+/* Helper function to perform the recursive search for block splits.
+ * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
+ * If advantageous to split, then we recurse down the two sub-blocks.
+ * If not, or if an error occurred in estimation, then we do not recurse.
+ *
+ * Note: The recursion depth is capped by a heuristic minimum number of sequences,
+ * defined by MIN_SEQUENCES_BLOCK_SPLITTING.
+ * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
+ * In practice, recursion depth usually doesn't go beyond 4.
+ *
+ * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS.
+ * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
+ * maximum of 128 KB, this value is actually impossible to reach.
+ */
+static void
+ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+ ZSTD_CCtx* zc, const SeqStore_t* origSeqStore)
+{
+ SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
+ SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
+ SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
+ size_t estimatedOriginalSize;
+ size_t estimatedFirstHalfSize;
+ size_t estimatedSecondHalfSize;
+ size_t midIdx = (startIdx + endIdx)/2;
+
+ DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
+ assert(endIdx >= startIdx);
+ if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
+ DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx);
+ return;
+ }
+ ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
+ ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
+ estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
+ estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
+ estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
+ DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
+ estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
+ if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
+ return;
+ }
+ if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
+ DEBUGLOG(5, "split decided at seqNb:%zu", midIdx);
+ ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
+ splits->splitLocations[splits->idx] = (U32)midIdx;
+ splits->idx++;
+ ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
+ }
+}
+
+/* Base recursive function.
+ * Populates a table with intra-block partition indices that can improve compression ratio.
+ *
+ * @return: number of splits made (which equals the size of the partition table - 1).
+ */
+static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
+{
+ seqStoreSplits splits;
+ splits.splitLocations = partitions;
+ splits.idx = 0;
+ if (nbSeq <= 4) {
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq);
+ /* Refuse to try and split anything with less than 4 sequences */
+ return 0;
+ }
+ ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
+ splits.splitLocations[splits.idx] = nbSeq;
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
+ return splits.idx;
+}
+
+/* ZSTD_compressBlock_splitBlock():
+ * Attempts to split a given block into multiple blocks to improve compression ratio.
+ *
+ * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t blockSize,
+ U32 lastBlock, U32 nbSeq)
+{
+ size_t cSize = 0;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ size_t i = 0;
+ size_t srcBytesTotal = 0;
+ U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
+ SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
+ SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
+ size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
+
+ /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
+ * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
+ * separate repcode histories that simulate repcode history on compression and decompression side,
+ * and use the histories to determine whether we must replace a particular repcode with its raw offset.
+ *
+ * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
+ * or RLE. This allows us to retrieve the offset value that an invalid repcode references within
+ * a nocompress/RLE block.
+ * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
+ * the replacement offset value rather than the original repcode to update the repcode history.
+ * dRep also will be the final repcode history sent to the next block.
+ *
+ * See ZSTD_seqStore_resolveOffCodes() for more details.
+ */
+ Repcodes_t dRep;
+ Repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ if (numSplits == 0) {
+ size_t cSizeSingleBlock =
+ ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, blockSize,
+ lastBlock, 0 /* isPartition */);
+ FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
+ assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
+ assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize);
+ return cSizeSingleBlock;
+ }
+
+ ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
+ for (i = 0; i <= numSplits; ++i) {
+ size_t cSizeChunk;
+ U32 const lastPartition = (i == numSplits);
+ U32 lastBlockEntireSrc = 0;
+
+ size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
+ srcBytesTotal += srcBytes;
+ if (lastPartition) {
+ /* This is the final partition, need to account for possible last literals */
+ srcBytes += blockSize - srcBytesTotal;
+ lastBlockEntireSrc = lastBlock;
+ } else {
+ ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
+ }
+
+ cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, srcBytes,
+ lastBlockEntireSrc, 1 /* isPartition */);
+ DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size",
+ ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
+ FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
+
+ ip += srcBytes;
+ op += cSizeChunk;
+ dstCapacity -= cSizeChunk;
+ cSize += cSizeChunk;
+ *currSeqStore = *nextSeqStore;
+ assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize);
+ }
+ /* cRep and dRep may have diverged during the compression.
+ * If so, we use the dRep repcodes for the next block.
+ */
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 lastBlock)
+{
+ U32 nbSeq;
+ size_t cSize;
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock");
+ assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) {
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+ cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ return cSize;
+ }
+ nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
+ }
+
+ cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
+ FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 frame)
{
- /* This the upper bound for the length of an rle block.
- * This isn't the actual upper bound. Finding the real threshold
- * needs further investigation.
+ /* This is an estimated upper bound for the length of an rle block.
+ * This isn't the actual upper bound.
+ * Finding the real threshold needs further investigation.
*/
const U32 rleMaxLength = 25;
size_t cSize;
@@ -2627,30 +4308,28 @@
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
- if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+ if (bss == ZSTDbss_noCompress) {
+ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+ cSize = 0;
+ goto out;
+ }
}
if (zc->seqCollector.collectSequences) {
- ZSTD_copyBlockSequences(zc);
- ZSTD_confirmRepcodesAndEntropyTables(zc);
+ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
/* encode sequences and literals */
- cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
+ cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
zc->bmi2);
- if (zc->seqCollector.collectSequences) {
- ZSTD_copyBlockSequences(zc);
- return 0;
- }
-
-
if (frame &&
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input error."
@@ -2666,7 +4345,7 @@
out:
if (!ZSTD_isError(cSize) && cSize > 1) {
- ZSTD_confirmRepcodesAndEntropyTables(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
}
/* We check that dictionaries have offset codes available for the first
* block. After the first block, the offcode table might not have large
@@ -2713,18 +4392,19 @@
* * cSize >= blockBound(srcSize): We have expanded the block too much so
* emit an uncompressed block.
*/
- {
- size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
+ { size_t const cSize =
+ ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
if (cSize != ERROR(dstSize_tooSmall)) {
- size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
+ size_t const maxCSize =
+ srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
- ZSTD_confirmRepcodesAndEntropyTables(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return cSize;
}
}
}
- }
+ } /* if (bss == ZSTDbss_compress)*/
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
@@ -2753,15 +4433,15 @@
return cSize;
}
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
void const* ip,
void const* iend)
{
- if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
- U32 const maxDist = (U32)1 << params->cParams.windowLog;
- U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const maxDist = (U32)1 << params->cParams.windowLog;
+ if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
@@ -2777,43 +4457,87 @@
}
}
+#include "zstd_preSplit.h"
+
+static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings)
+{
+ /* split level based on compression strategy, from `fast` to `btultra2` */
+ static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 };
+ /* note: conservatively only split full blocks (128 KB) currently.
+ * While it's possible to go lower, let's keep it simple for a first implementation.
+ * Besides, benefits of splitting are reduced when blocks are already small.
+ */
+ if (srcSize < 128 KB || blockSizeMax < 128 KB)
+ return MIN(srcSize, blockSizeMax);
+ /* do not split incompressible data though:
+ * require verified savings to allow pre-splitting.
+ * Note: as a consequence, the first full block is not split.
+ */
+ if (savings < 3) {
+ DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings);
+ return 128 KB;
+ }
+ /* apply @splitLevel, or use default value (which depends on @strat).
+ * note that splitting heuristic is still conditioned by @savings >= 3,
+ * so the first block will not reach this code path */
+ if (splitLevel == 1) return 128 KB;
+ if (splitLevel == 0) {
+ assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2);
+ splitLevel = splitLevels[strat];
+ } else {
+ assert(2 <= splitLevel && splitLevel <= 6);
+ splitLevel -= 2;
+ }
+ return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize);
+}
+
/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
* Frame is supposed already started (header already produced)
-* @return : compressed size, or an error code
+* @return : compressed size, or an error code
*/
-static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
+static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastFrameChunk)
{
- size_t blockSize = cctx->blockSize;
+ size_t blockSizeMax = cctx->blockSizeMax;
size_t remaining = srcSize;
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+ S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
- DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
xxh64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
+ size_t const blockSize = ZSTD_optimalBlockSize(cctx,
+ ip, remaining,
+ blockSizeMax,
+ cctx->appliedParams.preBlockSplitter_level,
+ cctx->appliedParams.cParams.strategy,
+ savings);
+ U32 const lastBlock = lastFrameChunk & (blockSize == remaining);
+ assert(blockSize <= remaining);
- RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
+ * additional 1. We need to revisit and change this logic to be more consistent */
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
dstSize_tooSmall,
"not enough space to store compressed block");
- if (remaining < blockSize) blockSize = remaining;
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+ ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
/* Ensure hash/chain table insertion resumes no sooner than lowlimit */
if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
@@ -2824,6 +4548,10 @@
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
assert(cSize > 0);
assert(cSize <= blockSize + ZSTD_blockHeaderSize);
+ } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
+ assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
} else {
cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
@@ -2840,8 +4568,23 @@
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
- }
+ } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/
+ /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data.
+ * Without splitting, the maximum expansion is 3 bytes per full block.
+ * An adversarial input could attempt to fudge the split detector,
+ * and make it split incompressible data, resulting in more block headers.
+ * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block,
+ * and the splitter never creates blocks that small (current lower limit is 8 KB),
+ * there is already no risk to expand beyond ZSTD_COMPRESSBOUND() limit.
+ * But if the goal is to not expand by more than 3-bytes per 128 KB full block,
+ * then yes, it becomes possible to make the block splitter oversplit incompressible data.
+ * Using @savings, we enforce an even more conservative condition,
+ * requiring the presence of enough savings (at least 3 bytes) to authorize splitting,
+ * otherwise only full blocks are used.
+ * But being conservative is fine,
+ * since splitting barely compressible blocks is not fruitful anyway */
+ savings += (S64)blockSize - (S64)cSize;
ip += blockSize;
assert(remaining >= blockSize);
@@ -2860,8 +4603,10 @@
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
- const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
-{ BYTE* const op = (BYTE*)dst;
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize, U32 dictID)
+{
+ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
@@ -2942,19 +4687,15 @@
}
}
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
- RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
- "wrong cctx stage");
- RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
- parameter_unsupported,
- "incompatible with ldm");
+ assert(cctx->stage == ZSTDcs_init);
+ assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
cctx->externSeqStore.seq = seq;
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
cctx->externSeqStore.pos = 0;
cctx->externSeqStore.posInSequence = 0;
- return 0;
}
@@ -2963,7 +4704,7 @@
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
@@ -2983,11 +4724,12 @@
if (!srcSize) return fhSize; /* do not generate an empty block if no input */
- if (!ZSTD_window_update(&ms->window, src, srcSize)) {
+ if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
+ ms->forceNonContiguous = 0;
ms->nextToUpdate = ms->window.dictLimit;
}
- if (cctx->appliedParams.ldmParams.enableLdm) {
- ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+ if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
}
if (!frame) {
@@ -2997,7 +4739,7 @@
src, (BYTE const*)src + srcSize);
}
- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
@@ -3018,102 +4760,191 @@
}
}
-size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
+/* NOTE: Must just wrap ZSTD_compressContinue_public() */
+size_t ZSTD_compressContinue(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize);
+}
-size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx)
{
ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
assert(!ZSTD_checkCParams(cParams));
- return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+ return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog);
+}
+
+/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ return ZSTD_getBlockSize_deprecated(cctx);
}
-size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
- { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
+/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize);
+}
+
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
- ldmState_t* ls,
- ZSTD_cwksp* ws,
- ZSTD_CCtx_params const* params,
- const void* src, size_t srcSize,
- ZSTD_dictTableLoadMethod_e dtlm)
+static size_t
+ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
+ int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
+
+ /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
- ZSTD_window_update(&ms->window, src, srcSize);
+ { /* Ensure large dictionaries can't cause index overflow */
+
+ /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
+ * Dictionaries right at the edge will immediately trigger overflow
+ * correction, but I don't want to insert extra constraints here.
+ */
+ U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;
+
+ int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(¶ms->cParams);
+ if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
+ /* Some dictionary matchfinders in zstd use "short cache",
+ * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
+ * CDict hashtable entry as a tag rather than as part of an index.
+ * When short cache is used, we need to truncate the dictionary
+ * so that its indices don't overlap with the tag. */
+ U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
+ maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
+ assert(!loadLdmDict);
+ }
+
+ /* If the dictionary is too large, only load the suffix of the dictionary. */
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
+ }
+
+ if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ /* We must have cleared our windows when our source is this large. */
+ assert(ZSTD_window_isEmpty(ms->window));
+ if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window));
+ }
+ ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
+
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+
+ if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict");
+ ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
+ ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams);
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes");
+ }
+
+ /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
+ { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31);
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
+ }
+
+ ms->nextToUpdate = (U32)(ip - ms->window.base);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
-
- if (params->ldmParams.enableLdm && ls != NULL) {
- ZSTD_window_update(&ls->window, src, srcSize);
- ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
- }
-
- /* Assert that we the ms params match the params we're being given */
- ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
if (srcSize <= HASH_READ_SIZE) return 0;
- while (iend - ip > HASH_READ_SIZE) {
- size_t const remaining = (size_t)(iend - ip);
- size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
- const BYTE* const ichunk = ip + chunk;
-
- ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
-
- if (params->ldmParams.enableLdm && ls != NULL)
- ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, ¶ms->ldmParams);
+ ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
- switch(params->cParams.strategy)
- {
- case ZSTD_fast:
- ZSTD_fillHashTable(ms, ichunk, dtlm);
- break;
- case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
- break;
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, dtlm, tfp);
+ break;
+ case ZSTD_dfast:
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
+ break;
- case ZSTD_greedy:
- case ZSTD_lazy:
- case ZSTD_lazy2:
- if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
- assert(chunk == remaining); /* must load everything in one go */
- ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
- } else if (chunk >= HASH_READ_SIZE) {
- ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
+ assert(srcSize >= HASH_READ_SIZE);
+ if (ms->dedicatedDictSearch) {
+ assert(ms->chainTable != NULL);
+ ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
+ } else {
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ if (params->useRowMatchFinder == ZSTD_ps_enable) {
+ size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ZSTD_row_update(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using row-based hash table for lazy dict");
+ } else {
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using chain-based hash table for lazy dict");
}
- break;
+ }
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
+ break;
- case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
- case ZSTD_btopt:
- case ZSTD_btultra:
- case ZSTD_btultra2:
- if (chunk >= HASH_READ_SIZE)
- ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
- break;
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
+ assert(srcSize >= HASH_READ_SIZE);
+ DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
+ break;
- default:
- assert(0); /* not possible : not a valid strategy id */
- }
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
- ip = ichunk;
- }
-
ms->nextToUpdate = (U32)(iend - ms->window.base);
return 0;
}
@@ -3150,20 +4981,19 @@
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
- dictEnd-dictPtr, &hasZeroWeights);
+ (size_t)(dictEnd-dictPtr), &hasZeroWeights);
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
- if (!hasZeroWeights)
+ if (!hasZeroWeights && maxSymbolValue == 255)
bs->entropy.huf.repeatMode = HUF_repeat_valid;
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
- RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
dictPtr += hufHeaderSize;
}
{ unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
/* fill all offset symbols to avoid garbage at end of table */
@@ -3178,7 +5008,7 @@
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -3192,7 +5022,7 @@
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -3226,7 +5056,7 @@
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
} } }
- return dictPtr - (const BYTE*)dict;
+ return (size_t)(dictPtr - (const BYTE*)dict);
}
/* Dictionary format :
@@ -3239,18 +5069,18 @@
* dictSize supposed >= 8
*/
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp,
void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
size_t dictID;
size_t eSize;
-
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<= 8);
assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
@@ -3263,7 +5093,7 @@
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
- ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
+ ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), "");
}
return dictID;
}
@@ -3272,13 +5102,14 @@
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp,
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
@@ -3291,13 +5122,13 @@
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
- return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
+ return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp);
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(
- ms, ls, ws, params, dict, dictSize, dtlm);
+ ms, ls, ws, params, dict, dictSize, dtlm, tfp);
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
@@ -3305,13 +5136,14 @@
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
- bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+ bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace);
}
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
+ * Assumption : either @dict OR @cdict (or none) is non-NULL, never both
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
@@ -3321,6 +5153,7 @@
const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
+ size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
@@ -3335,22 +5168,23 @@
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
- FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ dictContentSize,
ZSTDcrp_makeClean, zbuff) , "");
{ size_t const dictID = cdict ?
ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
- cctx->entropyWorkspace)
+ ZSTD_tfp_forCCtx, cctx->tmpWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
- dictContentType, dtlm, cctx->entropyWorkspace);
+ dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
- cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
+ cctx->dictContentSize = dictContentSize;
}
return 0;
}
@@ -3387,11 +5221,11 @@
&cctxParams, pledgedSrcSize);
}
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+static size_t
+ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_CCtx_params cctxParams;
- {
- ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
}
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
@@ -3399,9 +5233,15 @@
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
+size_t
+ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel);
+}
+
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
- return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+ return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel);
}
@@ -3412,14 +5252,13 @@
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
@@ -3429,8 +5268,9 @@
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
- RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
- MEM_writeLE32(op, cBlockHeader24);
+ ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3);
+ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue");
+ MEM_writeLE24(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
}
@@ -3444,7 +5284,7 @@
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op-ostart;
+ return (size_t)(op-ostart);
}
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
@@ -3453,9 +5293,9 @@
(void)extraCSize;
}
-size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
@@ -3479,21 +5319,28 @@
return cSize + endResult;
}
+/* NOTE: Must just wrap ZSTD_compressEnd_public() */
+size_t ZSTD_compressEnd(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
+}
+
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params)
{
- ZSTD_CCtx_params cctxParams;
DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
- ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, ZSTD_NO_CLEVEL);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, ¶ms, ZSTD_NO_CLEVEL);
return ZSTD_compress_advanced_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
- &cctxParams);
+ &cctx->simpleApiParams);
}
/* Internal */
@@ -3508,7 +5355,7 @@
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
@@ -3517,14 +5364,13 @@
const void* dict, size_t dictSize,
int compressionLevel)
{
- ZSTD_CCtx_params cctxParams;
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
assert(params.fParams.contentSizeFlag == 1);
- ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
}
DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
- return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
}
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
@@ -3561,7 +5407,10 @@
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
- + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
+ * in case we are using DDS with row-hash. */
+ + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
+ /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
@@ -3592,9 +5441,6 @@
assert(!ZSTD_checkCParams(params.cParams));
cdict->matchState.cParams = params.cParams;
cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
- if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
- cdict->matchState.dedicatedDictSearch = 0;
- }
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictContent = dictBuffer;
} else {
@@ -3615,6 +5461,7 @@
&cdict->matchState,
&cdict->workspace,
¶ms.cParams,
+ params.useRowMatchFinder,
ZSTDcrp_makeClean,
ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
@@ -3626,7 +5473,7 @@
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
¶ms, cdict->dictContent, cdict->dictContentSize,
- dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
@@ -3636,16 +5483,21 @@
return 0;
}
-static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
+static ZSTD_CDict*
+ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams,
+ ZSTD_ParamSwitch_e useRowMatchFinder,
+ int enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize);
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
- ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
+ ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
(dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
@@ -3664,7 +5516,7 @@
ZSTD_cwksp_move(&cdict->workspace, &ws);
cdict->customMem = customMem;
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
-
+ cdict->useRowMatchFinder = useRowMatchFinder;
return cdict;
}
}
@@ -3677,6 +5529,7 @@
{
ZSTD_CCtx_params cctxParams;
ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
ZSTD_CCtxParams_init(&cctxParams, 0);
cctxParams.cParams = cParams;
cctxParams.customMem = customMem;
@@ -3686,7 +5539,7 @@
&cctxParams, customMem);
}
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ZSTD_CDict* ZSTD_createCDict_advanced2(
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
@@ -3697,7 +5550,7 @@
ZSTD_compressionParameters cParams;
ZSTD_CDict* cdict;
- DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
if (cctxParams.enableDedicatedDictSearch) {
@@ -3716,13 +5569,16 @@
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
}
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch);
cctxParams.cParams = cParams;
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
cdict = ZSTD_createCDict_advanced_internal(dictSize,
dictLoadMethod, cctxParams.cParams,
+ cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
- if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
cctxParams) )) {
@@ -3776,7 +5632,7 @@
* workspaceSize: Use ZSTD_estimateCDictSize()
* to determine how large workspace must be.
* cParams : use ZSTD_getCParams() to transform a compression level
- * into its relevants cParams.
+ * into its relevant cParams.
* @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
@@ -3788,7 +5644,9 @@
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
+ /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
@@ -3797,6 +5655,7 @@
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
+ DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize);
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
@@ -3807,12 +5666,13 @@
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
- DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
- (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
ZSTD_CCtxParams_init(¶ms, 0);
params.cParams = cParams;
+ params.useRowMatchFinder = useRowMatchFinder;
+ cdict->useRowMatchFinder = useRowMatchFinder;
+ cdict->compressionLevel = ZSTD_NO_CLEVEL;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
@@ -3839,15 +5699,15 @@
return cdict->dictID;
}
-
-/* ZSTD_compressBegin_usingCDict_advanced() :
- * cdict must be != NULL */
-size_t ZSTD_compressBegin_usingCDict_advanced(
+/* ZSTD_compressBegin_usingCDict_internal() :
+ * Implementation of various ZSTD_compressBegin_usingCDict* functions.
+ */
+static size_t ZSTD_compressBegin_usingCDict_internal(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
ZSTD_CCtx_params cctxParams;
- DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
/* Initialize the cctxParams from the cdict */
{
@@ -3879,23 +5739,51 @@
ZSTDb_not_buffered);
}
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * This function is DEPRECATED.
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
+}
+
/* ZSTD_compressBegin_usingCDict() :
- * pledgedSrcSize=0 means "unknown"
- * if pledgedSrcSize>0, it will enable contentSizeFlag */
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
- ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
- DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
- return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict);
+}
+
+/*! ZSTD_compress_usingCDict_internal():
+ * Implementation of various ZSTD_compress_usingCDict* functions.
+ */
+static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}
+/*! ZSTD_compress_usingCDict_advanced():
+ * This function is DEPRECATED.
+ */
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
- FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
/*! ZSTD_compress_usingCDict() :
@@ -3909,7 +5797,7 @@
const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
- return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
@@ -3950,7 +5838,7 @@
return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
-static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
return ZSTD_cpm_attachDict;
@@ -4081,30 +5969,41 @@
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
- size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
- if (hintInSize==0) hintInSize = cctx->blockSize;
- return hintInSize;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ return cctx->blockSizeMax - cctx->stableIn_notConsumed;
+ }
+ assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSizeMax;
+ return hintInSize;
+ }
}
/* ZSTD_compressStream_generic():
* internal function for all *compressStream*() variants
- * non-static, because can be called from zstdmt_compress.c
- * @return : hint size for next input */
+ * @return : hint size for next input to complete ongoing block */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode)
{
- const char* const istart = (const char*)input->src;
- const char* const iend = input->size != 0 ? istart + input->size : istart;
- const char* ip = input->pos != 0 ? istart + input->pos : istart;
- char* const ostart = (char*)output->dst;
- char* const oend = output->size != 0 ? ostart + output->size : ostart;
- char* op = output->pos != 0 ? ostart + output->pos : ostart;
+ const char* const istart = (assert(input != NULL), (const char*)input->src);
+ const char* const iend = (istart != NULL) ? istart + input->size : istart;
+ const char* ip = (istart != NULL) ? istart + input->pos : istart;
+ char* const ostart = (assert(output != NULL), (char*)output->dst);
+ char* const oend = (ostart != NULL) ? ostart + output->size : ostart;
+ char* op = (ostart != NULL) ? ostart + output->pos : ostart;
U32 someMoreWork = 1;
/* check expectations */
- DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos);
+ assert(zcs != NULL);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ assert(input->pos >= zcs->stableIn_notConsumed);
+ input->pos -= zcs->stableIn_notConsumed;
+ if (ip) ip -= zcs->stableIn_notConsumed;
+ zcs->stableIn_notConsumed = 0;
+ }
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
@@ -4113,8 +6012,10 @@
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
}
- assert(output->pos <= output->size);
+ if (input->src == NULL) assert(input->size == 0);
assert(input->pos <= input->size);
+ if (output->dst == NULL) assert(output->size == 0);
+ assert(output->pos <= output->size);
assert((U32)flushMode <= (U32)ZSTD_e_end);
while (someMoreWork) {
@@ -4125,12 +6026,13 @@
case zcss_load:
if ( (flushMode == ZSTD_e_end)
- && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
- size_t const cSize = ZSTD_compressEnd(zcs,
- op, oend-op, ip, iend-ip);
+ size_t const cSize = ZSTD_compressEnd_public(zcs,
+ op, (size_t)(oend-op),
+ ip, (size_t)(iend-ip));
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
ip = iend;
@@ -4144,10 +6046,9 @@
size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
- ip, iend-ip);
+ ip, (size_t)(iend-ip));
zcs->inBuffPos += loaded;
- if (loaded != 0)
- ip += loaded;
+ if (ip) ip += loaded;
if ( (flushMode == ZSTD_e_continue)
&& (zcs->inBuffPos < zcs->inBuffTarget) ) {
/* not enough input to fill full block : stop here */
@@ -4157,6 +6058,20 @@
&& (zcs->inBuffPos == zcs->inToCompress) ) {
/* empty */
someMoreWork = 0; break;
+ }
+ } else {
+ assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
+ if ( (flushMode == ZSTD_e_continue)
+ && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) {
+ /* can't compress a full block : stop here */
+ zcs->stableIn_notConsumed = (size_t)(iend - ip);
+ ip = iend; /* pretend to have consumed input */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (ip == iend) ) {
+ /* empty */
+ someMoreWork = 0; break;
}
}
/* compress current block (note : this stage cannot be stopped in the middle) */
@@ -4164,10 +6079,9 @@
{ int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
void* cDst;
size_t cSize;
- size_t oSize = oend-op;
- size_t const iSize = inputBuffered
- ? zcs->inBuffPos - zcs->inToCompress
- : MIN((size_t)(iend - ip), zcs->blockSize);
+ size_t oSize = (size_t)(oend-op);
+ size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSizeMax);
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
@@ -4175,34 +6089,31 @@
if (inputBuffered) {
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize,
+ ZSTD_compressEnd_public(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize) :
- ZSTD_compressContinue(zcs, cDst, oSize,
+ ZSTD_compressContinue_public(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize);
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
/* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax;
if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
- } else {
- unsigned const lastBlock = (ip + iSize == iend);
- assert(flushMode == ZSTD_e_end /* Already validated */);
+ } else { /* !inputBuffered, hence ZSTD_bm_stable */
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend);
cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
- ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize);
/* Consume the input prior to error checking to mirror buffered mode. */
- if (iSize > 0)
- ip += iSize;
+ if (ip) ip += iSize;
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
- if (lastBlock)
- assert(ip == iend);
+ if (lastBlock) assert(ip == iend);
}
if (cDst == op) { /* no need to flush */
op += cSize;
@@ -4251,8 +6162,8 @@
}
}
- input->pos = ip - istart;
- output->pos = op - ostart;
+ input->pos = (size_t)(ip - istart);
+ output->pos = (size_t)(op - ostart);
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
@@ -4272,8 +6183,10 @@
/* After a compression call set the expected input/output buffer.
* This is validated at the start of the next compression call.
*/
-static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+static void
+ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input)
{
+ DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)");
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
cctx->expectedInBuffer = *input;
}
@@ -4292,46 +6205,57 @@
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
ZSTD_inBuffer const expect = cctx->expectedInBuffer;
- if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
- RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
- if (endOp != ZSTD_e_end)
- RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ if (expect.src != input->src || expect.pos != input->pos)
+ RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!");
}
+ (void)endOp;
if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
size_t const outBufferSize = output->size - output->pos;
if (cctx->expectedOutBufferSize != outBufferSize)
- RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!");
}
return 0;
}
+/*
+ * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize.
+ * Otherwise, it's ignored.
+ * @return: 0 on success, or a ZSTD_error code otherwise.
+ */
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
ZSTD_EndDirective endOp,
- size_t inSize) {
+ size_t inSize)
+{
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
- if (cctx->cdict)
- params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
- DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
- if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
- {
- size_t const dictSize = prefixDict.dict
+ if (cctx->cdict && !cctx->localDict.cdict) {
+ /* Let the cdict's compression level take priority over the requested params.
+ * But do not take the cdict's compression level if the "cdict" is actually a localDict
+ * generated from ZSTD_initLocalDict().
+ */
+ params.compressionLevel = cctx->cdict->compressionLevel;
+ }
+ DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */
+
+ { size_t const dictSize = prefixDict.dict
? prefixDict.dictSize
: (cctx->cdict ? cctx->cdict->dictContentSize : 0);
- ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1);
+ ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1);
params.cParams = ZSTD_getCParamsFromCCtxParams(
¶ms, cctx->pledgedSrcSizePlusOne-1,
dictSize, mode);
}
- if (ZSTD_CParams_shouldEnableLdm(¶ms.cParams)) {
- /* Enable LDM by default for optimal parser and window size >= 128MB */
- DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
- params.ldmParams.enableLdm = 1;
- }
+ params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, ¶ms.cParams);
+ params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams);
+ params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams);
+ params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
+ params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize);
+ params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel);
{ U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
@@ -4347,7 +6271,7 @@
/* for small input: avoid automatic flush on reaching end of block, since
* it would require to add a 3-bytes null block to end frame
*/
- cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize);
} else {
cctx->inBuffTarget = 0;
}
@@ -4358,6 +6282,8 @@
return 0;
}
+/* @return provides a minimum amount of data remaining to be flushed from internal buffers
+ */
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
@@ -4372,8 +6298,27 @@
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
- FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
- ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */
+ size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed;
+ if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */
+ && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */
+ && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */
+ if (cctx->stableIn_notConsumed) { /* not the first time */
+ /* check stable source guarantees */
+ RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer");
+ RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos");
+ }
+ /* pretend input was consumed, to give a sense forward progress */
+ input->pos = input->size;
+ /* save stable inBuffer, for later control, and flush/end */
+ cctx->expectedInBuffer = *input;
+ /* but actually input wasn't consumed, so keep track of position from where compression shall resume */
+ cctx->stableIn_notConsumed += inputSize;
+ /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */
+ return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */
+ }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
}
/* end of transparent initialization stage */
@@ -4391,13 +6336,20 @@
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp)
{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ ZSTD_outBuffer output;
+ ZSTD_inBuffer input;
+ output.dst = dst;
+ output.size = dstCapacity;
+ output.pos = *dstPos;
+ input.src = src;
+ input.size = srcSize;
+ input.pos = *srcPos;
/* ZSTD_compressStream2() will check validity of dstPos and srcPos */
- size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
+ { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+ }
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
@@ -4420,6 +6372,7 @@
/* Reset to the original values. */
cctx->requestedParams.inBufferMode = originalInBufferMode;
cctx->requestedParams.outBufferMode = originalOutBufferMode;
+
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
@@ -4430,64 +6383,66 @@
}
}
-typedef struct {
- U32 idx; /* Index in array of ZSTD_Sequence */
- U32 posInSequence; /* Position within sequence at idx */
- size_t posInSrc; /* Number of bytes given by sequences provided so far */
-} ZSTD_sequencePosition;
-
-/* Returns a ZSTD error code if sequence is not valid */
-static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
- size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
- size_t offsetBound;
- U32 windowSize = 1 << windowLog;
- /* posInSrc represents the amount of data the the decoder would decode up to this point.
+/* ZSTD_validateSequence() :
+ * @offBase : must use the format required by ZSTD_storeSeq()
+ * @returns a ZSTD error code if sequence is not valid
+ */
+static size_t
+ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch,
+ size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
+{
+ U32 const windowSize = 1u << windowLog;
+ /* posInSrc represents the amount of data the decoder would decode up to this point.
* As long as the amount of data decoded is less than or equal to window size, offsets may be
* larger than the total length of output decoded in order to reference the dict, even larger than
* window size. After output surpasses windowSize, we're limited to windowSize offsets again.
*/
- offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
- RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
- RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
+ size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+ size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
+ RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
+ /* Validate maxNbSeq is large enough for the given matchLength and minMatch */
+ RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
return 0;
}
/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
-static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
- U32 offCode = rawOffset + ZSTD_REP_MOVE;
- U32 repCode = 0;
+static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+{
+ U32 offBase = OFFSET_TO_OFFBASE(rawOffset);
if (!ll0 && rawOffset == rep[0]) {
- repCode = 1;
+ offBase = REPCODE1_TO_OFFBASE;
} else if (rawOffset == rep[1]) {
- repCode = 2 - ll0;
+ offBase = REPCODE_TO_OFFBASE(2 - ll0);
} else if (rawOffset == rep[2]) {
- repCode = 3 - ll0;
+ offBase = REPCODE_TO_OFFBASE(3 - ll0);
} else if (ll0 && rawOffset == rep[0] - 1) {
- repCode = 3;
+ offBase = REPCODE3_TO_OFFBASE;
}
- if (repCode) {
- /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
- offCode = repCode - 1;
- }
- return offCode;
+ return offBase;
}
-/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
- * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+/* This function scans through an array of ZSTD_Sequence,
+ * storing the sequences it reads, until it reaches a block delimiter.
+ * Note that the block delimiter includes the last literals of the block.
+ * @blockSize must be == sum(sequence_lengths).
+ * @returns @blockSize on success, and a ZSTD_error otherwise.
*/
-static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize) {
+static size_t
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
+{
U32 idx = seqPos->idx;
+ U32 const startIdx = idx;
BYTE const* ip = (BYTE const*)(src);
const BYTE* const iend = ip + blockSize;
- repcodes_t updatedRepcodes;
+ Repcodes_t updatedRepcodes;
U32 dictSize;
- U32 litLength;
- U32 matchLength;
- U32 ll0;
- U32 offCode;
+
+ DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize);
if (cctx->cdict) {
dictSize = (U32)cctx->cdict->dictContentSize;
@@ -4496,28 +6451,60 @@
} else {
dictSize = 0;
}
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
- for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
- litLength = inSeqs[idx].litLength;
- matchLength = inSeqs[idx].matchLength;
- ll0 = litLength == 0;
- offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
- updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
+ U32 const litLength = inSeqs[idx].litLength;
+ U32 const matchLength = inSeqs[idx].matchLength;
+ U32 offBase;
- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (externalRepSearch == ZSTD_ps_disable) {
+ offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset);
+ } else {
+ U32 const ll0 = (litLength == 0);
+ offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch,
+ seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize,
- cctx->appliedParams.cParams.minMatch),
+ ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
- ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
ip += matchLength + litLength;
}
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found.");
+
+ /* If we skipped repcode search while parsing, we need to update repcodes now */
+ assert(externalRepSearch != ZSTD_ps_auto);
+ assert(idx >= startIdx);
+ if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) {
+ U32* const rep = updatedRepcodes.rep;
+ U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */
+
+ if (lastSeqIdx >= startIdx + 2) {
+ rep[2] = inSeqs[lastSeqIdx - 2].offset;
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else if (lastSeqIdx == startIdx + 1) {
+ rep[2] = rep[0];
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else {
+ assert(lastSeqIdx == startIdx);
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ }
+ }
+
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
if (inSeqs[idx].litLength) {
DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
@@ -4525,38 +6512,42 @@
ip += inSeqs[idx].litLength;
seqPos->posInSrc += inSeqs[idx].litLength;
}
- RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!");
seqPos->idx = idx+1;
- return 0;
+ return blockSize;
}
-/* Returns the number of bytes to move the current read position back by. Only non-zero
- * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
- * went wrong.
+/*
+ * This function attempts to scan through @blockSize bytes in @src
+ * represented by the sequences in @inSeqs,
+ * storing any (partial) sequences.
*
- * This function will attempt to scan through blockSize bytes represented by the sequences
- * in inSeqs, storing any (partial) sequences.
+ * Occasionally, we may want to reduce the actual number of bytes consumed from @src
+ * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH.
*
- * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
- * avoid splitting a match, or to avoid splitting a match such that it would produce a match
- * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ * @returns the number of bytes consumed from @src, necessarily <= @blockSize.
+ * Otherwise, it may return a ZSTD error if something went wrong.
*/
-static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize) {
+static size_t
+ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
+{
U32 idx = seqPos->idx;
U32 startPosInSequence = seqPos->posInSequence;
U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
size_t dictSize;
- BYTE const* ip = (BYTE const*)(src);
- BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
- repcodes_t updatedRepcodes;
+ const BYTE* const istart = (const BYTE*)(src);
+ const BYTE* ip = istart;
+ const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ Repcodes_t updatedRepcodes;
U32 bytesAdjustment = 0;
U32 finalMatchSplit = 0;
- U32 litLength;
- U32 matchLength;
- U32 rawOffset;
- U32 offCode;
+
+ /* TODO(embg) support fast parsing mode in noBlockDelim mode */
+ (void)externalRepSearch;
if (cctx->cdict) {
dictSize = cctx->cdict->dictContentSize;
@@ -4565,14 +6556,15 @@
} else {
dictSize = 0;
}
- DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
const ZSTD_Sequence currSeq = inSeqs[idx];
- litLength = currSeq.litLength;
- matchLength = currSeq.matchLength;
- rawOffset = currSeq.offset;
+ U32 litLength = currSeq.litLength;
+ U32 matchLength = currSeq.matchLength;
+ U32 const rawOffset = currSeq.offset;
+ U32 offBase;
/* Modify the sequence depending on where endPosInSequence lies */
if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
@@ -4586,7 +6578,6 @@
/* Move to the next sequence */
endPosInSequence -= currSeq.litLength + currSeq.matchLength;
startPosInSequence = 0;
- idx++;
} else {
/* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
does not reach the end of the match. So, we have to split the sequence */
@@ -4625,76 +6616,131 @@
}
}
/* Check if this offset can be represented with a repcode */
- { U32 ll0 = (litLength == 0);
- offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
- updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ { U32 const ll0 = (litLength == 0);
+ offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
}
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize,
- cctx->appliedParams.cParams.minMatch),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
- RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
- ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
ip += matchLength + litLength;
+ if (!finalMatchSplit)
+ idx++; /* Next Sequence */
}
DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
seqPos->idx = idx;
seqPos->posInSequence = endPosInSequence;
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
iend -= bytesAdjustment;
if (ip != iend) {
/* Store any last literals */
- U32 lastLLSize = (U32)(iend - ip);
+ U32 const lastLLSize = (U32)(iend - ip);
assert(ip <= iend);
DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
seqPos->posInSrc += lastLLSize;
}
- return bytesAdjustment;
+ return (size_t)(iend-istart);
}
-typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize);
-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
- ZSTD_sequenceCopier sequenceCopier = NULL;
- assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+/* @seqPos represents a position within @inSeqs,
+ * it is read and updated by this function,
+ * once the goal to produce a block of size @blockSize is reached.
+ * @return: nb of bytes consumed from @src, necessarily <= @blockSize.
+ */
+typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode)
+{
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode));
if (mode == ZSTD_sf_explicitBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
- } else if (mode == ZSTD_sf_noBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ return ZSTD_transferSequences_wBlockDelim;
+ }
+ assert(mode == ZSTD_sf_noBlockDelimiters);
+ return ZSTD_transferSequences_noDelim;
+}
+
+/* Discover the size of next block by searching for the delimiter.
+ * Note that a block delimiter **must** exist in this mode,
+ * otherwise it's an input error.
+ * The block size retrieved will be later compared to ensure it remains within bounds */
+static size_t
+blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos)
+{
+ int end = 0;
+ size_t blockSize = 0;
+ size_t spos = seqPos.idx;
+ DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize);
+ assert(spos <= inSeqsSize);
+ while (spos < inSeqsSize) {
+ end = (inSeqs[spos].offset == 0);
+ blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength;
+ if (end) {
+ if (inSeqs[spos].matchLength != 0)
+ RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0");
+ break;
+ }
+ spos++;
+ }
+ if (!end)
+ RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter");
+ return blockSize;
+}
+
+static size_t determine_blockSize(ZSTD_SequenceFormat_e mode,
+ size_t blockSize, size_t remaining,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ ZSTD_SequencePosition seqPos)
+{
+ DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining);
+ if (mode == ZSTD_sf_noBlockDelimiters) {
+ /* Note: more a "target" block size */
+ return MIN(remaining, blockSize);
}
- assert(sequenceCopier != NULL);
- return sequenceCopier;
+ assert(mode == ZSTD_sf_explicitBlockDelimiters);
+ { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
+ FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters");
+ if (explicitBlockSize > blockSize)
+ RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block");
+ if (explicitBlockSize > remaining)
+ RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source");
+ return explicitBlockSize;
+ }
}
-/* Compress, block-by-block, all of the sequences given.
+/* Compress all provided sequences, block-by-block.
*
- * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
+ * Returns the cumulative size of all compressed blocks (including their headers),
+ * otherwise a ZSTD error.
*/
-static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize) {
+static size_t
+ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize)
+{
size_t cSize = 0;
- U32 lastBlock;
- size_t blockSize;
- size_t compressedSeqsSize;
size_t remaining = srcSize;
- ZSTD_sequencePosition seqPos = {0, 0, 0};
+ ZSTD_SequencePosition seqPos = {0, 0, 0};
- BYTE const* ip = (BYTE const*)src;
+ const BYTE* ip = (BYTE const*)src;
BYTE* op = (BYTE*)dst;
- ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+ ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
/* Special case: empty frame */
@@ -4708,22 +6754,29 @@
}
while (remaining) {
+ size_t compressedSeqsSize;
size_t cBlockSize;
- size_t additionalByteAdjustment;
- lastBlock = remaining <= cctx->blockSize;
- blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters,
+ cctx->blockSizeMax, remaining,
+ inSeqs, inSeqsSize, seqPos);
+ U32 const lastBlock = (blockSize == remaining);
+ FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size");
+ assert(blockSize <= remaining);
ZSTD_resetSeqStore(&cctx->seqStore);
- DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
- additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
- FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
- blockSize -= additionalByteAdjustment;
+ blockSize = sequenceCopier(cctx,
+ &seqPos, inSeqs, inSeqsSize,
+ ip, blockSize,
+ cctx->appliedParams.searchForExternalRepcodes);
+ FORWARD_IF_ERROR(blockSize, "Bad sequence copy");
/* If blocks are too small, emit as a nocompress block */
- if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
+ * additional 1. We need to revisit and change this logic to be more consistent */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
- DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize);
cSize += cBlockSize;
ip += blockSize;
op += cBlockSize;
@@ -4732,39 +6785,40 @@
continue;
}
- compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
&cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
&cctx->appliedParams,
op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
blockSize,
- cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
cctx->bmi2);
FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
- DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
if (!cctx->isFirstBlock &&
ZSTD_maybeRLE(&cctx->seqStore) &&
- ZSTD_isRLE((BYTE const*)src, srcSize)) {
- /* We don't want to emit our first block as a RLE even if it qualifies because
- * doing so will cause the decoder (cli only) to throw a "should consume all input error."
- * This is only an issue for zstd <= v1.4.3
- */
+ ZSTD_isRLE(ip, blockSize)) {
+ /* Note: don't emit the first block as RLE even if it qualifies because
+ * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error
+ * "should consume all input error."
+ */
compressedSeqsSize = 1;
}
if (compressedSeqsSize == 0) {
/* ZSTD_noCompressBlock writes the block header as well */
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
- FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
- DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize);
} else if (compressedSeqsSize == 1) {
cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
- FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
- DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed");
+ DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize);
} else {
U32 cBlockHeader;
/* Error checking and repcodes update */
- ZSTD_confirmRepcodesAndEntropyTables(cctx);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
@@ -4772,11 +6826,10 @@
cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
MEM_writeLE24(op, cBlockHeader);
cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
- DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
}
cSize += cBlockSize;
- DEBUGLOG(4, "cSize running total: %zu", cSize);
if (lastBlock) {
break;
@@ -4787,40 +6840,50 @@
dstCapacity -= cBlockSize;
cctx->isFirstBlock = 0;
}
+ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
}
+ DEBUGLOG(4, "cSize final total: %zu", cSize);
return cSize;
}
-size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize) {
+ const void* src, size_t srcSize)
+{
BYTE* op = (BYTE*)dst;
size_t cSize = 0;
- size_t compressedBlocksSize = 0;
- size_t frameHeaderSize = 0;
/* Transparent initialization stage, same as compressStream2() */
- DEBUGLOG(3, "ZSTD_compressSequences()");
+ DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity);
assert(cctx != NULL);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+
/* Begin writing output, starting with frame header */
- frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
- op += frameHeaderSize;
- dstCapacity -= frameHeaderSize;
- cSize += frameHeaderSize;
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
xxh64_update(&cctx->xxhState, src, srcSize);
}
- /* cSize includes block header size and compressed sequences size */
- compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx,
op, dstCapacity,
inSeqs, inSeqsSize,
src, srcSize);
- FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
- cSize += compressedBlocksSize;
- dstCapacity -= compressedBlocksSize;
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+ /* Complete with frame checksum, if needed */
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
@@ -4829,26 +6892,557 @@
cSize += 4;
}
- DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ DEBUGLOG(4, "Final compressed size: %zu", cSize);
+ return cSize;
+}
+
+
+#if defined(__AVX2__)
+
+#include /* AVX2 intrinsics */
+
+/*
+ * Convert 2 sequences per iteration, using AVX2 intrinsics:
+ * - offset -> offBase = offset + 2
+ * - litLength -> (U16) litLength
+ * - matchLength -> (U16)(matchLength - 3)
+ * - rep is ignored
+ * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]).
+ *
+ * At the end, instead of extracting two __m128i,
+ * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1,
+ * then store the lower 16 bytes in one go.
+ *
+ * @returns 0 on succes, with no long length detected
+ * @returns > 0 if there is one long length (> 65535),
+ * indicating the position, and type.
+ */
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ /*
+ * addition:
+ * For each 128-bit half: (offset+2, litLength+0, matchLength-3, rep+0)
+ */
+ const __m256i addition = _mm256_setr_epi32(
+ ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */
+ ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */
+ );
+
+ /* limit: check if there is a long length */
+ const __m256i limit = _mm256_set1_epi32(65535);
+
+ /*
+ * shuffle mask for byte-level rearrangement in each 128-bit half:
+ *
+ * Input layout (after addition) per 128-bit half:
+ * [ offset+2 (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ]
+ * We only need:
+ * offBase (4 bytes) = offset+2
+ * litLength (2 bytes) = low 2 bytes of litLength
+ * mlBase (2 bytes) = low 2 bytes of (matchLength)
+ * => Bytes [0..3, 4..5, 8..9], zero the rest.
+ */
+ const __m256i mask = _mm256_setr_epi8(
+ /* For the lower 128 bits => sequence i */
+ 0, 1, 2, 3, /* offset+2 */
+ 4, 5, /* litLength (16 bits) */
+ 8, 9, /* matchLength (16 bits) */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+
+ /* For the upper 128 bits => sequence i+1 */
+ 16,17,18,19, /* offset+2 */
+ 20,21, /* litLength */
+ 24,25, /* matchLength */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80
+ );
+
+ /*
+ * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8).
+ * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3].
+ * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1.
+ */
+#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */
+
+ size_t longLen = 0, i = 0;
+
+ /* AVX permutation depends on the specific definition of target structures */
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6);
+
+ /* Process 2 sequences per loop iteration */
+ for (; i + 1 < nbSequences; i += 2) {
+ /* Load 2 ZSTD_Sequence (32 bytes) */
+ __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]);
+
+ /* Add {2, 0, -3, 0} in each 128-bit half */
+ __m256i vadd = _mm256_add_epi32(vin, addition);
+
+ /* Check for long length */
+ __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */
+ int ll_res = _mm256_movemask_epi8(ll_cmp);
+
+ /* Shuffle bytes so each half gives us the 8 bytes we need */
+ __m256i vshf = _mm256_shuffle_epi8(vadd, mask);
+ /*
+ * Now:
+ * Lane0 = seq0's 8 bytes
+ * Lane1 = 0
+ * Lane2 = seq1's 8 bytes
+ * Lane3 = 0
+ */
+
+ /* Permute 64-bit lanes => move Lane2 down into Lane1. */
+ __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8);
+ /*
+ * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1].
+ * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them.
+ */
+
+ /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */
+ _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm));
+ /*
+ * This writes out 16 bytes total:
+ * - offset 0..7 => seq0 (offBase, litLength, mlBase)
+ * - offset 8..15 => seq1 (offBase, litLength, mlBase)
+ */
+
+ /* check (unlikely) long lengths > 65535
+ * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27]
+ * => combined mask = 0x0FF00FF0
+ */
+ if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) {
+ /* long length detected: let's figure out which one*/
+ if (inSeqs[i].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (inSeqs[i].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ if (inSeqs[i+1].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1 + 1;
+ }
+ if (inSeqs[i+1].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + 1 + nbSequences + 1;
+ }
+ }
+ }
+
+ /* Handle leftover if @nbSequences is odd */
+ if (i < nbSequences) {
+ /* process last sequence */
+ assert(i == nbSequences - 1);
+ dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset);
+ dstSeqs[i].litLength = (U16)inSeqs[i].litLength;
+ dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH);
+ /* check (unlikely) long lengths > 65535 */
+ if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (UNLIKELY(inSeqs[i].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ }
+
+ return longLen;
+}
+
+/* the vector implementation could also be ported to SSSE3,
+ * but since this implementation is targeting modern systems (>= Sapphire Rapid),
+ * it's not useful to develop and maintain code for older pre-AVX2 platforms */
+
+#else /* no AVX2 */
+
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ size_t longLen = 0;
+ size_t n;
+ for (n=0; n 65535 */
+ if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = n + 1;
+ }
+ if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = n + nbSequences + 1;
+ }
+ }
+ return longLen;
+}
+
+#endif
+
+/*
+ * Precondition: Sequences must end on an explicit Block Delimiter
+ * @return: 0 on success, or an error code.
+ * Note: Sequence validation functionality has been disabled (removed).
+ * This is helpful to generate a lean main pipeline, improving performance.
+ * It may be re-inserted later.
+ */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int repcodeResolution)
+{
+ Repcodes_t updatedRepcodes;
+ size_t seqNb = 0;
+
+ DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences);
+
+ RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+
+ /* check end condition */
+ assert(nbSequences >= 1);
+ assert(inSeqs[nbSequences-1].matchLength == 0);
+ assert(inSeqs[nbSequences-1].offset == 0);
+
+ /* Convert Sequences from public format to internal format */
+ if (!repcodeResolution) {
+ size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1);
+ cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1;
+ if (longl) {
+ DEBUGLOG(5, "long length");
+ assert(cctx->seqStore.longLengthType == ZSTD_llt_none);
+ if (longl <= nbSequences-1) {
+ DEBUGLOG(5, "long match length detected at pos %zu", longl-1);
+ cctx->seqStore.longLengthType = ZSTD_llt_matchLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-1);
+ } else {
+ DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences);
+ assert(longl <= 2* (nbSequences-1));
+ cctx->seqStore.longLengthType = ZSTD_llt_literalLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1);
+ }
+ }
+ } else {
+ for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) {
+ U32 const litLength = inSeqs[seqNb].litLength;
+ U32 const matchLength = inSeqs[seqNb].matchLength;
+ U32 const ll0 = (litLength == 0);
+ U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
+ ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
+ }
+
+ /* If we skipped repcode search while parsing, we need to update repcodes now */
+ if (!repcodeResolution && nbSequences > 1) {
+ U32* const rep = updatedRepcodes.rep;
+
+ if (nbSequences >= 4) {
+ U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */
+ rep[2] = inSeqs[lastSeqIdx - 2].offset;
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else if (nbSequences == 3) {
+ rep[2] = rep[0];
+ rep[1] = inSeqs[0].offset;
+ rep[0] = inSeqs[1].offset;
+ } else {
+ assert(nbSequences == 2);
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = inSeqs[0].offset;
+ }
+ }
+
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
+
+ return 0;
+}
+
+#if defined(ZSTD_ARCH_X86_AVX2)
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t i;
+ __m256i const zeroVec = _mm256_setzero_si256();
+ __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */
+ ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */
+ size_t mSum = 0, lSum = 0;
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+
+ /* Process 2 structs (32 bytes) at a time */
+ for (i = 0; i + 2 <= nbSeqs; i += 2) {
+ /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */
+ __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]);
+ /* check end of block signal */
+ __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec);
+ int cmp_res = _mm256_movemask_epi8(cmp);
+ /* indices for match lengths correspond to bits [8..11], [24..27]
+ * => combined mask = 0x0F000F00 */
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ if (cmp_res & 0x0F000F00) break;
+ /* Accumulate in sumVec */
+ sumVec = _mm256_add_epi32(sumVec, data);
+ }
+
+ /* Horizontal reduction */
+ _mm256_store_si256((__m256i*)tmp, sumVec);
+ lSum = tmp[1] + tmp[5];
+ mSum = tmp[2] + tmp[6];
+
+ /* Handle the leftover */
+ for (; i < nbSeqs; i++) {
+ lSum += seqs[i].litLength;
+ mSum += seqs[i].matchLength;
+ if (seqs[i].matchLength == 0) break; /* end of block */
+ }
+
+ if (i==nbSeqs) {
+ /* reaching end of sequences: end of block signal was not present */
+ BlockSummary bs;
+ bs.nbSequences = ERROR(externalSequences_invalid);
+ return bs;
+ }
+ { BlockSummary bs;
+ bs.nbSequences = i+1;
+ bs.blockSize = lSum + mSum;
+ bs.litSize = lSum;
+ return bs;
+ }
+}
+
+#else
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t totalMatchSize = 0;
+ size_t litSize = 0;
+ size_t n;
+ assert(seqs);
+ for (n=0; nappliedParams.searchForExternalRepcodes == ZSTD_ps_enable);
+ assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto);
+
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize);
+ RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block");
+
+ /* Special case: empty frame */
+ if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE24(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (nbSequences) {
+ size_t compressedSeqsSize, cBlockSize, conversionStatus;
+ BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences);
+ U32 const lastBlock = (block.nbSequences == nbSequences);
+ FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block");
+ assert(block.nbSequences <= nbSequences);
+ RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer");
+ ZSTD_resetSeqStore(&cctx->seqStore);
+
+ conversionStatus = ZSTD_convertBlockSequences(cctx,
+ inSeqs, block.nbSequences,
+ repcodeResolution);
+ FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion");
+ inSeqs += block.nbSequences;
+ nbSequences -= block.nbSequences;
+ remaining -= block.blockSize;
+
+ /* Note: when blockSize is very small, other variant send it uncompressed.
+ * Here, we still send the sequences, because we don't have the original source to send it uncompressed.
+ * One could imagine in theory reproducing the source from the sequences,
+ * but that's complex and costly memory intensive, and goes against the objectives of this variant. */
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ literals, block.litSize,
+ &cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ /* note: the spec forbids for any compressed block to be larger than maximum block size */
+ if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0;
+ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
+ litSize -= block.litSize;
+ literals = (const char*)literals + block.litSize;
+
+ /* Note: difficult to check source for RLE block when only Literals are provided,
+ * but it could be considered from analyzing the sequence directly */
+
+ if (compressedSeqsSize == 0) {
+ /* Sending uncompressed blocks is out of reach, because the source is not provided.
+ * In theory, one could use the sequences to regenerate the source, like a decompressor,
+ * but it's complex, and memory hungry, killing the purpose of this variant.
+ * Current outcome: generate an error code.
+ */
+ RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block");
+ } else {
+ U32 cBlockHeader;
+ assert(compressedSeqsSize > 1); /* no RLE */
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write block header into beginning of block*/
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ op += cBlockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
+
+ if (lastBlock) {
+ assert(nbSequences == 0);
+ break;
+ }
+ }
+
+ RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed");
+ RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize);
+ DEBUGLOG(4, "cSize final total: %zu", cSize);
+ return cSize;
+}
+
+size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* literals, size_t litSize, size_t litCapacity,
+ size_t decompressedSize)
+{
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity);
+ assert(cctx != NULL);
+ if (litCapacity < litSize) {
+ RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)");
+ }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed");
+
+ if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) {
+ RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters");
+ }
+ if (cctx->appliedParams.validateSequences) {
+ RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation");
+ }
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum");
+ }
+
+ /* Begin writing output, starting with frame header */
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, decompressedSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ literals, litSize, decompressedSize);
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+
+ DEBUGLOG(4, "Final compressed size: %zu", cSize);
return cSize;
}
/*====== Finalize ======*/
+static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs)
+{
+ const ZSTD_inBuffer nullInput = { NULL, 0, 0 };
+ const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
+ return stableInput ? zcs->expectedInBuffer : nullInput;
+}
+
/*! ZSTD_flushStream() :
* @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
+ input.size = input.pos; /* do not ingest more input during flush */
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
-
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
- FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
+ FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed");
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
@@ -4861,117 +7455,11 @@
/*-===== Pre-defined compression levels =====-*/
+#include "clevels.h"
-#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
-
-static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
-{ /* "default" - for any srcSize > 256 KB */
- /* W, C, H, S, L, TL, strat */
- { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
- { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
- { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
- { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
- { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
- { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
- { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
- { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
- { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
- { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
- { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
- { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
- { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
- { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
- { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
- { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
- { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
- { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
- { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
- { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
- { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
- { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
- { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
-},
-{ /* for srcSize <= 256 KB */
- /* W, C, H, S, L, T, strat */
- { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
- { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
- { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
- { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
- { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
- { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
- { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
- { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
- { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
- { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
- { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
- { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
- { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
- { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
- { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
- { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
- { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
- { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
- { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
- { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
- { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
- { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
- { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
-},
-{ /* for srcSize <= 128 KB */
- /* W, C, H, S, L, T, strat */
- { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
- { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
- { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
- { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
- { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
- { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
- { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
- { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
- { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
- { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
- { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
- { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
- { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
- { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
- { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
- { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
- { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
- { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
- { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
- { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
- { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
- { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
- { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
-},
-{ /* for srcSize <= 16 KB */
- /* W, C, H, S, L, T, strat */
- { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
- { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
- { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
- { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
- { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
- { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
- { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
- { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
- { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
- { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
- { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
- { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
- { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
- { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
- { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
- { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
- { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
- { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
- { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
- { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
- { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
- { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
- { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
-},
-};
+int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
{
@@ -4999,7 +7487,7 @@
{
return (cParams->strategy >= ZSTD_greedy)
&& (cParams->strategy <= ZSTD_lazy2)
- && (cParams->hashLog >= cParams->chainLog)
+ && (cParams->hashLog > cParams->chainLog)
&& (cParams->chainLog <= 24);
}
@@ -5018,6 +7506,9 @@
case ZSTD_lazy:
case ZSTD_lazy2:
cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
+ if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
+ cParams->hashLog = ZSTD_HASHLOG_MIN;
+ }
break;
case ZSTD_btlazy2:
case ZSTD_btopt:
@@ -5027,7 +7518,7 @@
}
}
-static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
switch (mode) {
case ZSTD_cpm_unknown:
@@ -5051,10 +7542,8 @@
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
* Use dictSize == 0 for unknown or unused.
- * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
-
-
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
@@ -5068,24 +7557,17 @@
else row = compressionLevel;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
-#ifdef DSLAB_OPTIMIZE_COMPRESS
- // 自动根据L1DCache的大小设置hashlog和chainlog的值
- if (cp.hashLog > DSLAB_L1DCACHE_LOG - 4){
- cp.hashLog = DSLAB_L1DCACHE_LOG - 4;
- }
- if (cp.chainLog >= cp.hashLog){
- cp.chainLog = cp.hashLog - 1;
- }
-#endif
+ DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
/* acceleration factor */
if (compressionLevel < 0) {
int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
cp.targetLength = (unsigned)(-clampedCompressionLevel);
}
/* refine parameters based on srcSize & dictSize */
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto);
}
}
+
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Size values are optional, provide 0 if not known or unused */
@@ -5099,7 +7581,9 @@
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+static ZSTD_parameters
+ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
+{
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
@@ -5113,7 +7597,34 @@
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
+
+void ZSTD_registerSequenceProducer(
+ ZSTD_CCtx* zc,
+ void* extSeqProdState,
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
+ assert(zc != NULL);
+ ZSTD_CCtxParams_registerSequenceProducer(
+ &zc->requestedParams, extSeqProdState, extSeqProdFunc
+ );
+}
+
+void ZSTD_CCtxParams_registerSequenceProducer(
+ ZSTD_CCtx_params* params,
+ void* extSeqProdState,
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
+ assert(params != NULL);
+ if (extSeqProdFunc != NULL) {
+ params->extSeqProdFunc = extSeqProdFunc;
+ params->extSeqProdState = extSeqProdState;
+ } else {
+ params->extSeqProdFunc = NULL;
+ params->extSeqProdState = NULL;
+ }
+}
Index: lib/zstd/compress/hist.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
--- a/lib/zstd/compress/hist.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/hist.c (date 1740124241360)
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -26,6 +27,16 @@
/*-**************************************************************
* Histogram functions
****************************************************************/
+void HIST_add(unsigned* count, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+
+ while (ipUTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
--- a/lib/zstd/compress/zstd_compress_internal.h (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_compress_internal.h (date 1740124241403)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,7 +21,8 @@
***************************************/
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
-
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
+#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */
/*-*************************************
* Constants
@@ -32,7 +34,7 @@
It's not a big deal though : candidate will just be sorted again.
Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
@@ -57,7 +59,7 @@
} ZSTD_localDict;
typedef struct {
- HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
HUF_repeat repeatMode;
} ZSTD_hufCTables_t;
@@ -75,8 +77,120 @@
ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;
+/* *********************************************
+* Sequences *
+***********************************************/
+typedef struct SeqDef_s {
+ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
+ U16 litLength;
+ U16 mlBase; /* mlBase == matchLength - MINMATCH */
+} SeqDef;
+
+/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
+typedef struct {
+ SeqDef* sequencesStart;
+ SeqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} SeqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_SequenceLength;
+
+/*
+ * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq)
+{
+ ZSTD_SequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = seq->mlBase + MINMATCH;
+ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ seqLen.litLength += 0x10000;
+ }
+ if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ seqLen.matchLength += 0x10000;
+ }
+ }
+ return seqLen;
+}
+
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+
+/* *********************************************
+* Entropy buffer statistics structs and funcs *
+***********************************************/
+/* ZSTD_hufCTablesMetadata_t :
+ * Stores Literals Block Type for a super-block in hType, and
+ * huffman tree description in hufDesBuffer.
+ * hufDesSize refers to the size of huffman tree description in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
+typedef struct {
+ SymbolEncodingType_e hType;
+ BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+ size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/* ZSTD_fseCTablesMetadata_t :
+ * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ * fse tables in fseTablesBuffer.
+ * fseTablesSize refers to the size of fse tables in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
+typedef struct {
+ SymbolEncodingType_e llType;
+ SymbolEncodingType_e ofType;
+ SymbolEncodingType_e mlType;
+ BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+ size_t fseTablesSize;
+ size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+ ZSTD_hufCTablesMetadata_t hufMetadata;
+ ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * @return : 0 on success or error code */
+size_t ZSTD_buildBlockEntropyStats(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize);
+
+/* *******************************
+* Compression internals structs *
+*********************************/
+
typedef struct {
- U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */
+ U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
U32 len; /* Raw length of match */
} ZSTD_match_t;
@@ -93,28 +207,29 @@
stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
size_t size; /* The number of sequences. <= capacity. */
size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
+} RawSeqStore_t;
-UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
typedef struct {
- int price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
+ int price; /* price from beginning of segment to this position */
+ U32 off; /* offset of previous match */
+ U32 mlen; /* length of previous match */
+ U32 litlen; /* nb of literals since previous match */
+ U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */
} ZSTD_optimal_t;
typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3)
typedef struct {
/* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
unsigned* litFreq; /* table of literals statistics, of size 256 */
unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
- ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
- ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */
U32 litSum; /* nb of literals */
U32 litLengthSum; /* nb of litLength codes */
@@ -126,7 +241,7 @@
U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
- ZSTD_literalCompressionMode_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
} optState_t;
typedef struct {
@@ -135,15 +250,24 @@
} ZSTD_compressedBlockState_t;
typedef struct {
- BYTE const* nextSrc; /* next block here to continue on current prefix */
- BYTE const* base; /* All regular indexes relative to this position */
- BYTE const* dictBase; /* extDict indexes relative to this position */
- U32 dictLimit; /* below that point, need extDict */
- U32 lowLimit; /* below that point, no more valid data */
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more valid data */
+ U32 nbOverflowCorrections; /* Number of times overflow correction has run since
+ * ZSTD_window_init(). Useful for debugging coredumps
+ * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
+ */
} ZSTD_window_t;
-typedef struct ZSTD_matchState_t ZSTD_matchState_t;
-struct ZSTD_matchState_t {
+#define ZSTD_WINDOW_START_INDEX 2
+
+typedef struct ZSTD_MatchState_t ZSTD_MatchState_t;
+
+#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
+
+struct ZSTD_MatchState_t {
ZSTD_window_t window; /* State for window round buffer management */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
* When loadedDictEnd != 0, a dictionary is in use, and still valid.
@@ -154,22 +278,44 @@
*/
U32 nextToUpdate; /* index from which to continue table update */
U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+
+ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
+ BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+ U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */
+ U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */
+
U32* hashTable;
U32* hashTable3;
U32* chainTable;
+
+ int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+
int dedicatedDictSearch; /* Indicates whether this matchState is using the
* dedicated dictionary search structure.
*/
optState_t opt; /* optimal parser state */
- const ZSTD_matchState_t* dictMatchState;
+ const ZSTD_MatchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
- const rawSeqStore_t* ldmSeqStore;
+ const RawSeqStore_t* ldmSeqStore;
+
+ /* Controls prefetching in some dictMatchState matchfinders.
+ * This behavior is controlled from the cctx ms.
+ * This parameter has no effect in the cdict ms. */
+ int prefetchCDictTables;
+
+ /* When == 0, lazy match finders insert every position.
+ * When != 0, lazy match finders only insert positions they search.
+ * This allows them to skip much faster over incompressible data,
+ * at a small cost to compression ratio.
+ */
+ int lazySkipping;
};
typedef struct {
ZSTD_compressedBlockState_t* prevCBlock;
ZSTD_compressedBlockState_t* nextCBlock;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
} ZSTD_blockState_t;
typedef struct {
@@ -196,7 +342,7 @@
} ldmState_t;
typedef struct {
- U32 enableLdm; /* 1 if enable long distance matching */
+ ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
U32 hashLog; /* Log size of hashTable */
U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
U32 minMatchLength; /* Minimum match length */
@@ -227,7 +373,7 @@
* There is no guarantee that hint is close to actual source size */
ZSTD_dictAttachPref_e attachDictPref;
- ZSTD_literalCompressionMode_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
/* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
@@ -246,15 +392,54 @@
ZSTD_bufferMode_e outBufferMode;
/* Sequence compression API */
- ZSTD_sequenceFormat_e blockDelimiters;
+ ZSTD_SequenceFormat_e blockDelimiters;
int validateSequences;
+ /* Block splitting
+ * @postBlockSplitter executes split analysis after sequences are produced,
+ * it's more accurate but consumes more resources.
+ * @preBlockSplitter_level splits before knowing sequences,
+ * it's more approximative but also cheaper.
+ * Valid @preBlockSplitter_level values range from 0 to 6 (included).
+ * 0 means auto, 1 means do not split,
+ * then levels are sorted in increasing cpu budget, from 2 (fastest) to 6 (slowest).
+ * Highest @preBlockSplitter_level combines well with @postBlockSplitter.
+ */
+ ZSTD_ParamSwitch_e postBlockSplitter;
+ int preBlockSplitter_level;
+
+ /* Adjust the max block size*/
+ size_t maxBlockSize;
+
+ /* Param for deciding whether to use row-based matchfinder */
+ ZSTD_ParamSwitch_e useRowMatchFinder;
+
+ /* Always load a dictionary in ext-dict mode (not prefix mode)? */
+ int deterministicRefPrefix;
+
/* Internal use, for createCCtxParams() and freeCCtxParams() only */
ZSTD_customMem customMem;
+
+ /* Controls prefetching in some dictMatchState matchfinders */
+ ZSTD_ParamSwitch_e prefetchCDictTables;
+
+ /* Controls whether zstd will fall back to an internal matchfinder
+ * if the external matchfinder returns an error code. */
+ int enableMatchFinderFallback;
+
+ /* Parameters for the external sequence producer API.
+ * Users set these parameters through ZSTD_registerSequenceProducer().
+ * It is not possible to set these parameters individually through the public API. */
+ void* extSeqProdState;
+ ZSTD_sequenceProducer_F extSeqProdFunc;
+
+ /* Controls repcode search in external sequence parsing */
+ ZSTD_ParamSwitch_e searchForExternalRepcodes;
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+#define TMP_WORKSPACE_SIZE (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE))
/*
* Indicates whether this compression proceeds directly from user-provided
@@ -266,17 +451,34 @@
ZSTDb_buffered
} ZSTD_buffered_policy_e;
+/*
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+ SeqStore_t fullSeqStoreChunk;
+ SeqStore_t firstHalfSeqStore;
+ SeqStore_t secondHalfSeqStore;
+ SeqStore_t currSeqStore;
+ SeqStore_t nextSeqStore;
+
+ U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
struct ZSTD_CCtx_s {
ZSTD_compressionStage_e stage;
int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
ZSTD_CCtx_params requestedParams;
ZSTD_CCtx_params appliedParams;
+ ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
U32 dictID;
size_t dictContentSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
- size_t blockSize;
+ size_t blockSizeMax;
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
unsigned long long consumedSrcSize;
unsigned long long producedCSize;
@@ -288,15 +490,16 @@
int isFirstBlock;
int initialized;
- seqStore_t seqStore; /* sequences storage ptrs */
+ SeqStore_t seqStore; /* sequences storage ptrs */
ldmState_t ldmState; /* long distance matching state */
rawSeq* ldmSequences; /* Storage for the ldm output sequences */
size_t maxNbLdmSequences;
- rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ RawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
ZSTD_blockState_t blockState;
- U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+ void* tmpWorkspace; /* used as substitute of stack space - must be aligned for S64 type */
+ size_t tmpWkspSize;
- /* Wether we are streaming or not */
+ /* Whether we are streaming or not */
ZSTD_buffered_policy_e bufferedPolicy;
/* streaming */
@@ -314,6 +517,7 @@
/* Stable in/out buffer verification */
ZSTD_inBuffer expectedInBuffer;
+ size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */
size_t expectedOutBufferSize;
/* Dictionary */
@@ -324,9 +528,17 @@
/* Multi-threading */
/* Tracing */
+
+ /* Workspace for block splitter */
+ ZSTD_blockSplitCtx blockSplitCtx;
+
+ /* Buffer for output from external sequence producer */
+ ZSTD_Sequence* extSeqBuf;
+ size_t extSeqBufCapacity;
};
typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e;
typedef enum {
ZSTD_noDict = 0,
@@ -348,17 +560,17 @@
* In this mode we take both the source size and the dictionary size
* into account when selecting and adjusting the parameters.
*/
- ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
* We don't know what these parameters are for. We default to the legacy
* behavior of taking both the source size and the dict size into account
* when selecting and adjusting parameters.
*/
-} ZSTD_cParamMode_e;
+} ZSTD_CParamMode_e;
-typedef size_t (*ZSTD_blockCompressor) (
- ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+typedef size_t (*ZSTD_BlockCompressor_f) (
+ ZSTD_MatchState_t* bs, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -392,31 +604,6 @@
return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
-typedef struct repcodes_s {
- U32 rep[3];
-} repcodes_t;
-
-MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
-{
- repcodes_t newReps;
- if (offset >= ZSTD_REP_NUM) { /* full offset */
- newReps.rep[2] = rep[1];
- newReps.rep[1] = rep[0];
- newReps.rep[0] = offset - ZSTD_REP_MOVE;
- } else { /* repcode */
- U32 const repCode = offset + ll0;
- if (repCode > 0) { /* note : if repCode==0, no change */
- U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
- newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
- newReps.rep[1] = rep[0];
- newReps.rep[0] = currentOffset;
- } else { /* repCode == 0 */
- ZSTD_memcpy(&newReps, rep, sizeof(newReps));
- }
- }
- return newReps;
-}
-
/* ZSTD_cParam_withinBounds:
* @return 1 if value is within cParam bounds,
* 0 otherwise */
@@ -429,12 +616,33 @@
return 1;
}
+/* ZSTD_selectAddr:
+ * @return index >= lowLimit ? candidate : backup,
+ * tries to force branchless codegen. */
+MEM_STATIC const BYTE*
+ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup)
+{
+#if defined(__x86_64__)
+ __asm__ (
+ "cmp %1, %2\n"
+ "cmova %3, %0\n"
+ : "+r"(candidate)
+ : "r"(index), "r"(lowLimit), "r"(backup)
+ );
+ return candidate;
+#else
+ return index >= lowLimit ? candidate : backup;
+#endif
+}
+
/* ZSTD_noCompressBlock() :
* Writes uncompressed block to dst buffer from given src.
* Returns the size of the block */
-MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity);
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
dstSize_tooSmall, "dst buf too small for uncompressed block");
MEM_writeLE24(dst, cBlockHeader24);
@@ -442,7 +650,8 @@
return ZSTD_blockHeaderSize + srcSize;
}
-MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
{
BYTE* const op = (BYTE*)dst;
U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
@@ -461,21 +670,21 @@
{
U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
return (srcSize >> minlog) + 2;
}
-MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
{
switch (cctxParams->literalCompressionMode) {
- case ZSTD_lcm_huffman:
+ case ZSTD_ps_enable:
return 0;
- case ZSTD_lcm_uncompressed:
+ case ZSTD_ps_disable:
return 1;
default:
assert(0 /* impossible: pre-validated */);
ZSTD_FALLTHROUGH;
- case ZSTD_lcm_auto:
+ case ZSTD_ps_auto:
return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
}
}
@@ -485,7 +694,9 @@
* Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
* large copies.
*/
-static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
assert(iend > ilimit_w);
if (ip <= ilimit_w) {
ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
@@ -495,14 +706,69 @@
while (ip < iend) *op++ = *ip++;
}
+
+#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1)
+#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2)
+#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3)
+#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
+#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM)
+#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM)
+#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
+#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
+#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
+
+/*! ZSTD_storeSeqOnly() :
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
+ * Literals themselves are not copied, but @litPtr is updated.
+ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
+ * @matchLength : must be >= MINMATCH
+*/
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr,
+ size_t litLength,
+ U32 offBase,
+ size_t matchLength)
+{
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+
+ /* literal Length */
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (UNLIKELY(litLength>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offBase = offBase;
+
+ /* match Length */
+ assert(matchLength <= ZSTD_BLOCKSIZE_MAX);
+ assert(matchLength >= MINMATCH);
+ { size_t const mlBase = matchLength - MINMATCH;
+ if (UNLIKELY(mlBase>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].mlBase = (U16)mlBase;
+ }
+
+ seqStorePtr->sequences++;
+}
+
/*! ZSTD_storeSeq() :
- * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
- * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
- * `mlBase` : matchLength - MINMATCH
- * Allowed to overread literals up to litLimit.
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
+ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
+ * @matchLength : must be >= MINMATCH
+ * Allowed to over-read literals up to litLimit.
*/
-HINT_INLINE UNUSED_ATTR
-void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(SeqStore_t* seqStorePtr,
+ size_t litLength, const BYTE* literals, const BYTE* litLimit,
+ U32 offBase,
+ size_t matchLength)
{
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
BYTE const* const litEnd = literals + litLength;
@@ -510,8 +776,8 @@
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
- DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
- pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
+ pos, (U32)litLength, (U32)matchLength, (U32)offBase);
}
#endif
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -521,9 +787,9 @@
assert(literals + litLength <= litLimit);
if (litEnd <= litLimit_w) {
/* Common case we can use wildcopy.
- * First copy 16 bytes, because literals are likely short.
- */
- assert(WILDCOPY_OVERLENGTH >= 16);
+ * First copy 16 bytes, because literals are likely short.
+ */
+ ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(seqStorePtr->lit, literals);
if (litLength > 16) {
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
@@ -533,85 +799,50 @@
}
seqStorePtr->lit += litLength;
- /* literal Length */
- if (litLength>0xFFFF) {
- assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
- seqStorePtr->longLengthID = 1;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offset = offCode + 1;
+ ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength);
+}
- /* match Length */
- if (mlBase>0xFFFF) {
- assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
- seqStorePtr->longLengthID = 2;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
+{
+ if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = OFFBASE_TO_OFFSET(offBase);
+ } else { /* repcode */
+ U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
+ if (repCode > 0) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+ rep[1] = rep[0];
+ rep[0] = currentOffset;
+ } else { /* repCode == 0 */
+ /* nothing to do */
+ }
}
- seqStorePtr->sequences[0].matchLength = (U16)mlBase;
+}
- seqStorePtr->sequences++;
+typedef struct repcodes_s {
+ U32 rep[3];
+} Repcodes_t;
+
+MEM_STATIC Repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
+{
+ Repcodes_t newReps;
+ ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+ ZSTD_updateRep(newReps.rep, offBase, ll0);
+ return newReps;
}
/*-*************************************
* Match length counter
***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
-{
- if (MEM_isLittleEndian()) {
- if (MEM_64bits()) {
-# if (__GNUC__ >= 4)
- return (__builtin_ctzll((U64)val) >> 3);
-# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
- 0, 3, 1, 3, 1, 4, 2, 7,
- 0, 2, 3, 6, 1, 5, 3, 5,
- 1, 3, 4, 4, 2, 5, 6, 7,
- 7, 0, 1, 2, 3, 3, 4, 6,
- 2, 6, 5, 5, 3, 4, 5, 6,
- 7, 1, 2, 4, 6, 4, 4, 5,
- 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
- } else { /* 32 bits */
-# if (__GNUC__ >= 3)
- return (__builtin_ctz((U32)val) >> 3);
-# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
- 3, 2, 2, 1, 3, 2, 0, 1,
- 3, 3, 1, 2, 2, 2, 2, 0,
- 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
- }
- } else { /* Big Endian CPU */
- if (MEM_64bits()) {
-# if (__GNUC__ >= 4)
- return (__builtin_clzll(val) >> 3);
-# else
- unsigned r;
- const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
- if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-# endif
- } else { /* 32 bits */
-# if (__GNUC__ >= 3)
- return (__builtin_clz((U32)val) >> 3);
-# else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
-# endif
- } }
-}
-
-
MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
const BYTE* const pStart = pIn;
@@ -645,8 +876,8 @@
size_t const matchLength = ZSTD_count(ip, match, vEnd);
if (match + matchLength != mEnd) return matchLength;
DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
- DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
- DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match));
+ DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip));
DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
@@ -657,32 +888,43 @@
* Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */
+MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); }
static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); }
+static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); }
static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); }
static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); }
static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); }
static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); }
+
MEM_STATIC FORCE_INLINE_ATTR
size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
+ /* Although some of these hashes do support hBits up to 64, some do not.
+ * To be on the safe side, always avoid hBits > 32. */
+ assert(hBits <= 32);
+
switch(mls)
{
default:
@@ -694,6 +936,24 @@
}
}
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) {
+ /* Although some of these hashes do support hBits up to 64, some do not.
+ * To be on the safe side, always avoid hBits > 32. */
+ assert(hBits <= 32);
+
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt);
+ case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt);
+ case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt);
+ case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt);
+ case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt);
+ }
+}
+
+
/* ZSTD_ipow() :
* Return base^exponent.
*/
@@ -755,11 +1015,12 @@
/*-*************************************
* Round buffer management
***************************************/
-#if (ZSTD_WINDOWLOG_MAX_64 > 31)
-# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
-#endif
-/* Max current allowed */
-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Max @current value allowed:
+ * In 32-bit mode: we want to avoid crossing the 2 GB limit,
+ * reducing risks of side effects in case of signed operations on indexes.
+ * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB)
+ * doesn't overflow U32 index capacity (4 GB) */
+#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB)
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX \
( ((U32)-1) /* Maximum ending current index */ \
@@ -778,6 +1039,13 @@
window->dictLimit = end;
}
+MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
+{
+ return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+ window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+ (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
+}
+
/*
* ZSTD_window_hasExtDict():
* Returns non-zero if the window has a non-empty extDict.
@@ -792,7 +1060,7 @@
* Inspects the provided matchState and figures out what dictMode should be
* passed to the compressor.
*/
-MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms)
{
return ZSTD_window_hasExtDict(ms->window) ?
ZSTD_extDict :
@@ -801,15 +1069,71 @@
ZSTD_noDict;
}
+/* Defining this macro to non-zero tells zstd to run the overflow correction
+ * code much more frequently. This is very inefficient, and should only be
+ * used for tests and fuzzers.
+ */
+#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
+# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
+# else
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
+# endif
+#endif
+
+/*
+ * ZSTD_window_canOverflowCorrect():
+ * Returns non-zero if the indices are large enough for overflow correction
+ * to work correctly without impacting compression ratio.
+ */
+MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src)
+{
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const curr = (U32)((BYTE const*)src - window.base);
+ U32 const minIndexToOverflowCorrect = cycleSize
+ + MAX(maxDist, cycleSize)
+ + ZSTD_WINDOW_START_INDEX;
+
+ /* Adjust the min index to backoff the overflow correction frequency,
+ * so we don't waste too much CPU in overflow correction. If this
+ * computation overflows we don't really care, we just need to make
+ * sure it is at least minIndexToOverflowCorrect.
+ */
+ U32 const adjustment = window.nbOverflowCorrections + 1;
+ U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
+ minIndexToOverflowCorrect);
+ U32 const indexLargeEnough = curr > adjustedIndex;
+
+ /* Only overflow correct early if the dictionary is invalidated already,
+ * so we don't hurt compression ratio.
+ */
+ U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
+
+ return indexLargeEnough && dictionaryInvalidated;
+}
+
/*
* ZSTD_window_needOverflowCorrection():
* Returns non-zero if the indices are getting too large and need overflow
* protection.
*/
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src,
void const* srcEnd)
{
U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+ if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
+ return 1;
+ }
+ }
return curr > ZSTD_CURRENT_MAX;
}
@@ -821,9 +1145,10 @@
*
* The least significant cycleLog bits of the indices must remain the same,
* which may be 0. Every index up to maxDist in the past must be valid.
- * NOTE: (maxDist & cycleMask) must be zero.
*/
-MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
U32 maxDist, void const* src)
{
/* preemptive overflow correction:
@@ -845,32 +1170,52 @@
* 3. (cctx->lowLimit + 1< 3<<29 + 1<base);
- U32 const currentCycle0 = curr & cycleMask;
- /* Exclude zero so that newCurrent - maxDist >= 1. */
- U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
- U32 const newCurrent = currentCycle1 + maxDist;
+ U32 const currentCycle = curr & cycleMask;
+ /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+ U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+ ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+ : 0;
+ U32 const newCurrent = currentCycle
+ + currentCycleCorrection
+ + MAX(maxDist, cycleSize);
U32 const correction = curr - newCurrent;
- assert((maxDist & cycleMask) == 0);
+ /* maxDist must be a power of two so that:
+ * (newCurrent & cycleMask) == (curr & cycleMask)
+ * This is required to not corrupt the chains / binary tree.
+ */
+ assert((maxDist & (maxDist - 1)) == 0);
+ assert((curr & cycleMask) == (newCurrent & cycleMask));
assert(curr > newCurrent);
- /* Loose bound, should be around 1<<29 (see above) */
- assert(correction > 1<<28);
+ if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+ }
window->base += correction;
window->dictBase += correction;
- if (window->lowLimit <= correction) window->lowLimit = 1;
- else window->lowLimit -= correction;
- if (window->dictLimit <= correction) window->dictLimit = 1;
- else window->dictLimit -= correction;
+ if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->lowLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->lowLimit -= correction;
+ }
+ if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->dictLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->dictLimit -= correction;
+ }
/* Ensure we can still reference the full window. */
assert(newCurrent >= maxDist);
- assert(newCurrent - maxDist >= 1);
+ assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
/* Ensure that lowLimit and dictLimit didn't underflow. */
assert(window->lowLimit <= newCurrent);
assert(window->dictLimit <= newCurrent);
+ ++window->nbOverflowCorrections;
+
DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
window->lowLimit);
return correction;
@@ -904,7 +1249,7 @@
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
@@ -949,7 +1294,7 @@
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
assert(loadedDictEndPtr != NULL);
assert(dictMatchStatePtr != NULL);
@@ -959,10 +1304,15 @@
(unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
assert(blockEndIdx >= loadedDictEnd);
- if (blockEndIdx > loadedDictEnd + maxDist) {
+ if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) {
/* On reaching window size, dictionaries are invalidated.
* For simplification, if window size is reached anywhere within next block,
* the dictionary is invalidated for the full block.
+ *
+ * We also have to invalidate the dictionary if ZSTD_window_update() has detected
+ * non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
+ * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
+ * dictMatchState, so setting it to NULL is not a problem.
*/
DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
*loadedDictEndPtr = 0;
@@ -975,11 +1325,13 @@
MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
ZSTD_memset(window, 0, sizeof(*window));
- window->base = (BYTE const*)"";
- window->dictBase = (BYTE const*)"";
- window->dictLimit = 1; /* start from 1, so that 1st position is valid */
- window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
- window->nextSrc = window->base + 1; /* see issue #1241 */
+ window->base = (BYTE const*)" ";
+ window->dictBase = (BYTE const*)" ";
+ ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+ window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */
+ window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */
+ window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */
+ window->nbOverflowCorrections = 0;
}
/*
@@ -989,8 +1341,11 @@
* forget about the extDict. Handles overlap of the prefix and extDict.
* Returns non-zero if the segment is contiguous.
*/
-MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
- void const* src, size_t srcSize)
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_window_update(ZSTD_window_t* window,
+ const void* src, size_t srcSize,
+ int forceNonContiguous)
{
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
@@ -1000,7 +1355,7 @@
assert(window->base != NULL);
assert(window->dictBase != NULL);
/* Check if blocks follow each other */
- if (src != window->nextSrc) {
+ if (src != window->nextSrc || forceNonContiguous) {
/* not contiguous */
size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
@@ -1017,8 +1372,9 @@
/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
if ( (ip+srcSize > window->dictBase + window->lowLimit)
& (ip < window->dictBase + window->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase);
+ U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ assert(highInputIdx < UINT_MAX);
window->lowLimit = lowLimitMax;
DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
}
@@ -1028,24 +1384,24 @@
/*
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
- U32 const maxDistance = 1U << windowLog;
- U32 const lowestValid = ms->window.lowLimit;
- U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
- U32 const isDictionary = (ms->loadedDictEnd != 0);
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.lowLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
/* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
* is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
* valid for the entire block. So this check is sufficient to find the lowest valid match index.
*/
- U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
/*
* Returns the lowest allowed match index in the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.dictLimit;
@@ -1058,6 +1414,13 @@
return matchLowest;
}
+/* index_safety_check:
+ * intentional underflow : ensure repIndex isn't overlapping dict + prefix
+ * @return 1 if values are not overlapping,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) {
+ return ((U32)((prefixLowestIndex-1) - repIndex) >= 3);
+}
/* debug functions */
@@ -1091,7 +1454,42 @@
#endif
+/* Short Cache */
+
+/* Normally, zstd matchfinders follow this flow:
+ * 1. Compute hash at ip
+ * 2. Load index from hashTable[hash]
+ * 3. Check if *ip == *(base + index)
+ * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
+ *
+ * Short cache is an optimization which allows us to avoid step 3 most of the time
+ * when the data doesn't actually match. With short cache, the flow becomes:
+ * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
+ * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
+ * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
+ *
+ * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
+ * dictMatchState matchfinders.
+ */
+#define ZSTD_SHORT_CACHE_TAG_BITS 8
+#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
+/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
+ * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
+MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
+ size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
+ assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
+ hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
+}
+
+/* Helper function for short cache matchfinders.
+ * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
+MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
+ U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
+ U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
+ return tag1 == tag2;
+}
/* ===============================================================
* Shared internal declarations
@@ -1108,6 +1506,25 @@
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_SequencePosition;
+
+/* for benchmark */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int const repcodeResolution);
+
+typedef struct {
+ size_t nbSequences;
+ size_t blockSize;
+ size_t litSize;
+} BlockSummary;
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs);
+
/* ==============================================================
* Private declarations
* These prototypes shall only be called from within lib/compress
@@ -1119,7 +1536,7 @@
* Note: srcSizeHint == 0 means 0!
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
/*! ZSTD_initCStream_internal() :
* Private use only. Init streaming operation.
@@ -1131,7 +1548,7 @@
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
-void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr);
/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
@@ -1170,11 +1587,10 @@
* This cannot be used when long range matching is enabled.
* Zstd will use these sequences, and pass the literals to a secondary block
* compressor.
- * @return : An error code on failure.
* NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
* access and data corruption.
*/
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
/* ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
@@ -1185,4 +1601,28 @@
*/
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
+MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) {
+ return params->extSeqProdFunc != NULL;
+}
+
+/* ===============================================================
+ * Deprecated definitions that are still used internally to avoid
+ * deprecation warnings. These functions are exactly equivalent to
+ * their public variants, but avoid the deprecation warnings.
+ * =============================================================== */
+
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
#endif /* ZSTD_COMPRESS_H */
Index: lib/zstd/compress/huf_compress.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
--- a/lib/zstd/compress/huf_compress.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/huf_compress.c (date 1740124241371)
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* Huffman encoder, part of New Generation Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -26,9 +27,9 @@
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "../common/fse.h" /* header compression */
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
+#include "../common/bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -39,17 +40,93 @@
/* **************************************************************
-* Utils
+* Required declarations
****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+
+/* **************************************************************
+* Debug Traces
+****************************************************************/
+
+#if DEBUGLEVEL >= 2
+
+static size_t showU32(const U32* arr, size_t size)
+{
+ size_t u;
+ for (u=0; u= add) {
+ assert(add < align);
+ assert(((size_t)aligned & mask) == 0);
+ *workspaceSizePtr -= add;
+ return aligned;
+ } else {
+ *workspaceSizePtr = 0;
+ return NULL;
+ }
+}
+
+
/* HUF_compressWeights() :
* Same as FSE_compress(), but dedicated to huff0's weights compression.
* The use case needs much less stack memory.
@@ -64,7 +141,10 @@
S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;
-static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
+static size_t
+HUF_compressWeights(void* dst, size_t dstSize,
+ const void* weightTable, size_t wtSize,
+ void* workspace, size_t workspaceSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
@@ -72,7 +152,7 @@
unsigned maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
- HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
+ HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
@@ -103,6 +183,59 @@
return (size_t)(op-ostart);
}
+static size_t HUF_getNbBits(HUF_CElt elt)
+{
+ return elt & 0xFF;
+}
+
+static size_t HUF_getNbBitsFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static size_t HUF_getValue(HUF_CElt elt)
+{
+ return elt & ~(size_t)0xFF;
+}
+
+static size_t HUF_getValueFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
+{
+ assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
+ *elt = nbBits;
+}
+
+static void HUF_setValue(HUF_CElt* elt, size_t value)
+{
+ size_t const nbBits = HUF_getNbBits(*elt);
+ if (nbBits > 0) {
+ assert((value >> nbBits) == 0);
+ *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
+ }
+}
+
+HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
+{
+ HUF_CTableHeader header;
+ ZSTD_memcpy(&header, ctable, sizeof(header));
+ return header;
+}
+
+static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
+{
+ HUF_CTableHeader header;
+ HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
+ ZSTD_memset(&header, 0, sizeof(header));
+ assert(tableLog < 256);
+ header.tableLog = (BYTE)tableLog;
+ assert(maxSymbolValue < 256);
+ header.maxSymbolValue = (BYTE)maxSymbolValue;
+ ZSTD_memcpy(ctable, &header, sizeof(header));
+}
typedef struct {
HUF_CompressWeightsWksp wksp;
@@ -114,9 +247,15 @@
const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
void* workspace, size_t workspaceSize)
{
+ HUF_CElt const* const ct = CTable + 1;
BYTE* op = (BYTE*)dst;
U32 n;
- HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
+ HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+
+ HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));
+
+ assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
+ assert(HUF_readCTableHeader(CTable).tableLog == huffLog);
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
@@ -127,9 +266,10 @@
for (n=1; nbitsToWeight[n] = (BYTE)(huffLog + 1 - n);
for (n=0; nhuffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
+ wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
/* attempt weights compression by FSE */
+ if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
@@ -146,16 +286,6 @@
return ((maxSymbolValue+1)/2) + 1;
}
-/*! HUF_writeCTable() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
-{
- HUF_WriteCTableWksp wksp;
- return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
-}
-
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
@@ -163,6 +293,7 @@
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
+ HUF_CElt* const ct = CTable + 1;
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
@@ -172,6 +303,10 @@
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = nbSymbols - 1;
+
+ HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);
+
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
@@ -183,13 +318,13 @@
/* fill nbBits */
{ U32 n; for (n=0; nn=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
- { U32 n; for (n=0; n>= 1;
} }
/* assign value within rank, symbol order */
- { U32 n; for (n=0; n HUF_readCTableHeader(CTable).maxSymbolValue)
+ return 0;
+ return (U32)HUF_getNbBits(ct[symbolValue]);
}
-
-typedef struct nodeElt_s {
- U32 count;
- U16 parent;
- BYTE byte;
- BYTE nbBits;
-} nodeElt;
/*
* HUF_setMaxHeight():
- * Enforces maxNbBits on the Huffman tree described in huffNode.
+ * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
*
- * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
- * the tree to so that it is a valid canonical Huffman tree.
+ * It attempts to convert all nodes with nbBits > @targetNbBits
+ * to employ @targetNbBits instead. Then it adjusts the tree
+ * so that it remains a valid canonical Huffman tree.
*
* @pre The sum of the ranks of each symbol == 2^largestBits,
* where largestBits == huffNode[lastNonNull].nbBits.
* @post The sum of the ranks of each symbol == 2^largestBits,
- * where largestBits is the return value <= maxNbBits.
+ * where largestBits is the return value (expected <= targetNbBits).
*
- * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param huffNode The Huffman tree modified in place to enforce targetNbBits.
+ * It's presumed sorted, from most frequent to rarest symbol.
* @param lastNonNull The symbol with the lowest count in the Huffman tree.
- * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * @param targetNbBits The allowed number of bits, which the Huffman tree
* may not respect. After this function the Huffman tree will
- * respect maxNbBits.
- * @return The maximum number of bits of the Huffman tree after adjustment,
- * necessarily no more than maxNbBits.
+ * respect targetNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment.
*/
-static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
- /* early exit : no elt > maxNbBits, so the tree is already valid. */
- if (largestBits <= maxNbBits) return largestBits;
+ /* early exit : no elt > targetNbBits, so the tree is already valid. */
+ if (largestBits <= targetNbBits) return largestBits;
+
+ DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
+ const U32 baseCost = 1 << (largestBits - targetNbBits);
int n = (int)lastNonNull;
- /* Adjust any ranks > maxNbBits to maxNbBits.
+ /* Adjust any ranks > targetNbBits to targetNbBits.
* Compute totalCost, which is how far the sum of the ranks is
* we are over 2^largestBits after adjust the offending ranks.
*/
- while (huffNode[n].nbBits > maxNbBits) {
+ while (huffNode[n].nbBits > targetNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
+ huffNode[n].nbBits = (BYTE)targetNbBits;
n--;
}
- /* n stops at huffNode[n].nbBits <= maxNbBits */
- assert(huffNode[n].nbBits <= maxNbBits);
- /* n end at index of smallest symbol using < maxNbBits */
- while (huffNode[n].nbBits == maxNbBits) --n;
+ /* n stops at huffNode[n].nbBits <= targetNbBits */
+ assert(huffNode[n].nbBits <= targetNbBits);
+ /* n end at index of smallest symbol using < targetNbBits */
+ while (huffNode[n].nbBits == targetNbBits) --n;
- /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ /* renorm totalCost from 2^largestBits to 2^targetNbBits
* note : totalCost is necessarily a multiple of baseCost */
- assert((totalCost & (baseCost - 1)) == 0);
- totalCost >>= (largestBits - maxNbBits);
+ assert(((U32)totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - targetNbBits);
assert(totalCost > 0);
/* repay normalized cost */
@@ -278,19 +410,19 @@
/* Get pos of last (smallest = lowest cum. count) symbol per rank */
ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
- { U32 currentNbBits = maxNbBits;
+ { U32 currentNbBits = targetNbBits;
int pos;
for (pos=n ; pos >= 0; pos--) {
if (huffNode[pos].nbBits >= currentNbBits) continue;
- currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
- rankLast[maxNbBits-currentNbBits] = (U32)pos;
+ currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */
+ rankLast[targetNbBits-currentNbBits] = (U32)pos;
} }
while (totalCost > 0) {
/* Try to reduce the next power of 2 above totalCost because we
* gain back half the rank.
*/
- U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
+ U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
@@ -330,7 +462,7 @@
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease)
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
}
} /* while (totalCost > 0) */
@@ -342,11 +474,11 @@
* TODO.
*/
while (totalCost < 0) { /* Sometimes, cost correction overshoot */
- /* special case : no rank 1 symbol (using maxNbBits-1);
- * let's create one from largest rank 0 (using maxNbBits).
+ /* special case : no rank 1 symbol (using targetNbBits-1);
+ * let's create one from largest rank 0 (using targetNbBits).
*/
if (rankLast[1] == noSymbol) {
- while (huffNode[n].nbBits == maxNbBits) n--;
+ while (huffNode[n].nbBits == targetNbBits) n--;
huffNode[n+1].nbBits--;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
@@ -360,26 +492,122 @@
} /* repay normalized cost */
} /* there are several too large elements (at least >= 2) */
- return maxNbBits;
+ return targetNbBits;
}
typedef struct {
- U32 base;
- U32 curr;
+ U16 base;
+ U16 curr;
} rankPos;
-typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)];
-#define RANK_POSITION_TABLE_SIZE 32
+/* Number of buckets available for HUF_sort() */
+#define RANK_POSITION_TABLE_SIZE 192
typedef struct {
huffNodeTable huffNodeTbl;
rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;
+/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
+ * Strategy is to use as many buckets as possible for representing distinct
+ * counts while using the remainder to represent all "large" counts.
+ *
+ * To satisfy this requirement for 192 buckets, we can do the following:
+ * Let buckets 0-166 represent distinct counts of [0, 166]
+ * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
+ */
+#define RANK_POSITION_MAX_COUNT_LOG 32
+#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
+#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)
+
+/* Return the appropriate bucket index for a given count. See definition of
+ * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
+ */
+static U32 HUF_getIndex(U32 const count) {
+ return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
+ ? count
+ : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
+}
+
+/* Helper swap function for HUF_quickSortPartition() */
+static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
+ nodeElt tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+/* Returns 0 if the huffNode array is not sorted by descending count */
+MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
+ U32 i;
+ for (i = 1; i < maxSymbolValue1; ++i) {
+ if (huffNode[i].count > huffNode[i-1].count) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Insertion sort by descending order */
+HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
+ int i;
+ int const size = high-low+1;
+ huffNode += low;
+ for (i = 1; i < size; ++i) {
+ nodeElt const key = huffNode[i];
+ int j = i - 1;
+ while (j >= 0 && huffNode[j].count < key.count) {
+ huffNode[j + 1] = huffNode[j];
+ j--;
+ }
+ huffNode[j + 1] = key;
+ }
+}
+
+/* Pivot helper function for quicksort. */
+static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
+ /* Simply select rightmost element as pivot. "Better" selectors like
+ * median-of-three don't experimentally appear to have any benefit.
+ */
+ U32 const pivot = arr[high].count;
+ int i = low - 1;
+ int j = low;
+ for ( ; j < high; j++) {
+ if (arr[j].count > pivot) {
+ i++;
+ HUF_swapNodes(&arr[i], &arr[j]);
+ }
+ }
+ HUF_swapNodes(&arr[i + 1], &arr[high]);
+ return i + 1;
+}
+
+/* Classic quicksort by descending with partially iterative calls
+ * to reduce worst case callstack size.
+ */
+static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
+ int const kInsertionSortThreshold = 8;
+ if (high - low < kInsertionSortThreshold) {
+ HUF_insertionSort(arr, low, high);
+ return;
+ }
+ while (low < high) {
+ int const idx = HUF_quickSortPartition(arr, low, high);
+ if (idx - low < high - idx) {
+ HUF_simpleQuickSort(arr, low, idx - 1);
+ low = idx + 1;
+ } else {
+ HUF_simpleQuickSort(arr, idx + 1, high);
+ high = idx - 1;
+ }
+ }
+}
+
/*
* HUF_sort():
* Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
*
* @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
* Must have (maxSymbolValue + 1) entries.
@@ -387,42 +615,51 @@
* @param[in] maxSymbolValue Maximum symbol value.
* @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
*/
-static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
-{
- int n;
- int const maxSymbolValue1 = (int)maxSymbolValue + 1;
+static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
+ U32 n;
+ U32 const maxSymbolValue1 = maxSymbolValue+1;
/* Compute base and set curr to base.
- * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
- * Then 2^lowerRank <= count[n]+1 <= 2^rank.
+ * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
+ * See HUF_getIndex to see bucketing strategy.
* We attribute each symbol to lowerRank's base value, because we want to know where
* each rank begins in the output, so for rank R we want to count ranks R+1 and above.
*/
ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
for (n = 0; n < maxSymbolValue1; ++n) {
- U32 lowerRank = BIT_highbit32(count[n] + 1);
+ U32 lowerRank = HUF_getIndex(count[n]);
+ assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
rankPosition[lowerRank].base++;
}
+
assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ /* Set up the rankPosition table */
for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
rankPosition[n-1].base += rankPosition[n].base;
rankPosition[n-1].curr = rankPosition[n-1].base;
}
- /* Sort */
+
+ /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
for (n = 0; n < maxSymbolValue1; ++n) {
U32 const c = count[n];
- U32 const r = BIT_highbit32(c+1) + 1;
- U32 pos = rankPosition[r].curr++;
- /* Insert into the correct position in the rank.
- * We have at most 256 symbols, so this insertion should be fine.
- */
- while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
- huffNode[pos] = huffNode[pos-1];
- pos--;
- }
+ U32 const r = HUF_getIndex(c) + 1;
+ U32 const pos = rankPosition[r].curr++;
+ assert(pos < maxSymbolValue1);
huffNode[pos].count = c;
huffNode[pos].byte = (BYTE)n;
}
+
+ /* Sort each bucket. */
+ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
+ int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
+ U32 const bucketStartIdx = rankPosition[n].base;
+ if (bucketSize > 1) {
+ assert(bucketStartIdx < maxSymbolValue1);
+ HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
+ }
+ }
+
+ assert(HUF_isSorted(huffNode, maxSymbolValue1));
}
@@ -446,6 +683,7 @@
int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
+ DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
/* init for parents */
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
@@ -472,6 +710,8 @@
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));
+
return nonNullRank;
}
@@ -487,6 +727,7 @@
*/
static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
{
+ HUF_CElt* const ct = CTable + 1;
/* fill result into ctable (val, nbBits) */
int n;
U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
@@ -502,130 +743,381 @@
min >>= 1;
} }
for (n=0; nhuffNodeTbl;
nodeElt* const huffNode = huffNode0+1;
int nonNullRank;
+ HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables));
+
+ DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);
+
/* safety checks */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
- return ERROR(workSpace_tooSmall);
+ return ERROR(workSpace_tooSmall);
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
+ return ERROR(maxSymbolValue_tooLarge);
ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+ DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));
/* build tree */
nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
- /* enforce maxTableLog */
+ /* determine and enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
- HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
+ HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
return maxNbBits;
}
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
+ HUF_CElt const* ct = CTable + 1;
size_t nbBits = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
- nbBits += CTable[s].nbBits * count[s];
+ nbBits += HUF_getNbBits(ct[s]) * count[s];
}
return nbBits >> 3;
}
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
- }
- return !bad;
+ HUF_CTableHeader header = HUF_readCTableHeader(CTable);
+ HUF_CElt const* ct = CTable + 1;
+ int bad = 0;
+ int s;
+
+ assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);
+
+ if (header.maxSymbolValue < maxSymbolValue)
+ return 0;
+
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
+ }
+ return !bad;
}
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+/* HUF_CStream_t:
+ * Huffman uses its own BIT_CStream_t implementation.
+ * There are three major differences from BIT_CStream_t:
+ * 1. HUF_addBits() takes a HUF_CElt (size_t) which is
+ * the pair (nbBits, value) in the format:
+ * format:
+ * - Bits [0, 4) = nbBits
+ * - Bits [4, 64 - nbBits) = 0
+ * - Bits [64 - nbBits, 64) = value
+ * 2. The bitContainer is built from the upper bits and
+ * right shifted. E.g. to add a new value of N bits
+ * you right shift the bitContainer by N, then or in
+ * the new value into the N upper bits.
+ * 3. The bitstream has two bit containers. You can add
+ * bits to the second container and merge them into
+ * the first container.
+ */
+
+#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
+
+typedef struct {
+ size_t bitContainer[2];
+ size_t bitPos[2];
+
+ BYTE* startPtr;
+ BYTE* ptr;
+ BYTE* endPtr;
+} HUF_CStream_t;
+
+/*! HUF_initCStream():
+ * Initializes the bitstream.
+ * @returns 0 or an error code.
+ */
+static size_t HUF_initCStream(HUF_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ ZSTD_memset(bitC, 0, sizeof(*bitC));
+ bitC->startPtr = (BYTE*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
+ if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! HUF_addBits():
+ * Adds the symbol stored in HUF_CElt elt to the bitstream.
+ *
+ * @param elt The element we're adding. This is a (nbBits, value) pair.
+ * See the HUF_CStream_t docs for the format.
+ * @param idx Insert into the bitstream at this idx.
+ * @param kFast This is a template parameter. If the bitstream is guaranteed
+ * to have at least 4 unused bits after this call it may be 1,
+ * otherwise it must be 0. HUF_addBits() is faster when fast is set.
+ */
+FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
+{
+ assert(idx <= 1);
+ assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
+ /* This is efficient on x86-64 with BMI2 because shrx
+ * only reads the low 6 bits of the register. The compiler
+ * knows this and elides the mask. When fast is set,
+ * every operation can use the same value loaded from elt.
+ */
+ bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
+ bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
+ /* We only read the low 8 bits of bitC->bitPos[idx] so it
+ * doesn't matter that the high bits have noise from the value.
+ */
+ bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
+ assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ /* The last 4-bits of elt are dirty if fast is set,
+ * so we must not be overwriting bits that have already been
+ * inserted into the bit container.
+ */
+#if DEBUGLEVEL >= 1
+ {
+ size_t const nbBits = HUF_getNbBits(elt);
+ size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
+ (void)dirtyBits;
+ /* Middle bits are 0. */
+ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
+ /* We didn't overwrite any bits in the bit container. */
+ assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ (void)dirtyBits;
+ }
+#endif
+}
+
+FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
+{
+ bitC->bitContainer[1] = 0;
+ bitC->bitPos[1] = 0;
+}
+
+/*! HUF_mergeIndex1() :
+ * Merges the bit container @ index 1 into the bit container @ index 0
+ * and zeros the bit container @ index 1.
+ */
+FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
+{
+ assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
+ bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
+ bitC->bitContainer[0] |= bitC->bitContainer[1];
+ bitC->bitPos[0] += bitC->bitPos[1];
+ assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+}
+
+/*! HUF_flushBits() :
+* Flushes the bits in the bit container @ index 0.
+*
+* @post bitPos will be < 8.
+* @param kFast If kFast is set then we must know a-priori that
+* the bit container will not overflow.
+*/
+FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
+{
+ /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ size_t const nbBytes = nbBits >> 3;
+ /* The top nbBits bits of bitContainer are the ones we need. */
+ size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
+ /* Mask bitPos to account for the bytes we consumed. */
+ bitC->bitPos[0] &= 7;
+ assert(nbBits > 0);
+ assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitContainer);
+ bitC->ptr += nbBytes;
+ assert(!kFast || bitC->ptr <= bitC->endPtr);
+ if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ /* bitContainer doesn't need to be modified because the leftover
+ * bits are already the top bitPos bits. And we don't care about
+ * noise in the lower values.
+ */
+}
+
+/*! HUF_endMark()
+ * @returns The Huffman stream end mark: A 1-bit value = 1.
+ */
+static HUF_CElt HUF_endMark(void)
+{
+ HUF_CElt endMark;
+ HUF_setNbBits(&endMark, 1);
+ HUF_setValue(&endMark, 1);
+ return endMark;
+}
+
+/*! HUF_closeCStream() :
+ * @return Size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+static size_t HUF_closeCStream(HUF_CStream_t* bitC)
+{
+ HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
+ HUF_flushBits(bitC, /* kFast */ 0);
+ {
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
+ }
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
+{
+ HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
+}
+
FORCE_INLINE_TEMPLATE void
-HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
+HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
+ const BYTE* ip, size_t srcSize,
+ const HUF_CElt* ct,
+ int kUnroll, int kFastFlush, int kLastFast)
{
- BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
+ /* Join to kUnroll */
+ int n = (int)srcSize;
+ int rem = n % kUnroll;
+ if (rem > 0) {
+ for (; rem > 0; --rem) {
+ HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
+ }
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n % kUnroll == 0);
-#define HUF_FLUSHBITS(s) BIT_flushBits(s)
+ /* Join to 2 * kUnroll */
+ if (n % (2 * kUnroll)) {
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ n -= kUnroll;
+ }
+ assert(n % (2 * kUnroll) == 0);
-#define HUF_FLUSHBITS_1(stream) \
- if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+ for (; n>0; n-= 2 * kUnroll) {
+ /* Encode kUnroll symbols into the bitstream @ index 0. */
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ /* Encode kUnroll symbols into the bitstream @ index 1.
+ * This allows us to start filling the bit container
+ * without any data dependencies.
+ */
+ HUF_zeroIndex1(bitC);
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
+ /* Merge bitstream @ index 1 into the bitstream @ index 0 */
+ HUF_mergeIndex1(bitC);
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n == 0);
-#define HUF_FLUSHBITS_2(stream) \
- if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
+}
+
+/*
+ * Returns a tight upper bound on the output space needed by Huffman
+ * with 8 bytes buffer to handle over-writes. If the output is at least
+ * this large we don't need to do bounds checks during Huffman encoding.
+ */
+static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
+{
+ return ((srcSize * tableLog) >> 3) + 8;
+}
+
FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
+ U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
+ HUF_CElt const* ct = CTable + 1;
const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
- size_t n;
- BIT_CStream_t bitC;
+ HUF_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
- { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
+ { BYTE* op = ostart;
+ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
- n = srcSize & ~3; /* join to mod 4 */
- switch (srcSize & 3)
- {
- case 3:
- HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- ZSTD_FALLTHROUGH;
- case 2:
- HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- ZSTD_FALLTHROUGH;
- case 1:
- HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
- HUF_FLUSHBITS(&bitC);
- ZSTD_FALLTHROUGH;
- case 0: ZSTD_FALLTHROUGH;
- default: break;
- }
-
- for (; n>0; n-=4) { /* note : n&3==0 at this stage */
- HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
- HUF_FLUSHBITS(&bitC);
+ if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
+ else {
+ if (MEM_32bits()) {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10: ZSTD_FALLTHROUGH;
+ case 9: ZSTD_FALLTHROUGH;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 7: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ } else {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 9:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 7:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 6: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ }
}
+ assert(bitC.ptr <= bitC.endPtr);
- return BIT_closeCStream(&bitC);
+ return HUF_closeCStream(&bitC);
}
#if DYNAMIC_BMI2
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
@@ -644,9 +1136,9 @@
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
+ const HUF_CElt* CTable, const int flags)
{
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
}
return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
@@ -657,24 +1149,23 @@
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
+ const HUF_CElt* CTable, const int flags)
{
- (void)bmi2;
+ (void)flags;
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
#endif
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
- return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
-
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, int bmi2)
+ const HUF_CElt* CTable, int flags)
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
@@ -688,27 +1179,24 @@
op += 6; /* jumpTable */
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
+ if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
+ if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
- if (cSize==0) return 0;
- assert(cSize <= 65535);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
+ if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
}
@@ -716,17 +1204,17 @@
ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
- if (cSize==0) return 0;
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
+ if (cSize == 0 || cSize > 65535) return 0;
op += cSize;
}
return (size_t)(op-ostart);
}
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
- return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
@@ -734,11 +1222,11 @@
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
- HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
{
size_t const cSize = (nbStreams==HUF_singleStream) ?
- HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
- HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
+ HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
@@ -750,35 +1238,113 @@
typedef struct {
unsigned count[HUF_SYMBOLVALUE_MAX + 1];
- HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
union {
HUF_buildCTable_wksp_tables buildCTable_wksp;
HUF_WriteCTableWksp writeCTable_wksp;
+ U32 hist_wksp[HIST_WKSP_SIZE_U32];
} wksps;
} HUF_compress_tables_t;
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
+
+unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
+{
+ unsigned cardinality = 0;
+ unsigned i;
+
+ for (i = 0; i < maxSymbolValue + 1; i++) {
+ if (count[i] != 0) cardinality += 1;
+ }
+
+ return cardinality;
+}
+
+unsigned HUF_minTableLog(unsigned symbolCardinality)
+{
+ U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
+ return minBitsSymbols;
+}
+
+unsigned HUF_optimalTableLog(
+ unsigned maxTableLog,
+ size_t srcSize,
+ unsigned maxSymbolValue,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* table,
+ const unsigned* count,
+ int flags)
+{
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));
+
+ if (!(flags & HUF_flags_optimalDepth)) {
+ /* cheap evaluation, based on FSE */
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+ }
+
+ { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
+ size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
+ size_t hSize, newSize;
+ const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
+ const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
+ size_t optSize = ((size_t) ~0) - 1;
+ unsigned optLog = maxTableLog, optLogGuess;
+
+ DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);
+
+ /* Search until size increases */
+ for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
+ DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
+
+ { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
+ if (ERR_isError(maxBits)) continue;
+
+ if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
+
+ hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
+ }
+
+ if (ERR_isError(hSize)) continue;
+
+ newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
+
+ if (newSize > optSize + 1) {
+ break;
+ }
+
+ if (newSize < optSize) {
+ optSize = newSize;
+ optLog = optLogGuess;
+ }
+ }
+ assert(optLog <= HUF_TABLELOG_MAX);
+ return optLog;
+ }
+}
+
/* HUF_compress_internal() :
* `workSpace_align4` must be aligned on 4-bytes boundaries,
- * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
- void* workSpace_align4, size_t wkspSize,
- HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
- const int bmi2)
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
{
- HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
- HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
- assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
+ DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
+ HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
/* checks & inits */
- if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
+ if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
@@ -788,17 +1354,34 @@
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
+ }
+
+ /* If uncompressible data is suspected, do a smaller sampling first */
+ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
+ if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+ size_t largestTotal = 0;
+ DEBUGLOG(5, "input suspected incompressible : sampling to check");
+ { unsigned maxSymbolValueBegin = maxSymbolValue;
+ CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestBegin;
+ }
+ { unsigned maxSymbolValueEnd = maxSymbolValue;
+ CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestEnd;
+ }
+ if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Scan input and build symbol stats */
- { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
+ DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));
/* Check validity of previous table */
if ( repeat
@@ -807,22 +1390,20 @@
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
}
/* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
- /* Zero unused symbols in CTable, so we can check it for validity */
- ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
- sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
+ DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
}
/* Write table description header */
@@ -835,7 +1416,7 @@
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
} }
/* Use the new huffman table */
@@ -847,59 +1428,35 @@
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, table->CTable, bmi2);
-}
-
-
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/);
+ nbStreams, table->CTable, flags);
}
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
+ DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize, hufTable,
- repeat, preferRepeat, bmi2);
+ repeat, flags);
}
/* HUF_compress4X_repeat():
* compress input using 4 streams.
- * provide workspace to generate compression tables */
-size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/);
-}
-
-/* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * re-use an existing huffman compression table */
+ * consider skipping quickly
+ * reuse an existing huffman compression table */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
+ DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
- hufTable, repeat, preferRepeat, bmi2);
+ hufTable, repeat, flags);
}
-
Index: lib/zstd/compress/zstd_lazy.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
--- a/lib/zstd/compress/zstd_lazy.c (revision 0f030fd569788084912f092fc08e278d5c2a1b78)
+++ b/lib/zstd/compress/zstd_lazy.c (date 1740124241468)
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -10,14 +11,23 @@
#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
+#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */
+
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+
+#define kLazySkippingStep 8
/*-*************************************
* Binary Tree search
***************************************/
-static void
-ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_updateDUBT(ZSTD_MatchState_t* ms,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
@@ -60,8 +70,9 @@
* sort one already inserted but unsorted position
* assumption : curr >= btlow == (curr - btmask)
* doesn't fail */
-static void
-ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms,
U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
@@ -149,9 +160,10 @@
}
-static size_t
-ZSTD_DUBT_findBetterDictMatch (
- ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_DUBT_findBetterDictMatch (
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
size_t bestLength,
@@ -159,7 +171,7 @@
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
- const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t * const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
const U32 * const dictHashTable = dms->hashTable;
U32 const hashLog = dmsCParams->hashLog;
@@ -197,8 +209,8 @@
U32 matchIndex = dictMatchIndex + dictIndexDelta;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
- curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
}
if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
@@ -218,7 +230,7 @@
}
if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
@@ -227,10 +239,11 @@
}
-static size_t
-ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
+ size_t* offBasePtr,
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
@@ -327,8 +340,8 @@
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
+ bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
@@ -361,16 +374,16 @@
if (dictMode == ZSTD_dictMatchState && nbCompares) {
bestLength = ZSTD_DUBT_findBetterDictMatch(
ms, ip, iend,
- offsetPtr, bestLength, nbCompares,
+ offBasePtr, bestLength, nbCompares,
mls, dictMode);
}
assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
- curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ curr, (U32)bestLength, (U32)*offBasePtr, mIndex);
}
return bestLength;
}
@@ -378,106 +391,25 @@
/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
+ size_t* offBasePtr,
const U32 mls /* template */,
const ZSTD_dictMode_e dictMode)
{
DEBUGLOG(7, "ZSTD_BtFindBestMatch");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
ZSTD_updateDUBT(ms, ip, iLimit, mls);
- return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
-}
-
-
-static size_t
-ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
-{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
- }
-}
-
-
-static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
-{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
- }
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
}
-
-
-static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
-{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
- }
-}
-
-
/* *********************************
-* Hash Chain
+* Dedicated dict search
***********************************/
-#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
-
-/* Update chains up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
- ZSTD_matchState_t* ms,
- const ZSTD_compressionParameters* const cParams,
- const BYTE* ip, U32 const mls)
-{
- U32* const hashTable = ms->hashTable;
- const U32 hashLog = cParams->hashLog;
- U32* const chainTable = ms->chainTable;
- const U32 chainMask = (1 << cParams->chainLog) - 1;
- const BYTE* const base = ms->window.base;
- const U32 target = (U32)(ip - base);
- U32 idx = ms->nextToUpdate;
-
- while(idx < target) { /* catch up */
- size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
- NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
- hashTable[h] = idx;
- idx++;
- }
-
- ms->nextToUpdate = target;
- return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
-}
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
- const ZSTD_compressionParameters* const cParams = &ms->cParams;
- return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
-}
-
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
@@ -485,7 +417,7 @@
U32* const chainTable = ms->chainTable;
U32 const chainSize = 1 << ms->cParams.chainLog;
U32 idx = ms->nextToUpdate;
- U32 const minChain = chainSize < target ? target - chainSize : idx;
+ U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
U32 const cacheSize = bucketSize - 1;
U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
@@ -499,13 +431,12 @@
U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
U32* const tmpHashTable = hashTable;
U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
- U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
-
U32 hashIdx;
assert(ms->cParams.chainLog <= 24);
- assert(ms->cParams.hashLog >= ms->cParams.chainLog);
+ assert(ms->cParams.hashLog > ms->cParams.chainLog);
assert(idx != 0);
assert(tmpMinChain <= minChain);
@@ -536,7 +467,7 @@
if (count == cacheSize) {
for (count = 0; count < chainLimit;) {
if (i < minChain) {
- if (!i || countBeyondMinChain++ > cacheSize) {
+ if (!i || ++countBeyondMinChain > cacheSize) {
/* only allow pulling `cacheSize` number of entries
* into the cache or chainTable beyond `minChain`,
* to replace the entries pulled out of the
@@ -592,161 +523,219 @@
ms->nextToUpdate = target;
}
-
-/* inlining is important to hardwire a hot branch (template emulation) */
+/* Returns the longest match length found in the dedicated dict search structure.
+ * If none are longer than the argument ml, then ml will be returned.
+ */
FORCE_INLINE_TEMPLATE
-size_t ZSTD_HcFindBestMatch_generic (
- ZSTD_matchState_t* ms,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 mls, const ZSTD_dictMode_e dictMode)
-{
- const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const chainTable = ms->chainTable;
- const U32 chainSize = (1 << cParams->chainLog);
- const U32 chainMask = chainSize-1;
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const U32 curr = (U32)(ip-base);
- const U32 maxDistance = 1U << cParams->windowLog;
- const U32 lowestValid = ms->window.lowLimit;
- const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
- const U32 isDictionary = (ms->loadedDictEnd != 0);
- const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
- const U32 minChain = curr > chainSize ? curr - chainSize : 0;
- U32 nbAttempts = 1U << cParams->searchLog;
- size_t ml=4-1;
-
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
- ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
- const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
- ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
-
- U32 matchIndex;
-
- if (dictMode == ZSTD_dedicatedDictSearch) {
- const U32* entry = &dms->hashTable[ddsIdx];
- PREFETCH_L1(entry);
- }
-
- /* HC4 match finder */
- matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
-
- for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
- size_t currentMl=0;
- if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
- const BYTE* const match = base + matchIndex;
- assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
- if (match[ml] == ip[ml]) /* potentially better */
- currentMl = ZSTD_count(ip, match, iLimit);
- } else {
- const BYTE* const match = dictBase + matchIndex;
- assert(match+4 <= dictEnd);
- if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
- }
-
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
- }
-
- if (matchIndex <= minChain) break;
- matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
- }
-
- assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
- if (dictMode == ZSTD_dedicatedDictSearch) {
- const U32 ddsLowestIndex = dms->window.dictLimit;
- const BYTE* const ddsBase = dms->window.base;
- const BYTE* const ddsEnd = dms->window.nextSrc;
- const U32 ddsSize = (U32)(ddsEnd - ddsBase);
- const U32 ddsIndexDelta = dictLimit - ddsSize;
- const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
- const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
- U32 ddsAttempt;
+size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+ const ZSTD_MatchState_t* const dms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE* const prefixStart, const U32 curr,
+ const U32 dictLimit, const size_t ddsIdx) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+ U32 matchIndex;
- for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
- PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
- }
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
- {
- U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
- U32 const chainIndex = chainPackedPointer >> 8;
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
- PREFETCH_L1(&dms->chainTable[chainIndex]);
- }
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
- for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
- size_t currentMl=0;
- const BYTE* match;
- matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
- match = ddsBase + matchIndex;
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
- if (!matchIndex) {
- return ml;
- }
+ if (!matchIndex) {
+ return ml;
+ }
- /* guaranteed by table construction */
- (void)ddsLowestIndex;
- assert(matchIndex >= ddsLowestIndex);
- assert(match+4 <= ddsEnd);
- if (MEM_read32(match) == MEM_read32(ip)) {
- /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
- }
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) {
- /* best possible, avoids read overflow on next attempt */
- return ml;
- }
- }
- }
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
- {
- U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
- U32 chainIndex = chainPackedPointer >> 8;
- U32 const chainLength = chainPackedPointer & 0xFF;
- U32 const chainAttempts = nbAttempts - ddsAttempt;
- U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
- U32 chainAttempt;
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
- for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
- PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
- }
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
- for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
- size_t currentMl=0;
- const BYTE* match;
- matchIndex = dms->chainTable[chainIndex];
- match = ddsBase + matchIndex;
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
- /* guaranteed by table construction */
- assert(matchIndex >= ddsLowestIndex);
- assert(match+4 <= ddsEnd);
- if (MEM_read32(match) == MEM_read32(ip)) {
- /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
- }
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
- }
- }
- }
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ return ml;
+}
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_MatchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls, U32 const lazySkipping)
+{
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ /* Stop inserting every position when in the lazy skipping mode. */
+ if (lazySkipping)
+ break;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
+}
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_HcFindBestMatch(
+ ZSTD_MatchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+ U32 nbAttempts = 1U << cParams->searchLog;
+ size_t ml=4-1;
+
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
+ const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+ ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+ const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+ ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+ U32 matchIndex;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32* entry = &dms->hashTable[ddsIdx];
+ PREFETCH_L1(entry);
+ }
+
+ /* HC4 match finder */
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping);
+
+ for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
} else if (dictMode == ZSTD_dictMatchState) {
const U32* const dmsChainTable = dms->chainTable;
const U32 dmsChainSize = (1 << dms->cParams.chainLog);
@@ -770,7 +759,8 @@
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
@@ -783,79 +773,749 @@
return ml;
}
+/* *********************************
+* (SIMD) Row-based matchfinder
+***********************************/
+/* Constants for row-based hash */
+#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
+#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
+
+#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
+
+typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
+
+/* ZSTD_VecMask_next():
+ * Starting from the LSB, returns the idx of the next non-zero bit.
+ * Basically counting the nb of trailing zeroes.
+ */
+MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+ return ZSTD_countTrailingZeros64(val);
+}
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+/* ZSTD_row_nextIndex():
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
+ * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
+ U32 next = (*tagRow-1) & rowMask;
+ next += (next == 0) ? rowMask : 0; /* skip first position */
+ *tagRow = (BYTE)next;
+ return next;
+}
+
+/* ZSTD_isAligned():
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
+ */
+MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+ assert((align & (align - 1)) == 0);
+ return (((size_t)ptr) & (align - 1)) == 0;
+}
+
+/* ZSTD_row_prefetch():
+ * Performs prefetching for the hashTable and tagTable at a given row.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {
+ PREFETCH_L1(hashTable + relRow);
+ if (rowLog >= 5) {
+ PREFETCH_L1(hashTable + relRow + 16);
+ /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
+ }
+ PREFETCH_L1(tagTable + relRow);
+ if (rowLog == 6) {
+ PREFETCH_L1(tagTable + relRow + 32);
+ }
+ assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
+}
+
+/* ZSTD_row_fillHashCache():
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
+ U32 idx, const BYTE* const iLimit)
{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ U32 const* const hashTable = ms->hashTable;
+ BYTE const* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
+
+ for (; idx < lim; ++idx) {
+ U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
+ }
+
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
+}
+
+/* ZSTD_row_nextCachedHash():
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ BYTE const* tagTable, BYTE const* base,
+ U32 idx, U32 const hashLog,
+ U32 const rowLog, U32 const mls,
+ U64 const hashSalt)
+{
+ U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
+ return hash;
}
}
+/* ZSTD_row_update_internalImpl():
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32* const hashTable = ms->hashTable;
+ BYTE* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ const BYTE* const base = ms->window.base;
-static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+ DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
+ for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
+ : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = tagTable + relRow;
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+
+ assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
+ tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ row[pos] = updateStartIdx;
+ }
+}
+
+/* ZSTD_row_update_internal():
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
+ * Skips sections of long matches as is necessary.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ U32 idx = ms->nextToUpdate;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ const U32 kSkipThreshold = 384;
+ const U32 kMaxMatchStartPositionsToUpdate = 96;
+ const U32 kMaxMatchEndPositionsToUpdate = 32;
+
+ if (useCache) {
+ /* Only skip positions when using hash cache, i.e.
+ * if we are loading a dict, don't skip anything.
+ * If we decide to skip, then we only update a set number
+ * of positions at the beginning and end of the match.
+ */
+ if (UNLIKELY(target - idx > kSkipThreshold)) {
+ U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
+ ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
+ idx = target - kMaxMatchEndPositionsToUpdate;
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
+ }
+ }
+ assert(target >= idx);
+ ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
+ ms->nextToUpdate = target;
+}
+
+/* ZSTD_row_update():
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
+ * processing.
+ */
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) {
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+ const U32 rowMask = (1u << rowLog) - 1;
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
+
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
+}
+
+/* Returns the mask width of bits group of which will be set to 1. Given not all
+ * architectures have easy movemask instruction, this helps to iterate over
+ * groups of bits easier and faster.
+ */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
+{
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ (void)rowEntries;
+#if defined(ZSTD_ARCH_ARM_NEON)
+ /* NEON path only works for little endian */
+ if (!MEM_isLittleEndian()) {
+ return 1;
}
+ if (rowEntries == 16) {
+ return 4;
+ }
+ if (rowEntries == 32) {
+ return 2;
+ }
+ if (rowEntries == 64) {
+ return 1;
+ }
+#endif
+ return 1;
}
+#if defined(ZSTD_ARCH_X86_SSE2)
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
+{
+ const __m128i comparisonMask = _mm_set1_epi8((char)tag);
+ int matches[4] = {0};
+ int i;
+ assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
+ for (i=0; icParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ if (rowEntries == 16) {
+ /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
+ * After that groups of 4 bits represent the equalMask. We lower
+ * all bits except the highest in these groups by doing AND with
+ * 0x88 = 0b10001000.
+ */
+ const uint8x16_t chunk = vld1q_u8(src);
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
+ const uint8x8_t res = vshrn_n_u16(equalMask, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
+ } else if (rowEntries == 32) {
+ /* Same idea as with rowEntries == 16 but doing AND with
+ * 0x55 = 0b01010101.
+ */
+ const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
+ const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
+ const uint8x8_t res = vsli_n_u8(t0, t1, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
+ } else { /* rowEntries == 64 */
+ const uint8x16x4_t chunk = vld4q_u8(src);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
+
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped);
+ }
+}
+#endif
+
+/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
+ * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
+ * matches the hash at the nth position in a row of the tagTable.
+ * Each row is a circular buffer beginning at the value of "headGrouped". So we
+ * must rotate the "matches" bitfield to match up with the actual layout of the
+ * entries within the hashTable */
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
+{
+ const BYTE* const src = tagRow;
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);
+
+#if defined(ZSTD_ARCH_X86_SSE2)
+
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);
+
+#else /* SW or NEON-LE */
+
+# if defined(ZSTD_ARCH_ARM_NEON)
+ /* This NEON path only works for little endian - otherwise use SWAR below */
+ if (MEM_isLittleEndian()) {
+ return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
}
+# endif /* ZSTD_ARCH_ARM_NEON */
+ /* SWAR */
+ { const int chunkSize = sizeof(size_t);
+ const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
+ const size_t xFF = ~((size_t)0);
+ const size_t x01 = xFF / 0xFF;
+ const size_t x80 = x01 << 7;
+ const size_t splatChar = tag * x01;
+ ZSTD_VecMask matches = 0;
+ int i = rowEntries - chunkSize;
+ assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
+ if (MEM_isLittleEndian()) { /* runtime check so have two loops */
+ const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= (chunk * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ } else { /* big endian: reverse bits during extraction */
+ const size_t msb = xFF ^ (xFF >> 1);
+ const size_t extractMagic = (msb / 0x1FF) | msb;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ }
+ matches = ~matches;
+ if (rowEntries == 16) {
+ return ZSTD_rotateRight_U16((U16)matches, headGrouped);
+ } else if (rowEntries == 32) {
+ return ZSTD_rotateRight_U32((U32)matches, headGrouped);
+ } else {
+ return ZSTD_rotateRight_U64((U64)matches, headGrouped);
+ }
+ }
+#endif
}
-
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+/* The high-level approach of the SIMD row based match finder is as follows:
+ * - Figure out where to insert the new entry:
+ * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index.
+ * - The hash is salted by a value that changes on every context reset, so when the same table is used
+ * we will avoid collisions that would otherwise slow us down by introducing phantom matches.
+ * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
+ * which row to insert into.
+ * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
+ * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes
+ * per row).
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
+ * - Pick the longest match.
+ * - Insert the tag into the equivalent row and position in the tagTable.
+ */
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_RowFindBestMatch(
+ ZSTD_MatchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode,
+ const U32 rowLog)
{
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
- }
-}
+ U32* const hashTable = ms->hashTable;
+ BYTE* const tagTable = ms->tagTable;
+ U32* const hashCache = ms->hashCache;
+ const U32 hashLog = ms->rowHashLog;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 rowEntries = (1U << rowLog);
+ const U32 rowMask = rowEntries - 1;
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
+ const U64 hashSalt = ms->hashSalt;
+ U32 nbAttempts = 1U << cappedSearchLog;
+ size_t ml=4-1;
+ U32 hash;
+
+ /* DMS/DDS variables that may be referenced laster */
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
+
+ /* Initialize the following variables to satisfy static analyzer */
+ size_t ddsIdx = 0;
+ U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
+ U32 dmsTag = 0;
+ U32* dmsRow = NULL;
+ BYTE* dmsTagRow = NULL;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ { /* Prefetch DDS hashtable entry */
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
+ }
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
+ }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ /* Prefetch DMS rows */
+ U32* const dmsHashTable = dms->hashTable;
+ BYTE* const dmsTagTable = dms->tagTable;
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
+ dmsRow = dmsHashTable + dmsRelRow;
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
+ }
+
+ /* Update the hashTable and tagTable up to (but not including) ip */
+ if (!ms->lazySkipping) {
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
+ } else {
+ /* Stop inserting every position when in the lazy skipping mode.
+ * The hash cache is also not kept up to date in this mode.
+ */
+ hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
+ ms->nextToUpdate = curr;
+ }
+ ms->hashSaltEntropy += hash; /* collect salt entropy */
+
+ { /* Get the hash for ip, compute the appropriate row */
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
+ U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);
+
+ /* Cycle through the matches and prefetch */
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
+ U32 const matchIndex = row[matchPos];
+ if(matchPos == 0) continue;
+ assert(numMatches < rowEntries);
+ if (matchIndex < lowLimit)
+ break;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ PREFETCH_L1(base + matchIndex);
+ } else {
+ PREFETCH_L1(dictBase + matchIndex);
+ }
+ matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
+ }
+
+ /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
+ in ZSTD_row_update_internal() at the next search. */
+ {
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+ tagRow[pos] = (BYTE)tag;
+ row[pos] = ms->nextToUpdate++;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex < curr);
+ assert(matchIndex >= lowLimit);
+
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* Save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* TODO: Measure and potentially add prefetching to DMS */
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+
+ { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);
+
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
+ U32 const matchIndex = dmsRow[matchPos];
+ if(matchPos == 0) continue;
+ if (matchIndex < dmsLowestIndex)
+ break;
+ PREFETCH_L1(dmsBase + matchIndex);
+ matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex >= dmsLowestIndex);
+ assert(matchIndex < curr);
+
+ { const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip))
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+ }
+
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break;
+ }
+ }
+ }
+ }
+ return ml;
+}
+
+
+/*
+ * Generate search functions templated on (dictMode, mls, rowLog).
+ * These functions are outlined for code size & compilation time.
+ * ZSTD_searchMax() dispatches to the correct implementation function.
+ *
+ * TODO: The start of the search function involves loading and calculating a
+ * bunch of constants from the ZSTD_MatchState_t. These computations could be
+ * done in an initialization function, and saved somewhere in the match state.
+ * Then we could pass a pointer to the saved state instead of the match state,
+ * and avoid duplicate computations.
+ *
+ * TODO: Move the match re-winding into searchMax. This improves compression
+ * ratio, and unlocks further simplifications with the next TODO.
+ *
+ * TODO: Try moving the repcode search into searchMax. After the re-winding
+ * and repcode search are in searchMax, there is no more logic in the match
+ * finder loop that requires knowledge about the dictMode. So we should be
+ * able to avoid force inlining it, and we can join the extDict loop with
+ * the single segment loop. It should go in searchMax instead of its own
+ * function to avoid having multiple virtual function calls per search.
+ */
+
+#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
+#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
+#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
+
+#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
+
+#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
+ ZSTD_MatchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offBasePtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
+ ZSTD_MatchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
+ ZSTD_MatchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
+ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
+ } \
+
+#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
+ X(dictMode, mls, 4) \
+ X(dictMode, mls, 5) \
+ X(dictMode, mls, 6)
+
+#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
+
+#define ZSTD_FOR_EACH_MLS(X, dictMode) \
+ X(dictMode, 4) \
+ X(dictMode, 5) \
+ X(dictMode, 6)
+
+#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
+ X(__VA_ARGS__, noDict) \
+ X(__VA_ARGS__, extDict) \
+ X(__VA_ARGS__, dictMatchState) \
+ X(__VA_ARGS__, dedicatedDictSearch)
+
+/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
+/* Generate binary Tree search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
+/* Generate hash chain search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
+
+typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
+
+#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ case rowLog: \
+ return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
+
+#define ZSTD_SWITCH_MLS(X, dictMode) \
+ switch (mls) { \
+ ZSTD_FOR_EACH_MLS(X, dictMode) \
+ }
+
+#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
+ case mls: \
+ switch (rowLog) { \
+ ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
+ } \
+ ZSTD_UNREACHABLE; \
+ break;
+
+#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
+ switch (searchMethod) { \
+ case search_hashChain: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
+ break; \
+ case search_binaryTree: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
+ break; \
+ case search_rowHash: \
+ ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
+ break; \
+ } \
+ ZSTD_UNREACHABLE;
+
+/*
+ * Searches for the longest match at @p ip.
+ * Dispatches to the correct implementation function based on the
+ * (searchMethod, dictMode, mls, rowLog). We use switch statements
+ * here instead of using an indirect function call through a function
+ * pointer because after Spectre and Meltdown mitigations, indirect
+ * function calls can be very costly, especially in the kernel.
+ *
+ * NOTE: dictMode and searchMethod should be templated, so those switch
+ * statements should be optimized out. Only the mls & rowLog switches
+ * should be left.
+ *
+ * @param ms The match state.
+ * @param ip The position to search at.
+ * @param iend The end of the input data.
+ * @param[out] offsetPtr Stores the match offset into this pointer.
+ * @param mls The minimum search length, in the range [4, 6].
+ * @param rowLog The row log (if applicable), in the range [4, 6].
+ * @param searchMethod The search method to use (templated).
+ * @param dictMode The dictMode (templated).
+ *
+ * @returns The length of the longest match found, or < mls if no match is found.
+ * If a match is found its offset is stored in @p offsetPtr.
+ */
+FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+ ZSTD_MatchState_t* ms,
+ const BYTE* ip,
+ const BYTE* iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ U32 const rowLog,
+ searchMethod_e const searchMethod,
+ ZSTD_dictMode_e const dictMode)
+{
+ if (dictMode == ZSTD_noDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(noDict)
+ } else if (dictMode == ZSTD_extDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(extDict)
+ } else if (dictMode == ZSTD_dictMatchState) {
+ ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
+ } else if (dictMode == ZSTD_dedicatedDictSearch) {
+ ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
+ }
+ ZSTD_UNREACHABLE;
+ return 0;
+}
/* *******************************
* Common parser - lazy strategy
*********************************/
-typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_lazy_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_lazy_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth,
@@ -865,47 +1525,20 @@
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 prefixLowestIndex = ms->window.dictLimit;
const BYTE* const prefixLowest = base + prefixLowestIndex;
-
- typedef size_t (*searchMax_f)(
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
- /*
- * This table is indexed first by the four ZSTD_dictMode_e values, and then
- * by the two searchMethod_e values. NULLs are placed for configurations
- * that should never occur (extDict modes go to the other implementation
- * below and there is no DDSS for binary tree search yet).
- */
- const searchMax_f searchFuncs[4][2] = {
- {
- ZSTD_HcFindBestMatch_selectMLS,
- ZSTD_BtFindBestMatch_selectMLS
- },
- {
- NULL,
- NULL
- },
- {
- ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
- ZSTD_BtFindBestMatch_dictMatchState_selectMLS
- },
- {
- ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
- NULL
- }
- };
-
- searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
- U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
const int isDxS = isDMS || isDDS;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
@@ -915,18 +1548,14 @@
0;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
- assert(searchMax != NULL);
-
- DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
-
- /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
}
if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
@@ -935,6 +1564,13 @@
assert(offset_2 <= dictAndPrefixLength);
}
+ /* Reset the lazy skipping state */
+ ms->lazySkipping = 0;
+
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+
/* Match Loop */
#if defined(__x86_64__)
/* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
@@ -944,8 +1580,9 @@
#endif
while (ip < ilimit) {
size_t matchLength=0;
- size_t offset=0;
+ size_t offBase = REPCODE1_TO_OFFBASE;
const BYTE* start=ip+1;
+ DEBUGLOG(7, "search baseline (depth 0)");
/* check repCode */
if (isDxS) {
@@ -954,7 +1591,7 @@
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -968,116 +1605,133 @@
}
/* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ { size_t offbaseFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode);
if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
+ matchLength = ml2, start = ip, offBase = offbaseFound;
}
if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */;
+ ip += step;
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
continue;
}
/* let's try to find a better solution */
if (depth>=1)
while (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
}
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue; /* search a better one */
} }
/* let's find an even better one */
if ((depth==2) && (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
}
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
}
/* NOTE:
- * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
- * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
- * overflows the pointer, which is undefined behavior.
+ * Pay attention that `start[-value]` can lead to strange undefined behavior
+ * notably if `value` is unsigned, resulting in a large positive `-value`.
*/
/* catch up */
- if (offset) {
+ if (OFFBASE_IS_OFFSET(offBase)) {
if (dictMode == ZSTD_noDict) {
- while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
- && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
+ while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
+ && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */
{ start--; matchLength++; }
}
if (isDxS) {
- U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
}
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
}
/* store sequence */
_storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
anchor = ip = start + matchLength;
}
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }
/* check immediate repcode */
if (isDxS) {
@@ -1087,12 +1741,12 @@
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex :
base + repIndex;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue;
@@ -1106,104 +1760,183 @@
&& (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
/* store sequence */
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
} } }
- /* Save reps for next block */
- rep[0] = offset_1 ? offset_1 : savedOffset;
- rep[1] = offset_2 ? offset_2 : savedOffset;
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+}
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}
+#endif
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
-
-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}
+#endif
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
+#endif
-
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth)
@@ -1212,7 +1945,7 @@
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
@@ -1220,18 +1953,21 @@
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const dictStart = dictBase + ms->window.lowLimit;
const U32 windowLog = ms->cParams.windowLog;
-
- typedef size_t (*searchMax_f)(
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
U32 offset_1 = rep[0], offset_2 = rep[1];
- DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
+
+ /* Reset the lazy skipping state */
+ ms->lazySkipping = 0;
/* init */
ip += (ip == prefixStart);
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
/* Match Loop */
#if defined(__x86_64__)
@@ -1242,7 +1978,7 @@
#endif
while (ip < ilimit) {
size_t matchLength=0;
- size_t offset=0;
+ size_t offBase = REPCODE1_TO_OFFBASE;
const BYTE* start=ip+1;
U32 curr = (U32)(ip-base);
@@ -1251,7 +1987,8 @@
const U32 repIndex = (U32)(curr+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
+ & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -1260,14 +1997,23 @@
} }
/* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
+ matchLength = ml2, start = ip, offBase = ofbCandidate;
}
if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength);
+ ip += step + 1; /* jump faster over incompressible sections */
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
continue;
}
@@ -1277,29 +2023,30 @@
ip ++;
curr++;
/* check repCode */
- if (offset) {
+ if (offBase) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
} }
/* search match, depth 1 */
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue; /* search a better one */
} }
@@ -1308,49 +2055,57 @@
ip ++;
curr++;
/* check repCode */
- if (offset) {
+ if (offBase) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
} }
/* search match, depth 2 */
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
}
/* catch up */
- if (offset) {
- U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ if (OFFBASE_IS_OFFSET(offBase)) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
}
/* store sequence */
_storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
anchor = ip = start + matchLength;
}
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }
/* check immediate repcode */
while (ip <= ilimit) {
@@ -1359,13 +2114,14 @@
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
+ & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
@@ -1380,35 +2136,65 @@
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
-
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+}
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
}
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+}
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
}
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+}
+#endif
+
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
+#endif
================================================
FILE: zram_patch/lz4armv8.S
================================================
/*
* lz4armv8.S
* LZ4 decompression optimization based on arm64 NEON instruction
*/
/*
 * NOTE(review): the original include targets were lost (bare "#include"
 * lines do not assemble). Reconstructed from visible usage in this file:
 * SYM_FUNC_START/SYM_FUNC_END/SYM_INNER_LABEL need <linux/linkage.h>,
 * the adr_l pseudo-instruction needs <asm/assembler.h>, and the
 * LINUX_VERSION_CODE/KERNEL_VERSION check below needs <linux/version.h>.
 * TODO: confirm against the upstream lz4armv8.S this patch set ships.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <linux/version.h>
/**
 * _lz4_decompress_asm: fast LZ4 decompression, an LZ4 decompression
 * algorithm asm routine, supporting the Huawei EROFS filesystem and
 * striving for maximum decompression speed.
 * Entry point _lz4_decompress_asm.
 * @param:
 * x0 = current destination address ptr
 * x1 = destination start position
 * x2 = destination end position
 * x3 = current source address ptr
 * x4 = source end position
 * x5 = flag for DIP
 * @ret:
 * 0 on success, -1 on failure
 *
 * x7: match_length
 * x8: literal_length
 * x9: copy start ptr
 * x10: copy end ptr
 */
/* Register aliases used throughout the decompressor. */
#define match_length x7
#define literal_length x8
#define copy_from_ptr x9 /* copy source ptr */
#define copy_to_ptr x10 /* copy destination ptr */
#define w_tmp w11 /* temp var (32-bit view of tmp) */
#define tmp x11
#define w_offset w12 /* 32-bit view of offset */
#define offset x12
#define permtable_addr x13 /* &Permtable (.rodata) */
#define cplen_table_addr x14 /* &Copylength_table (.rodata) */
#define save_dst x15 /* dst snapshot, written back on exit */
#define save_src x16 /* src snapshot, written back on exit */
#define offset_src_ptr x17 /* src position of the 2-byte offset field */
/*
 * Scratch register holding the raw 4-bit match-length nibble from the
 * token byte (also stored back in the Done1 DIP path).
 * NOTE(review): kernels >= 5.4 use x6/w6 instead of x18/w18 —
 * presumably because x18 is reserved as the platform register
 * (e.g. shadow call stack) on newer kernels; confirm.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
#define w_tmp_match_length w6
#define tmp_match_length x6
#else
#define w_tmp_match_length w18
#define tmp_match_length x18
#endif
/*
 * Bounds-check helpers. The plain variants exit via Done (clean,
 * returns 0); the "1" variants exit via Done1, which additionally
 * performs the DIP fixup when x5 is set.
 */
/* x3 >= x4: source exhausted -> Done */
.macro check_src_overflow
cmp x3, x4
b.hs Done
.endm
/* x3 >= x4: source exhausted -> Done1 */
.macro check_src_overflow1
cmp x3, x4
b.hs Done1
.endm
/* x0 >= x2: destination full -> Done */
.macro check_dst_overflow
cmp x0, x2
b.hs Done
.endm
/* x0 >= x2: destination full -> Done1 */
.macro check_dst_overflow1
cmp x0, x2
b.hs Done1
.endm
.altmacro
/*
 * lz4_decompress_asm_generic: core LZ4 sequence-decoding loop.
 * \doprfm selects whether PRFM prefetch hints are issued for the
 * destination stream (1 = prefetch, 0 = none).
 * Register contract is described in the file header. Copies are done in
 * 16/32-byte NEON chunks, so stores may run past the exact literal/match
 * end; the overflow-check macros bound how far that can reach.
 */
.macro lz4_decompress_asm_generic doprfm=1
stp x29, x30, [sp, #-16]!
mov x29, sp
stp x3, x0, [sp, #-16]! /* save src_ptr/dst_ptr for write-back at exit */
ldr x3, [x3] /* x3 = *src_ptr */
ldr x0, [x0] /* x0 = *dst_ptr */
adr_l permtable_addr, Permtable
adr_l cplen_table_addr, Copylength_table
1:
/*
 * Lz4_decompress_begin:
 * Snapshot the current dst and src so that, whenever the asm routine
 * returns, both written-back pointers describe a consistent position.
 */
mov save_dst, x0
mov save_src, x3
check_dst_overflow
check_src_overflow
.if \doprfm
/* Prefetch 512 bytes ahead of dst while still inside the buffer. */
add tmp, x0, #512
cmp x2, tmp
b.ls 2f
prfm pstl2strm,[x0,#512]
.endif
2:
/* Decode_token: */
ldrb w_tmp, [x3], #1 /* read token byte */
lsr literal_length, tmp, #4 /* high nibble: literal_length */
and tmp_match_length, tmp, #0xf /* low nibble: raw match_length */
add match_length, tmp_match_length, #4 /* minmatch: match_length >= 4 */
/*
 * literal_length <= 14 : no more literal-length bytes; zero or more
 * literal bytes follow directly.
 */
cmp literal_length, #14
b.ls 6f
/*
 * literal_length == 15 : more literal-length bytes after the token.
 * Accumulate until a byte != 255 terminates the length.
 */
3:
/* Get_literal_length: */
check_src_overflow
ldrb w_tmp, [x3], #1
add literal_length, literal_length, tmp
cmp tmp, #255
b.eq 3b
/* literal copy */
4:
/* Copy_long_literal_hs_15: advance x3/x0 first, then bulk-copy */
mov copy_from_ptr, x3
mov copy_to_ptr, x0
add x3, x3, literal_length
add x0, x0, literal_length
check_dst_overflow
check_src_overflow
5:
/* Copy_long_literal_loop: 16 bytes per iteration */
ldr q0, [copy_from_ptr], #16
str q0, [copy_to_ptr], #16
cmp x0, copy_to_ptr
b.ls 7f
b 5b
6:
/* Copy_literal_lt_15: one 16-byte store covers <= 14 literals */
ldr q0, [x3]
str q0, [x0]
add x3, x3, literal_length
add x0, x0, literal_length
/* Decode offset and match_length */
7:
/* Decode_offset_matchlength: */
mov offset_src_ptr, x3 /* remember where the offset field starts */
ldrh w_offset, [x3], #2 /* 2 bytes: match offset */
cbz offset, Failed /* offset == 0 is invalid */
sub copy_from_ptr, x0, offset
cmp copy_from_ptr, x1
b.lo Failed /* match source before dst start: corrupt input */
mov copy_to_ptr, x0
/*
 * set x0 to the end of the "match copy";
 */
add x0, x0, match_length
cmp match_length, #19 /* 15 + minmatch: extra length bytes follow */
b.lo 9f
/*
 * continue decoding more match-length bytes (255-terminated run),
 * extending both match_length and the copy-end pointer x0.
 */
8:
/* Get_long_matchlength: */
check_src_overflow1
ldrb w_tmp, [x3], #1
add x0, x0, tmp
add match_length, match_length, tmp
cmp tmp, #255
b.eq 8b
/*
 * here got the matchlength, start "match copy".
 */
9:
/* Copy_match_begin: */
check_dst_overflow1
cmp offset , match_length
b.hs 14f
10:
/* Cond_offset_lt_matchlength: overlapping copy required */
cmp offset , #32
b.hs 14f
11:
/*
 * Copy_offset_lt_32: expand the repeating pattern into q0/q1 via TBL
 * permutation so the overlapped match can be stored in 32-byte chunks.
 */
ldr q1, [copy_from_ptr]
add tmp, permtable_addr, offset, lsl #5 /* 32-byte Permtable row */
ldp q2, q3, [tmp]
tbl v0.16b, {v1.16b}, v2.16b
tbl v1.16b, {v1.16b}, v3.16b
cmp offset , #16
b.lo 12f
ldp q0, q1, [copy_from_ptr] /* offset >= 16: pattern is a plain load */
12:
/* Copy_match_perm: advance by the repeating-pattern size (RPS) */
ldrb w_tmp, [cplen_table_addr, offset]
stp q0, q1, [copy_to_ptr]
add copy_to_ptr, copy_to_ptr, tmp
cmp x0, copy_to_ptr
b.ls 1b
13:
/* Copy_offset_lt_32_loop: two RPS-strided 32-byte stores per pass */
stp q0, q1, [copy_to_ptr]
add copy_to_ptr, copy_to_ptr, tmp
stp q0, q1, [copy_to_ptr]
add copy_to_ptr, copy_to_ptr, tmp
cmp x0, copy_to_ptr
b.hi 13b
b 1b
/* offset >= match: simple forward copy is safe */
14:
/* Cond_offset_ge_matchlength: */
ldr q0, [copy_from_ptr], #16
str q0, [copy_to_ptr], #16
cmp x0, copy_to_ptr
b.ls 1b
15:
/* Copy_offset_ge_match_loop: 32 bytes per iteration */
ldp q0, q1, [copy_from_ptr], #32
stp q0, q1, [copy_to_ptr], #32
cmp x0, copy_to_ptr
b.hi 15b
b 1b
.endm
.text
.p2align 4
/* Prefetching variant: expands the generic loop with doprfm=1. */
SYM_FUNC_START(_lz4_decompress_asm)
lz4_decompress_asm_generic
SYM_FUNC_END(_lz4_decompress_asm)
/* Shared exit paths for both function variants above. */
SYM_INNER_LABEL(Failed, SYM_L_LOCAL)
mov tmp, #-1 /* corrupt stream: return -1 */
b Exit_here
/*
 * Done1: source/destination ran out while decoding a match.
 * When the DIP flag (x5) is set, rewind save_src to the byte just
 * before the offset field and store the raw match-length nibble there,
 * and advance save_dst past the literals already copied.
 * NOTE(review): presumably this rewrites the stream/pointers so the
 * caller can resume this sequence later — confirm against the DIP
 * consumer of this routine.
 */
Done1:
cbz x5, Done
sub save_src, offset_src_ptr, #1
strb w_tmp_match_length, [save_src]
add save_dst,save_dst,literal_length
Done:
mov tmp, #0 /* clean completion: return 0 */
Exit_here:
/* Write the final positions back through the saved src/dst pointers. */
ldp x3, x0, [sp], #16
str save_src, [x3]
str save_dst, [x0]
mov x0, tmp /* return value */
ldp x29, x30, [sp], #16
ret x30
/*
 * In case of offset <= 31 < matchlength, expand the pattern into its
 * repeating pattern size (RPS); the RPS per offset is stored in
 * Copylength_table.
 * case 1): 1 <= offset <= 15
 * expand the pattern according to Permtable and keep the repeating
 * pattern in q0/q1;
 * RPS = 32 - (32 % offset), offset <= 31
 * case 2): offset >= 16
 * read the pattern directly into q0/q1.
 * RPS = offset.
 */
.pushsection ".rodata", "a"
.p2align 8
Permtable:
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 //offset = 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 //offset = 1
.byte 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 //offset = 2
.byte 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1 //offset = 3
.byte 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 //offset = 4
.byte 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1 //offset = 5
.byte 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1 //offset = 6
.byte 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3 //offset = 7
.byte 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7 //offset = 8
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4 //offset = 9
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1 //offset = 10
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 //offset = 11
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 0, 1, 2, 3, 4, 5, 6, 7 //offset = 12
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12, 0, 1, 2, 3, 4, 5 //offset = 13
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13, 0, 1, 2, 3 //offset = 14
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14, 0, 1 //offset = 15
.p2align 8
/* Indexed by offset (0..31): bytes to advance per 32-byte store (RPS). */
Copylength_table:
.byte 32,32,32,30,32,30,30,28,32,27,30,22,24,26,28,30 // 0 .. 15
.byte 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 // 16 .. 31
.popsection
.text
.p2align 4
/* Non-prefetching variant: expands the generic loop with doprfm=0. */
SYM_FUNC_START(_lz4_decompress_asm_noprfm)
lz4_decompress_asm_generic 0
SYM_FUNC_END(_lz4_decompress_asm_noprfm)