From df50f456bd94850bf86b3ede49772f641b16279f Mon Sep 17 00:00:00 2001 From: felipecr Date: Mon, 27 Apr 2026 13:29:38 +0200 Subject: [PATCH 01/15] rustfmt alignment --- crates/mps_hook/src/main.rs | 5 ++++- crates/pce_hook/src/main.rs | 36 +++++++++++++----------------------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/crates/mps_hook/src/main.rs b/crates/mps_hook/src/main.rs index c909534..06ea060 100644 --- a/crates/mps_hook/src/main.rs +++ b/crates/mps_hook/src/main.rs @@ -63,7 +63,10 @@ fn run_unix() -> i32 { /// ps/grep check: returns true if `nvidia-cuda-mps-server` is running for the given UID. fn server_running_for_uid_ps_grep(uid: u32) -> Result { - let pattern = format!(r#"ps -eo uid=,comm= | grep -E "^\s*{}\s+nvidia-cuda-mps-server(\s|$)" -q"#, uid); + let pattern = format!( + r#"ps -eo uid=,comm= | grep -E "^\s*{}\s+nvidia-cuda-mps-server(\s|$)" -q"#, + uid + ); let status = Command::new("sh") .arg("-lc") .arg(&pattern) diff --git a/crates/pce_hook/src/main.rs b/crates/pce_hook/src/main.rs index 8803e75..2c3a343 100644 --- a/crates/pce_hook/src/main.rs +++ b/crates/pce_hook/src/main.rs @@ -22,7 +22,6 @@ struct Precreate { container_edits: Vec, } - #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ContainerEdit { @@ -33,7 +32,6 @@ struct ContainerEdit { mounts: Vec, } - #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct Mount { @@ -47,7 +45,6 @@ struct Mount { options: Option>, } - fn main() -> io::Result<()> { // we go for run if let Err(e) = run() { @@ -61,7 +58,6 @@ fn main() -> io::Result<()> { Ok(()) } - fn run() -> Result<(), String> { // Read and parse stdin JSON let mut value = read_stdin_json()?; @@ -81,19 +77,20 @@ fn run() -> Result<(), String> { // Pretty-print output JSON with trailing newline let mut stdout = io::stdout().lock(); - + serde_json::to_writer_pretty(&mut stdout, &value) .map_err(|e| format!("Failed to write JSON to stdout: 
{e}"))?; stdout .write_all(b"\n") .map_err(|e| format!("Failed to write newline to stdout: {e}"))?; - stdout.flush().map_err(|e| format!("Failed to flush stdout: {e}"))?; + stdout + .flush() + .map_err(|e| format!("Failed to flush stdout: {e}"))?; Ok(()) } - // Precreate takes as stdin the container config json // We return error if we cannot read or // if we cannot parse a valid input json @@ -104,11 +101,9 @@ fn read_stdin_json() -> Result { .read_to_string(&mut input) .map_err(|e| format!("Failed to read from stdin: {e}"))?; - serde_json::from_str(&input) - .map_err(|e| format!("Invalid JSON: {e}")) + serde_json::from_str(&input).map_err(|e| format!("Invalid JSON: {e}")) } - // Reading for precreate container edits input // we try to read the input file // we try to parse it into json @@ -126,8 +121,8 @@ fn read_pce_input() -> Result<(Vec, Vec), String> { .map_err(|e| format!("PCE_INPUT: fail to read {}: {}", path, e))?; // parse into json - let pre: Precreate = serde_json::from_str(&s) - .map_err(|e| format!("PCE_INPUT: Invalid JSON: {e}"))?; + let pre: Precreate = + serde_json::from_str(&s).map_err(|e| format!("PCE_INPUT: Invalid JSON: {e}"))?; // extract mounts and envs let mut mounts = Vec::new(); @@ -140,18 +135,14 @@ fn read_pce_input() -> Result<(Vec, Vec), String> { Ok((mounts, envs)) } - /// Ensure a `Value` is an object and return it as a mutable map. 
fn ensure_obj<'a>( candidate: Option<&'a mut Map>, err: &str, ) -> Result<&'a mut Map, String> { - candidate.ok_or_else(|| format!("Validation error: {err}.")) - } - // Manual write of mount block as cdi and container config formats dont match fn append_mounts(obj: &mut Map, mounts_to_add: Vec) -> Result<(), String> { let mounts = ensure_array_field(obj, "mounts")?; @@ -191,7 +182,6 @@ fn append_mounts(obj: &mut Map, mounts_to_add: Vec) -> Res // Ok(()) //} - fn ensure_array_field<'a>( obj: &'a mut Map, field: &str, @@ -212,13 +202,14 @@ fn ensure_array_field<'a>( let v = e.into_mut(); // &'a mut Value match v { Value::Array(ref mut arr) => Ok(arr), - _ => Err(format!("Validation error: '{field}' exists but is not an array.")), + _ => Err(format!( + "Validation error: '{field}' exists but is not an array." + )), } } } } - /// Validate a list of "KEY=value" strings. fn validate_env_strings(entries: Vec) -> Result, String> { for s in &entries { @@ -228,7 +219,6 @@ fn validate_env_strings(entries: Vec) -> Result, String> { Ok(entries) } - fn validate_kv_format(s: &str) -> Result<(), String> { if let Some((k, _v)) = s.split_once('=') { if k.is_empty() { @@ -240,7 +230,6 @@ fn validate_kv_format(s: &str) -> Result<(), String> { } } - // merging envs into the container config json is as follows // 1. 
we need to add envs into the process object // 1.5 we create process if it is not there @@ -253,7 +242,9 @@ fn merge_process_env_strings( env_entries: Vec, ) -> Result<(), String> { // Ensure "process" is an object - let process_val = obj.entry("process".to_string()).or_insert_with(|| json!({})); + let process_val = obj + .entry("process".to_string()) + .or_insert_with(|| json!({})); let process_obj = process_val .as_object_mut() .ok_or_else(|| "Validation error: 'process' exists but is not an object.".to_string())?; @@ -279,4 +270,3 @@ fn merge_process_env_strings( Ok(()) } - From f2c37817833a3bc050f33f29207f2346fc575f9c Mon Sep 17 00:00:00 2001 From: felipecr Date: Mon, 27 Apr 2026 13:30:58 +0200 Subject: [PATCH 02/15] new vagrant test env for hooks --- test/vagrant/ubuntu-24.04/README.md | 42 +++ test/vagrant/ubuntu-24.04/Vagrantfile | 98 +++++++ .../ubuntu-24.04/prepare-cloud-image.sh | 136 ++++++++++ .../ubuntu-24.04/provision/install-podman.sh | 242 ++++++++++++++++++ 4 files changed, 518 insertions(+) create mode 100644 test/vagrant/ubuntu-24.04/README.md create mode 100644 test/vagrant/ubuntu-24.04/Vagrantfile create mode 100755 test/vagrant/ubuntu-24.04/prepare-cloud-image.sh create mode 100755 test/vagrant/ubuntu-24.04/provision/install-podman.sh diff --git a/test/vagrant/ubuntu-24.04/README.md b/test/vagrant/ubuntu-24.04/README.md new file mode 100644 index 0000000..e9cb274 --- /dev/null +++ b/test/vagrant/ubuntu-24.04/README.md @@ -0,0 +1,42 @@ +# Ubuntu 24.04 Vagrant test VM +Provisions an Ubuntu 24.04 guest with Podman plus the runtime pieces we need to test OCI hooks + +## What + +- install only the prerequisites from Ubuntu 24.04 packages +- install Podman itself from the static bundle used by the host-tools flow +- configure rootless Podman for the `vagrant` user +- sync this repository into the guest for iterative hook testing + +## Usage + +From this directory: + +```bash +./prepare-cloud-image.sh +vagrant destroy -f +vagrant up +vagrant ssh +``` 
+ +If you change files on the host and want to refresh the VM: + +```bash +vagrant rsync +``` + +Once on the VM (via vagrant ssh): + +```bash +cd /workspace/performance-extensions +cargo build --release +bats test +``` + +To focus on the Podman integration test: + +```bash +cd /workspace/performance-extensions +cargo build --release +bats test/pce-podman.bats +``` diff --git a/test/vagrant/ubuntu-24.04/Vagrantfile b/test/vagrant/ubuntu-24.04/Vagrantfile new file mode 100644 index 0000000..09b0a4b --- /dev/null +++ b/test/vagrant/ubuntu-24.04/Vagrantfile @@ -0,0 +1,98 @@ +require "rbconfig" + +Vagrant.configure("2") do |config| + host_cpu = RbConfig::CONFIG["host_cpu"] + guest_arch = + case host_cpu + when /arm|aarch64/ + "arm64" + else + "amd64" + end + + image_name = "noble-server-cloudimg-#{guest_arch}.img" + image_disk_size = ENV["UBUNTU_CLOUD_IMAGE_SIZE"] || "40G" + prepared_image_name = "noble-server-cloudimg-#{guest_arch}-#{image_disk_size.downcase}.qcow2" + image_path = ENV["UBUNTU_CLOUD_IMAGE"] || File.expand_path(".cache/#{prepared_image_name}", __dir__) + seed_path = ENV["UBUNTU_CLOUD_INIT_SEED"] || File.expand_path(".cache/nocloud-seed.iso", __dir__) + qemu_arch = guest_arch == "arm64" ? "aarch64" : "x86_64" + qemu_dir_candidates = [ + ENV["QEMU_DIR"], + "/usr/local/share/qemu", + "/opt/homebrew/share/qemu" + ].compact + qemu_dir = qemu_dir_candidates.find { |path| File.directory?(path) } + qemu_machine = ENV["QEMU_MACHINE"] || (guest_arch == "arm64" ? "virt,accel=hvf,highmem=off" : "q35") + qemu_cpu = ENV["QEMU_CPU"] || (guest_arch == "arm64" ? "cortex-a72" : "qemu64") + qemu_smp = ENV["QEMU_SMP"] || "2" + qemu_memory = ENV["QEMU_MEMORY"] || (guest_arch == "arm64" ? 
"2G" : "4G") + qemu_ssh_port = ENV["QEMU_SSH_PORT"] || 50022 + + config.vm.hostname = "performance-extensions-noble" + config.vm.boot_timeout = 900 + + config.vm.synced_folder ".", "/vagrant", disabled: true + + repo_root = File.expand_path("../../..", __dir__) + config.vm.synced_folder repo_root, "/workspace/performance-extensions", + type: "rsync", + rsync__exclude: [".git/", "target/", ".vagrant/", ".cache/"] + + config.ssh.username = "vagrant" + config.ssh.private_key_path = File.expand_path("~/.vagrant.d/insecure_private_key") + config.ssh.insert_key = false + + config.vm.provider "qemu" do |qe| + raise <<~MSG unless File.exist?(image_path) + Missing Ubuntu cloud image: #{image_path} + + Run: + ./prepare-cloud-image.sh + + Or set UBUNTU_CLOUD_IMAGE to an existing Ubuntu 24.04 qcow2 image path. + MSG + + raise <<~MSG unless File.exist?(seed_path) + Missing NoCloud seed image: #{seed_path} + + Run: + ./prepare-cloud-image.sh + + Or set UBUNTU_CLOUD_INIT_SEED to an existing seed ISO path. + MSG + + raise <<~MSG if qemu_dir.nil? + Missing QEMU firmware directory. + + Checked: + #{qemu_dir_candidates.join("\n ")} + + Set QEMU_DIR to the directory containing edk2 firmware files such as: + export QEMU_DIR=/usr/local/share/qemu + MSG + + qe.image_path = image_path + qe.arch = qemu_arch + qe.machine = qemu_machine + qe.cpu = qemu_cpu + qe.memory = qemu_memory + qe.smp = qemu_smp + qe.ssh_port = qemu_ssh_port + qe.qemu_dir = qemu_dir + qe.disk_resize = "40G" + qe.ssh_auto_correct = true + qe.net_device = guest_arch == "arm64" ? 
"virtio-net-device" : "virtio-net-pci" + qe.extra_qemu_args = %W(-drive file=#{seed_path},if=virtio,media=cdrom,format=raw) + end + + config.vm.provision( + "shell", + path: "provision/install-podman.sh", + env: { + "PODMAN_STATIC_URL" => ENV["PODMAN_STATIC_URL"], + "PODMAN_STATIC_VERSION" => ENV["PODMAN_STATIC_VERSION"], + "VM_USER" => ENV["VM_USER"] + }, + args: ["/workspace/performance-extensions"] + ) +end diff --git a/test/vagrant/ubuntu-24.04/prepare-cloud-image.sh b/test/vagrant/ubuntu-24.04/prepare-cloud-image.sh new file mode 100755 index 0000000..bbe2d3d --- /dev/null +++ b/test/vagrant/ubuntu-24.04/prepare-cloud-image.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CACHE_DIR="${ROOT_DIR}/.cache" +SEED_DIR="${CACHE_DIR}/nocloud" +SEED_IMAGE="${CACHE_DIR}/nocloud-seed.iso" +VAGRANT_INSECURE_PRIVATE_KEY="${HOME}/.vagrant.d/insecure_private_key" +VM_DISK_SIZE="${UBUNTU_CLOUD_IMAGE_SIZE:-40G}" + +log() { + printf '[prepare-cloud-image] %s\n' "$*" +} + +detect_arch() { + case "$(uname -m)" in + arm64|aarch64) + printf 'arm64\n' + ;; + x86_64|amd64) + printf 'amd64\n' + ;; + *) + echo "unsupported host architecture: $(uname -m)" >&2 + exit 1 + ;; + esac +} + +main() { + local arch image_name image_url downloaded_image_path prepared_image_path insecure_pubkey + + arch="$(detect_arch)" + image_name="noble-server-cloudimg-${arch}.img" + image_url="https://cloud-images.ubuntu.com/noble/current/${image_name}" + downloaded_image_path="${CACHE_DIR}/${image_name}" + prepared_image_path="${CACHE_DIR}/noble-server-cloudimg-${arch}-${VM_DISK_SIZE,,}.qcow2" + insecure_pubkey="$(ssh-keygen -y -f "${VAGRANT_INSECURE_PRIVATE_KEY}")" + + mkdir -p "${CACHE_DIR}" + + if [ -f "${downloaded_image_path}" ]; then + log "reusing ${downloaded_image_path}" + else + log "downloading ${image_url}" + curl -fL "${image_url}" -o "${downloaded_image_path}" + log "saved ${downloaded_image_path}" + fi + + if ! 
command -v qemu-img >/dev/null 2>&1; then + echo "missing qemu-img; install QEMU on the host first" >&2 + exit 1 + fi + + if [ ! -f "${prepared_image_path}" ]; then + log "creating resized guest image ${prepared_image_path}" + cp "${downloaded_image_path}" "${prepared_image_path}" + else + log "reusing ${prepared_image_path}" + fi + + qemu-img resize "${prepared_image_path}" "${VM_DISK_SIZE}" >/dev/null + log "resized ${prepared_image_path} to ${VM_DISK_SIZE}" + + mkdir -p "${SEED_DIR}" + cat > "${SEED_DIR}/user-data" < "${SEED_DIR}/meta-data" </dev/null 2>&1; then + log "building NoCloud seed with cloud-localds" + cloud-localds "${SEED_IMAGE}" "${SEED_DIR}/user-data" "${SEED_DIR}/meta-data" + elif command -v hdiutil >/dev/null 2>&1; then + local tmp_base="${CACHE_DIR}/nocloud-seed" + local generated_path="" + rm -f "${tmp_base}" "${tmp_base}.cdr" "${tmp_base}.iso" "${SEED_IMAGE}" + log "building NoCloud seed with hdiutil" + hdiutil makehybrid \ + -o "${tmp_base}" \ + "${SEED_DIR}" \ + -iso \ + -joliet \ + -default-volume-name cidata \ + >/dev/null + + for candidate in "${tmp_base}" "${tmp_base}.cdr" "${tmp_base}.iso"; do + if [ -f "${candidate}" ]; then + generated_path="${candidate}" + break + fi + done + + if [ -z "${generated_path}" ]; then + echo "hdiutil did not create an output image under ${tmp_base}[.cdr|.iso]" >&2 + exit 1 + fi + + mv "${generated_path}" "${SEED_IMAGE}" + elif command -v genisoimage >/dev/null 2>&1; then + log "building NoCloud seed with genisoimage" + genisoimage -output "${SEED_IMAGE}" -volid cidata -joliet -rock "${SEED_DIR}/user-data" "${SEED_DIR}/meta-data" >/dev/null + elif command -v mkisofs >/dev/null 2>&1; then + log "building NoCloud seed with mkisofs" + mkisofs -output "${SEED_IMAGE}" -volid cidata -joliet -rock "${SEED_DIR}/user-data" "${SEED_DIR}/meta-data" >/dev/null + else + echo "missing tool to build cloud-init seed ISO; install cloud-localds, hdiutil, genisoimage, or mkisofs" >&2 + exit 1 + fi + + log "saved 
${SEED_IMAGE}" +} + +main "$@" diff --git a/test/vagrant/ubuntu-24.04/provision/install-podman.sh b/test/vagrant/ubuntu-24.04/provision/install-podman.sh new file mode 100755 index 0000000..6eaf542 --- /dev/null +++ b/test/vagrant/ubuntu-24.04/provision/install-podman.sh @@ -0,0 +1,242 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_MOUNT="${1:-/workspace/performance-extensions}" +VM_USER="${VM_USER:-vagrant}" +CACHE_DIR="${CACHE_DIR:-/var/cache/performance-extensions}" +DOWNLOAD_DIR="${CACHE_DIR}/downloads" +BUILD_DIR="${CACHE_DIR}/build" +PODMAN_STATIC_VERSION="${PODMAN_STATIC_VERSION:-latest}" +BATS_SUPPORT_REF="${BATS_SUPPORT_REF:-v0.3.0}" +BATS_ASSERT_REF="${BATS_ASSERT_REF:-v2.1.0}" + +log() { + printf '[vagrant-podman] %s\n' "$*" +} + +require_root() { + if [ "${EUID}" -ne 0 ]; then + echo "this provisioner must run as root" >&2 + exit 1 + fi +} + +require_cmd() { + command -v "$1" >/dev/null 2>&1 || { + echo "missing required command: $1" >&2 + exit 1 + } +} + +detect_arch() { + case "$(uname -m)" in + aarch64|arm64) + printf 'arm64\n' + ;; + x86_64|amd64) + printf 'amd64\n' + ;; + *) + echo "unsupported guest architecture: $(uname -m)" >&2 + exit 1 + ;; + esac +} + +podman_static_url() { + local arch="$1" + + if [ -n "${PODMAN_STATIC_URL:-}" ]; then + printf '%s\n' "${PODMAN_STATIC_URL}" + return + fi + + if [ "${PODMAN_STATIC_VERSION}" = "latest" ]; then + printf 'https://github.com/mgoltzsche/podman-static/releases/latest/download/podman-linux-%s.tar.gz\n' "${arch}" + else + printf 'https://github.com/mgoltzsche/podman-static/releases/download/%s/podman-linux-%s.tar.gz\n' "${PODMAN_STATIC_VERSION}" "${arch}" + fi +} + +download_if_missing() { + local url="$1" + local dest="$2" + + if [ ! -f "${dest}" ]; then + log "downloading ${url}" + curl -fsSL "${url}" -o "${dest}" + fi +} + +append_subid_range() { + local file="$1" + local user="$2" + local range_start="$3" + local range_size="$4" + + if ! 
grep -q "^${user}:" "${file}"; then + printf '%s:%s:%s\n' "${user}" "${range_start}" "${range_size}" >> "${file}" + fi +} + +configure_rootless_user() { + local user="$1" + local uid + local gid + local home + + uid="$(id -u "${user}")" + gid="$(id -g "${user}")" + home="$(getent passwd "${user}" | cut -d: -f6)" + + append_subid_range /etc/subuid "${user}" 100000 65536 + append_subid_range /etc/subgid "${user}" 100000 65536 + + install -d -m 0700 -o "${uid}" -g "${gid}" "/run/user/${uid}" + install -d -m 0755 -o "${uid}" -g "${gid}" "${home}/.config" + install -d -m 0755 -o "${uid}" -g "${gid}" "${home}/.config/containers" + + cat > "${home}/.config/containers/containers.conf" </dev/null 2>&1; then + log "removing distro podman packages to avoid mixing binaries" + apt-get remove -y podman podman-docker || true + fi +} + +install_podman_static() { + local arch="$1" + local url="$2" + local tarball="${DOWNLOAD_DIR}/podman-linux-${arch}.tar.gz" + local unpack_dir="${BUILD_DIR}/podman-linux-${arch}" + local bundle_root="${unpack_dir}/podman-linux-${arch}" + + mkdir -p "${DOWNLOAD_DIR}" "${BUILD_DIR}" + download_if_missing "${url}" "${tarball}" + + rm -rf "${unpack_dir}" + mkdir -p "${unpack_dir}" + tar -xzf "${tarball}" -C "${unpack_dir}" + + log "installing static podman bundle into /usr/local and /etc" + cp -R "${bundle_root}/usr/." /usr/ + cp -R "${bundle_root}/etc/." /etc/ + + test -x /usr/local/bin/podman +} + +configure_apparmor() { + local profile="/etc/apparmor.d/podman" + + if [ -f "${profile}" ]; then + sed -Ei 's!^profile podman /usr/bin/podman !profile podman /usr/{bin,local/bin}/podman !' 
"${profile}" + if command -v apparmor_parser >/dev/null 2>&1; then + apparmor_parser -r "${profile}" || true + fi + fi +} + +verify_install() { + local user="$1" + local uid + + uid="$(id -u "${user}")" + + PATH="/usr/local/bin:${PATH}" podman --version + runuser -l "${user}" -c "export PATH=/usr/local/bin:\$PATH XDG_RUNTIME_DIR=/run/user/${uid}; podman info --format '{{.Host.OCIRuntime.Name}}'" +} + +write_summary() { + local summary_file="/etc/motd.d/performance-extensions-podman" + + mkdir -p "$(dirname "${summary_file}")" + cat > "${summary_file}" < Date: Mon, 27 Apr 2026 19:37:19 +0200 Subject: [PATCH 03/15] v1 single lib replace --- crates/pc_injection_hook/Cargo.toml | 9 + crates/pc_injection_hook/README.md | 25 + crates/pc_injection_hook/src/main.rs | 1355 ++++++++++++++++++++++++++ 3 files changed, 1389 insertions(+) create mode 100644 crates/pc_injection_hook/Cargo.toml create mode 100644 crates/pc_injection_hook/README.md create mode 100644 crates/pc_injection_hook/src/main.rs diff --git a/crates/pc_injection_hook/Cargo.toml b/crates/pc_injection_hook/Cargo.toml new file mode 100644 index 0000000..ff07522 --- /dev/null +++ b/crates/pc_injection_hook/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "pc_injection_hook" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +serde = { version = "1", features = ["derive"] } +serde_json = "1" diff --git a/crates/pc_injection_hook/README.md b/crates/pc_injection_hook/README.md new file mode 100644 index 0000000..6e0af61 --- /dev/null +++ b/crates/pc_injection_hook/README.md @@ -0,0 +1,25 @@ +# pc_injection_hook + +Precreate hook that plans library injection from the container rootfs and rewrites the OCI config +to add bind mounts to inject host libs. + +## What it does + +* Reads the OCI runtime config from `stdin`. +* Finds the container rootfs from `root.path`. +* Discovers container libraries with `ldconfig -r -p`. 
+* Plans bind mounts for: + * primary libraries from `INJECTION_PRIMARY_LIBS` + * optional dependency libraries from `INJECTION_DEPENDENCY_LIBS` + * optional extra files from `INJECTION_EXTRA_FILES` +* Appends the required mounts to the OCI config and writes the updated JSON to `stdout`. +* Adds `LD_LIBRARY_PATH` when the plan injects a runtime library directory instead of + overwriting an existing container library path. + +## Notes + +* This hook pairs with `ldcache_hook` when it overwrites existing library paths that are + already present in the container cache! +* When the plan introduces new lib injection paths, the hook also updates `LD_LIBRARY_PATH` because a + prestart `ldconfig -r ` run does not see runtime-only bind mounts. New lib injection are + exposed through a host-side staging directory mounted at `/run/pc-injection/`. diff --git a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs new file mode 100644 index 0000000..ce1b163 --- /dev/null +++ b/crates/pc_injection_hook/src/main.rs @@ -0,0 +1,1355 @@ +use serde_json::{Map, Value}; +use std::collections::{HashMap, HashSet}; +use std::env; +use std::error::Error as StdError; +use std::ffi::OsStr; +use std::fmt; +use std::fs; +use std::io::{self, Read, Write}; +use std::os::unix::fs::symlink; +use std::path::{Component, Path, PathBuf}; +use std::process::{self, Command}; +use std::time::{SystemTime, UNIX_EPOCH}; + +fn main() { + match run() { + Ok(()) => process::exit(0), + Err(error) => { + eprintln!("pc_injection_hook: {error}"); + process::exit(1); + } + } +} + +fn run() -> Result<()> { + let mut config = read_stdin_json_value()?; + let inputs = load_inputs(&config)?; + let discovery = discover_container_libraries(&inputs)?; + let edits = plan_config_edits(&inputs, &discovery.libraries)?; + apply_config_edits(&mut config, &edits)?; + write_stdout_json(&config)?; + + for warning in discovery.warnings.into_iter().chain(edits.warnings) { + eprintln!("pc_injection_hook: warning: 
{warning}"); + } + + Ok(()) +} + +type Result = std::result::Result; + +#[derive(Debug)] +enum Error { + Message(String), + Io { context: String, source: io::Error }, + Json(serde_json::Error), +} + +impl Error { + fn message(message: impl Into) -> Self { + Self::Message(message.into()) + } + + fn io(context: impl Into, source: io::Error) -> Self { + Self::Io { + context: context.into(), + source, + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Message(message) => write!(f, "{message}"), + Self::Io { context, source } => write!(f, "{context}: {source}"), + Self::Json(source) => write!(f, "invalid JSON: {source}"), + } + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Self::Message(_) => None, + Self::Io { source, .. } => Some(source), + Self::Json(source) => Some(source), + } + } +} + +impl From for Error { + fn from(source: serde_json::Error) -> Self { + Self::Json(source) + } +} + +#[derive(Debug, Clone)] +struct HookInputs { + rootfs: PathBuf, + ldconfig: PathBuf, + primary_libs: Vec, + dependency_libs: Vec, + extra_files: Vec, + compatibility_policy: CompatibilityPolicy, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +enum CompatibilityPolicy { + Major, + Full, + Strict, +} + +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] +struct AbiVersion { + major: Option, + minor: Option, + patch: Option, +} + +impl AbiVersion { + fn has_major(&self) -> bool { + self.major.is_some() + } + + fn components(&self) -> [Option; 3] { + [self.major, self.minor, self.patch] + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +struct Library { + path: PathBuf, + file_name: String, + linker_name: String, + real_name: String, + abi: AbiVersion, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +struct DiscoveryOutcome { + libraries: Vec, + warnings: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +struct ConfigEdits { + mounts: Vec, + 
ld_library_path_dirs: Vec, + warnings: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +struct MountEdit { + source: PathBuf, + destination: PathBuf, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +struct MountDecision { + mounts: Vec, + ld_library_path_dir: Option, + warning: Option, +} + +fn read_stdin_json_value() -> Result { + let mut input = String::new(); + io::stdin() + .read_to_string(&mut input) + .map_err(|e| Error::io("failed to read OCI config from stdin", e))?; + serde_json::from_str(&input).map_err(Error::from) +} + +fn write_stdout_json(value: &Value) -> Result<()> { + let mut stdout = io::stdout().lock(); + serde_json::to_writer_pretty(&mut stdout, value)?; + stdout + .write_all(b"\n") + .map_err(|e| Error::io("failed to write newline to stdout", e))?; + stdout + .flush() + .map_err(|e| Error::io("failed to flush stdout", e))?; + Ok(()) +} + +fn load_inputs(config: &Value) -> Result { + let root_path = config + .get("root") + .and_then(Value::as_object) + .and_then(|root| root.get("path")) + .and_then(Value::as_str) + .ok_or_else(|| Error::message("OCI config is missing root.path"))?; + + Ok(HookInputs { + rootfs: resolve_rootfs(root_path)?, + ldconfig: PathBuf::from(env::var_os("LDCONFIG_PATH").unwrap_or_else(|| "ldconfig".into())), + primary_libs: parse_required_library_list("INJECTION_PRIMARY_LIBS")?, + dependency_libs: parse_optional_library_list("INJECTION_DEPENDENCY_LIBS")?, + extra_files: parse_optional_path_list("INJECTION_EXTRA_FILES"), + compatibility_policy: CompatibilityPolicy::from_env("INJECTION_COMPATIBILITY")?, + }) +} + +fn resolve_rootfs(root_path: &str) -> Result { + let root = Path::new(root_path); + if root.is_absolute() { + return Ok(root.to_path_buf()); + } + + Err(Error::message(format!( + "pc_injection_hook requires an absolute OCI root.path in precreate mode: {root_path}" + ))) +} + +fn parse_required_library_list(var: &'static str) -> Result> { + let raw = env::var_os(var).ok_or_else(|| { + Error::message(format!( + "the 
environment variable {var} is expected to be a non-empty colon-separated list of paths" + )) + })?; + + let paths: Vec<_> = env::split_paths(&raw).collect(); + if paths.is_empty() { + return Err(Error::message(format!( + "the environment variable {var} is expected to be a non-empty colon-separated list of paths" + ))); + } + + paths.into_iter().map(Library::parse_host).collect() +} + +fn parse_optional_library_list(var: &'static str) -> Result> { + match env::var_os(var) { + Some(value) if !value.is_empty() => { + env::split_paths(&value).map(Library::parse_host).collect() + } + _ => Ok(Vec::new()), + } +} + +fn parse_optional_path_list(var: &'static str) -> Vec { + match env::var_os(var) { + Some(value) if !value.is_empty() => env::split_paths(&value).collect(), + _ => Vec::new(), + } +} + +impl CompatibilityPolicy { + fn from_env(var: &'static str) -> Result { + match env::var(var) { + Ok(value) => Self::parse(&value), + Err(_) => Ok(Self::Major), + } + } + + fn parse(value: &str) -> Result { + match value { + "major" => Ok(Self::Major), + "full" => Ok(Self::Full), + "strict" => Ok(Self::Strict), + other => Err(Error::message(format!( + "unsupported compatibility policy '{other}'" + ))), + } + } +} + +impl Library { + // constructor from host libs + fn parse_host(path: impl Into) -> Result { + let path = path.into(); + Self::from_name_source(path, None) + } + + // constructor for container libs (rootfs is used to resolve relative symlinks) + fn parse_container(path: impl Into, rootfs: &Path) -> Result { + let path = path.into(); + Self::from_name_source(path, Some(rootfs)) + } + + // shared base constructor + fn from_name_source(path: PathBuf, rootfs: Option<&Path>) -> Result { + let name = name_for_parsing(&path, rootfs)?; + let (linker_name, abi, real_name) = parse_library_name(&name)?; + Ok(Self { + path, + file_name: name, + linker_name, + real_name, + abi, + }) + } + + fn path(&self) -> &Path { + &self.path + } + + fn file_name(&self) -> &str { + 
&self.file_name + } + + fn linker_name(&self) -> &str { + &self.linker_name + } + + fn real_name(&self) -> &str { + &self.real_name + } + + fn has_major_version(&self) -> bool { + self.abi.has_major() + } + + // Compatibility checks + fn is_major_compatible_with(&self, other: &Self) -> bool { + self.linker_name == other.linker_name && self.abi.major == other.abi.major + } + + fn is_full_compatible_with(&self, other: &Self) -> bool { + self.is_major_compatible_with(other) && self.abi.minor >= other.abi.minor + } + + fn is_strict_compatible_with(&self, other: &Self) -> bool { + self.is_major_compatible_with(other) && self.abi.minor == other.abi.minor + } + + fn pick_best_candidate<'a>(&self, candidates: &'a [Library]) -> Option<&'a Library> { + if candidates.is_empty() { + return None; + } + + if let Some(exact) = candidates + .iter() + .find(|candidate| candidate.real_name == self.real_name) + { + return Some(exact); + } + + let host_key = self.version_key(); + let older_or_equal = candidates + .iter() + .filter(|candidate| candidate.version_key() <= host_key) + .max_by_key(|candidate| candidate.version_key()); + if older_or_equal.is_some() { + return older_or_equal; + } + + candidates + .iter() + .min_by_key(|candidate| candidate.version_key()) + } + + fn link_chain_names(file_name: &str) -> Result> { + let (linker_name, abi, _) = parse_library_name(file_name)?; + let mut names = vec![linker_name]; + for component in abi.components().into_iter().flatten() { + let next = format!( + "{}.{}", + names.last().expect("link chain has at least one element"), + component + ); + names.push(next); + } + Ok(names) + } + + fn version_key(&self) -> (Option, Option, Option) { + (self.abi.major, self.abi.minor, self.abi.patch) + } +} + +// we try to resolve the "real" lib name, fallback is filename +fn name_for_parsing(path: &Path, rootfs: Option<&Path>) -> Result { + if let Some(rootfs) = rootfs { + let joined = resolve_in_rootfs(rootfs, path); + if let Ok(real) = 
fs::canonicalize(&joined) { + if real.starts_with(rootfs) { + return file_name_to_string(&real); + } + } + } + file_name_to_string(path) +} + +fn file_name_to_string(path: &Path) -> Result { + let name = path.file_name().and_then(OsStr::to_str).ok_or_else(|| { + Error::message(format!( + "shared library path has no valid file name: {}", + path.display() + )) + })?; + Ok(name.to_string()) +} + +// this is how we extract linker_name, abi, and real_name +fn parse_library_name(name: &str) -> Result<(String, AbiVersion, String)> { + let Some(so_idx) = name.find(".so") else { + return Err(Error::message(format!( + "shared library name does not contain '.so': {name}" + ))); + }; + + let linker_name = name[..so_idx + 3].to_string(); + let suffix = &name[so_idx + 3..]; + + // simplest case, only .so + if suffix.is_empty() { + return Ok(( + linker_name.clone(), + AbiVersion { + major: None, + minor: None, + patch: None, + }, + linker_name, + )); + } + + // basic check that suffix is .so. + if !suffix.starts_with('.') { + return Err(Error::message(format!( + "unsupported shared library suffix in {name}" + ))); + } + + // we parse each component + let mut components = suffix[1..].split('.'); + let major = components + .next() + .map(parse_component) + .transpose()? + .flatten(); + let minor = components + .next() + .map(parse_component) + .transpose()? + .flatten(); + let patch = components + .next() + .map(parse_component) + .transpose()? 
+ .flatten(); + + if components.next().is_some() { + return Err(Error::message(format!( + "unsupported ABI version with more than 3 components in {name}" + ))); + } + + Ok(( + linker_name.clone(), + AbiVersion { + major, + minor, + patch, + }, + name.to_string(), + )) +} + +fn parse_component(component: &str) -> Result> { + if component.is_empty() { + return Ok(None); + } + component + .parse::() + .map(Some) + .map_err(|_| Error::message(format!("invalid ABI version component: {component}"))) +} + +fn discover_container_libraries(inputs: &HookInputs) -> Result { + let requested_linkers = inputs + .primary_libs + .iter() + .chain(&inputs.dependency_libs) + .map(|lib| lib.linker_name().to_string()) + .collect::>(); + let mut libraries = Vec::new(); + let mut warnings = Vec::new(); + + for path in list_dynamic_linker_libraries(&inputs.ldconfig, &inputs.rootfs)? { + match Library::parse_container(path.clone(), &inputs.rootfs) { + Ok(lib) => libraries.push(lib), + Err(error) => { + let linker_hint = file_name_to_string(&path) + .ok() + .and_then(|name| linker_name_hint(&name)); + if let Some(linker_name) = linker_hint { + if requested_linkers.contains(&linker_name) { + return Err(Error::message(format!( + "failed to parse container library {} for requested linker {}: {}", + path.display(), + linker_name, + error + ))); + } + } + + push_warning( + &mut warnings, + format!( + "skipping unparseable container library {}: {}", + path.display(), + error + ), + ); + } + } + } + + Ok(DiscoveryOutcome { + libraries, + warnings, + }) +} + +fn linker_name_hint(name: &str) -> Option { + name.find(".so").map(|index| name[..index + 3].to_string()) +} + +fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result { + let container_index = index_container_libraries(container_libs); + let fallback_dir = PathBuf::from("/run/pc-injection"); + let mut mounts = Vec::new(); + let mut warnings = Vec::new(); + let mut ld_library_path_dirs = Vec::new(); + + if !inputs + 
.primary_libs + .iter() + .any(|lib| container_index.contains_key(lib.linker_name())) + { + return Err(Error::message( + "failed to activate library injection: no primary libraries found in the container linker cache", + )); + } + + for lib in &inputs.primary_libs { + validate_regular_source_file(lib.path(), "primary library")?; + if !lib.has_major_version() { + return Err(Error::message(format!( + "primary library {} must contain at least a major ABI number", + lib.path().display() + ))); + } + } + + for host in &inputs.primary_libs { + let candidates = container_index + .get(host.linker_name()) + .cloned() + .unwrap_or_default(); + + if !candidates.is_empty() && !candidates.iter().any(Library::has_major_version) { + return Err(Error::message(format!( + "container libraries matching {} must contain at least a major ABI number", + host.path().display() + ))); + } + + let decision = choose_primary_mounts( + host, + &candidates, + inputs.compatibility_policy, + &fallback_dir, + )?; + append_decision_mounts( + &mut mounts, + &mut ld_library_path_dirs, + &mut warnings, + &inputs.rootfs, + decision, + )?; + } + + for host in &inputs.dependency_libs { + validate_regular_source_file(host.path(), "dependency library")?; + let candidates = container_index + .get(host.linker_name()) + .cloned() + .unwrap_or_default(); + + let decision = choose_dependency_mounts(host, &candidates, &fallback_dir)?; + append_decision_mounts( + &mut mounts, + &mut ld_library_path_dirs, + &mut warnings, + &inputs.rootfs, + decision, + )?; + } + + for file in &inputs.extra_files { + validate_extra_source_file(file)?; + validate_mount_destination(&inputs.rootfs, file)?; + mounts.push(MountEdit { + source: file.clone(), + destination: file.clone(), + }); + } + + dedupe_mounts(&mut mounts)?; + dedupe_paths(&mut ld_library_path_dirs); + + Ok(ConfigEdits { + mounts, + ld_library_path_dirs, + warnings, + }) +} + +fn append_decision_mounts( + mounts: &mut Vec, + ld_library_path_dirs: &mut Vec, + 
warnings: &mut Vec, + rootfs: &Path, + decision: MountDecision, +) -> Result<()> { + for mount in decision.mounts { + if !mount.destination.starts_with("/run/pc-injection") { + validate_mount_destination(rootfs, &mount.destination)?; + } + mounts.push(mount); + } + + if let Some(dir) = decision.ld_library_path_dir { + ld_library_path_dirs.push(dir); + } + + if let Some(warning) = decision.warning { + push_warning(warnings, warning); + } + + Ok(()) +} + +fn choose_primary_mounts( + host: &Library, + candidates: &[Library], + compatibility: CompatibilityPolicy, + fallback_dir: &Path, +) -> Result { + if candidates.is_empty() { + return fallback_mount_decision(host, fallback_dir, None); + } + + let best = select_best_primary_candidate(host, candidates, compatibility)?; + + match compatibility { + CompatibilityPolicy::Major => { + if host.is_major_compatible_with(&best) { + overwrite_mount_decision(host, best) + } else { + Err(Error::message(major_mismatch_message(host, &best))) + } + } + CompatibilityPolicy::Full => { + if host.is_full_compatible_with(&best) { + overwrite_mount_decision(host, best) + } else if host.is_major_compatible_with(&best) { + fallback_mount_decision( + host, + fallback_dir, + Some(partial_compatibility_message(host, &best)), + ) + } else { + Err(Error::message(major_mismatch_message(host, &best))) + } + } + CompatibilityPolicy::Strict => { + if host.is_strict_compatible_with(&best) { + overwrite_mount_decision(host, best) + } else { + Err(Error::message(strict_mismatch_message(host, &best))) + } + } + } +} + +fn choose_dependency_mounts( + host: &Library, + candidates: &[Library], + fallback_dir: &Path, +) -> Result { + if candidates.is_empty() { + return fallback_mount_decision(host, fallback_dir, None); + } + + let best = select_best_dependency_candidate(host, candidates)?; + + if host.is_full_compatible_with(&best) { + overwrite_mount_decision(host, best) + } else if host.is_major_compatible_with(&best) { + fallback_mount_decision( + host, 
+ fallback_dir, + Some(partial_compatibility_message(host, &best)), + ) + } else { + fallback_mount_decision( + host, + fallback_dir, + Some(format!( + "{}; mounting {} into {} with LD_LIBRARY_PATH fallback", + major_mismatch_message(host, &best), + host.path().display(), + fallback_dir.display() + )), + ) + } +} + +fn overwrite_mount_decision(host: &Library, container: Library) -> Result { + Ok(MountDecision { + mounts: vec![MountEdit { + source: host.path().to_path_buf(), + destination: container.path().to_path_buf(), + }], + ld_library_path_dir: None, + warning: None, + }) +} + +fn fallback_mount_decision( + host: &Library, + _dir: &Path, + warning: Option, +) -> Result { + let names = fallback_link_names(host.file_name())?; + let source = create_fallback_staging_dir(host.path(), &names)?; + let destination = PathBuf::from("/run/pc-injection").join(host.file_name()); + let ld_library_path_dir = Some(destination.clone()); + Ok(MountDecision { + mounts: vec![MountEdit { + source, + destination, + }], + ld_library_path_dir, + warning, + }) +} + +fn create_fallback_staging_dir(source: &Path, names: &[String]) -> Result { + let staging_dir = unique_temp_path("staging"); + fs::create_dir_all(&staging_dir) + .map_err(|e| Error::io(format!("failed to create {}", staging_dir.display()), e))?; + + for name in names { + let link = staging_dir.join(name); + symlink(source, &link).map_err(|e| { + Error::io( + format!("failed to create fallback symlink {}", link.display()), + e, + ) + })?; + } + + Ok(staging_dir) +} + +fn fallback_link_names(file_name: &str) -> Result> { + let chain = Library::link_chain_names(file_name)?; + if chain.len() <= 1 { + return Ok(chain); + } + + let mut names = Vec::new(); + if let Some(soname) = chain.get(1) { + names.push(soname.clone()); + } + if let Some(real_name) = chain.last() { + if names.last() != Some(real_name) { + names.push(real_name.clone()); + } + } + Ok(names) +} + +fn select_best_primary_candidate( + host: &Library, + candidates: 
&[Library], + compatibility: CompatibilityPolicy, +) -> Result { + let compatible_candidates = candidates + .iter() + .filter(|candidate| match compatibility { + CompatibilityPolicy::Major => host.is_major_compatible_with(candidate), + CompatibilityPolicy::Full => host.is_major_compatible_with(candidate), + CompatibilityPolicy::Strict => host.is_strict_compatible_with(candidate), + }) + .cloned() + .collect::>(); + + pick_best_candidate(host, candidates, &compatible_candidates) +} + +fn select_best_dependency_candidate(host: &Library, candidates: &[Library]) -> Result { + let compatible_candidates = candidates + .iter() + .filter(|candidate| host.is_major_compatible_with(candidate)) + .cloned() + .collect::>(); + + pick_best_candidate(host, candidates, &compatible_candidates) +} + +fn pick_best_candidate( + host: &Library, + candidates: &[Library], + compatible_candidates: &[Library], +) -> Result { + host.pick_best_candidate(if compatible_candidates.is_empty() { + candidates + } else { + compatible_candidates + }) + .cloned() + .ok_or_else(|| Error::message("internal error: candidate selection returned no library")) +} + +fn major_mismatch_message(host: &Library, container: &Library) -> String { + format!( + "primary library {} is not ABI compatible with container library {}", + host.real_name(), + container.real_name() + ) +} + +fn partial_compatibility_message(host: &Library, container: &Library) -> String { + format!( + "partial ABI compatibility detected: host library {} is older than container library {}", + host.real_name(), + container.real_name() + ) +} + +fn strict_mismatch_message(host: &Library, container: &Library) -> String { + format!( + "primary library {} is not strictly ABI compatible with container library {}", + host.real_name(), + container.real_name() + ) +} + +fn index_container_libraries(container_libs: &[Library]) -> HashMap> { + let mut index = HashMap::>::new(); + for lib in container_libs { + index + .entry(lib.linker_name().to_string()) 
+ .or_default() + .push(lib.clone()); + } + index +} + +fn list_dynamic_linker_libraries(ldconfig: &Path, rootfs: &Path) -> Result> { + let output = Command::new(ldconfig) + .arg("-r") + .arg(rootfs) + .arg("-p") + .output() + .map_err(|e| Error::io(format!("failed to execute {}", ldconfig.display()), e))?; + + if !output.status.success() { + return Err(Error::message(format!( + "{} -r {} -p failed with status {}", + ldconfig.display(), + rootfs.display(), + output.status + ))); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + let mut libraries = Vec::new(); + for line in stdout.lines() { + if let Some((_, path)) = line.split_once("=>") { + let trimmed = path.trim(); + if !trimmed.is_empty() { + libraries.push(PathBuf::from(trimmed)); + } + } + } + Ok(libraries) +} + +fn validate_regular_source_file(source: &Path, label: &str) -> Result<()> { + let source_metadata = fs::metadata(source) + .map_err(|e| Error::io(format!("failed to stat {label} {}", source.display()), e))?; + if !source_metadata.is_file() { + return Err(Error::message(format!( + "{label} must be a regular file: {}", + source.display() + ))); + } + Ok(()) +} + +fn validate_extra_source_file(source: &Path) -> Result<()> { + let source_metadata = fs::symlink_metadata(source).map_err(|e| { + Error::io( + format!("failed to stat extra-path source {}", source.display()), + e, + ) + })?; + let file_type = source_metadata.file_type(); + + if file_type.is_symlink() { + return Err(Error::message(format!( + "INJECTION_EXTRA_FILES entries must be regular files, not symlinks: {}", + source.display() + ))); + } + + if !file_type.is_file() { + return Err(Error::message(format!( + "INJECTION_EXTRA_FILES entries must be regular files: {}", + source.display() + ))); + } + + Ok(()) +} + +fn validate_mount_destination(rootfs: &Path, destination: &Path) -> Result<()> { + let rootfs_real = fs::canonicalize(rootfs) + .map_err(|e| Error::io(format!("failed to resolve rootfs {}", rootfs.display()), e))?; + let 
relative = normalize_container_relative_path(destination)?; + let parent = relative.parent().unwrap_or_else(|| Path::new("")); + let parent_real = resolve_existing_directory_in_rootfs(&rootfs_real, parent)?; + if !parent_real.starts_with(&rootfs_real) { + return Err(Error::message(format!( + "mount destination escapes the rootfs: {}", + destination.display() + ))); + } + + let target = parent_real.join(relative.file_name().ok_or_else(|| { + Error::message(format!( + "mount destination has no file name: {}", + destination.display() + )) + })?); + if let Ok(metadata) = fs::symlink_metadata(&target) { + let file_type = metadata.file_type(); + if file_type.is_dir() && !file_type.is_symlink() { + return Err(Error::message(format!( + "mount destination already exists as a directory: {}", + destination.display() + ))); + } + } + + Ok(()) +} + +fn normalize_container_relative_path(destination: &Path) -> Result { + let relative = match destination.strip_prefix("/") { + Ok(relative) => relative, + Err(_) => destination, + }; + + let mut normalized = PathBuf::new(); + for component in relative.components() { + match component { + Component::Normal(part) => normalized.push(part), + Component::CurDir | Component::ParentDir => { + return Err(Error::message(format!( + "mount destination must not contain '.' or '..' 
components: {}", + destination.display() + ))); + } + Component::RootDir | Component::Prefix(_) => { + return Err(Error::message(format!( + "mount destination must be a normalized container path: {}", + destination.display() + ))); + } + } + } + + if normalized.file_name().is_none() { + return Err(Error::message(format!( + "mount destination has no valid file name: {}", + destination.display() + ))); + } + + Ok(normalized) +} + +fn resolve_existing_directory_in_rootfs(rootfs: &Path, relative: &Path) -> Result { + let mut current = rootfs.to_path_buf(); + for component in relative.components() { + current.push(component.as_os_str()); + let metadata = fs::symlink_metadata(¤t) + .map_err(|e| Error::io(format!("failed to inspect {}", current.display()), e))?; + let file_type = metadata.file_type(); + if !file_type.is_dir() && !file_type.is_symlink() { + return Err(Error::message(format!( + "mount destination parent is not a directory: {}", + current.display() + ))); + } + + current = fs::canonicalize(¤t) + .map_err(|e| Error::io(format!("failed to resolve {}", current.display()), e))?; + if !current.starts_with(rootfs) { + return Err(Error::message(format!( + "mount destination escapes the rootfs through {}", + current.display() + ))); + } + } + + Ok(current) +} + +fn resolve_in_rootfs(rootfs: &Path, container_path: &Path) -> PathBuf { + match container_path.strip_prefix("/") { + Ok(relative) => rootfs.join(relative), + Err(_) => rootfs.join(container_path), + } +} + +fn push_warning(warnings: &mut Vec, warning: String) { + if !warnings.iter().any(|existing| existing == &warning) { + warnings.push(warning); + } +} + +fn dedupe_mounts(mounts: &mut Vec) -> Result<()> { + let mut seen = HashMap::::new(); + let mut deduped = Vec::new(); + + for mount in mounts.drain(..) 
{ + match seen.get(&mount.destination) { + Some(existing) if existing != &mount.source => { + return Err(Error::message(format!( + "conflicting planned mounts for {}: {} vs {}", + mount.destination.display(), + existing.display(), + mount.source.display() + ))); + } + Some(_) => {} + None => { + seen.insert(mount.destination.clone(), mount.source.clone()); + deduped.push(mount); + } + } + } + + *mounts = deduped; + Ok(()) +} + +fn dedupe_paths(paths: &mut Vec) { + let mut seen = HashSet::new(); + paths.retain(|path| seen.insert(path.clone())); +} + +fn apply_config_edits(config: &mut Value, edits: &ConfigEdits) -> Result<()> { + let obj = config + .as_object_mut() + .ok_or_else(|| Error::message("top-level OCI config JSON must be an object"))?; + + if !edits.mounts.is_empty() { + append_mounts(obj, &edits.mounts)?; + } + + if !edits.ld_library_path_dirs.is_empty() { + merge_ld_library_path(obj, &edits.ld_library_path_dirs)?; + } + + Ok(()) +} + +fn append_mounts(obj: &mut Map, mounts_to_add: &[MountEdit]) -> Result<()> { + let mounts = ensure_array_field(obj, "mounts")?; + + for mount in mounts_to_add { + let mut out = Map::new(); + out.insert( + "destination".to_string(), + Value::String(mount.destination.display().to_string()), + ); + out.insert("type".to_string(), Value::String("bind".to_string())); + out.insert( + "source".to_string(), + Value::String(mount.source.display().to_string()), + ); + out.insert( + "options".to_string(), + Value::Array( + ["ro", "rbind", "nosuid", "nodev"] + .into_iter() + .map(|value| Value::String(value.to_string())) + .collect(), + ), + ); + mounts.push(Value::Object(out)); + } + + Ok(()) +} + +fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Result<()> { + let dirs_as_strings = dirs + .iter() + .map(|path| path.display().to_string()) + .collect::>(); + + let process_val = obj + .entry("process".to_string()) + .or_insert_with(|| Value::Object(Map::new())); + let process_obj = process_val + .as_object_mut() + 
.ok_or_else(|| Error::message("validation error: 'process' exists but is not an object"))?; + let env_arr = ensure_array_field(process_obj, "env")?; + + let existing_index = env_arr.iter().rposition(|value| { + value + .as_str() + .and_then(|entry| entry.split_once('=').map(|(key, _)| key)) + .is_some_and(|key| key == "LD_LIBRARY_PATH") + }); + + let existing_entries = existing_index + .and_then(|idx| env_arr[idx].as_str()) + .and_then(|entry| entry.split_once('=').map(|(_, value)| value.to_string())) + .unwrap_or_default(); + + let mut merged = Vec::new(); + let mut seen = HashSet::new(); + for dir in &dirs_as_strings { + if seen.insert(dir.clone()) { + merged.push(dir.clone()); + } + } + for dir in existing_entries + .split(':') + .filter(|entry| !entry.is_empty()) + { + if seen.insert(dir.to_string()) { + merged.push(dir.to_string()); + } + } + + let value = format!("LD_LIBRARY_PATH={}", merged.join(":")); + match existing_index { + Some(idx) => env_arr[idx] = Value::String(value), + None => env_arr.push(Value::String(value)), + } + + Ok(()) +} + +fn ensure_array_field<'a>( + obj: &'a mut Map, + field: &str, +) -> Result<&'a mut Vec> { + use serde_json::map::Entry; + + match obj.entry(field.to_string()) { + Entry::Vacant(entry) => { + let value = entry.insert(Value::Array(Vec::new())); + Ok(value.as_array_mut().expect("inserted an array")) + } + Entry::Occupied(entry) => { + let value = entry.into_mut(); + match value { + Value::Array(arr) => Ok(arr), + _ => Err(Error::message(format!( + "validation error: '{field}' exists but is not an array" + ))), + } + } + } +} + +fn unique_temp_path(label: &str) -> PathBuf { + let nonce = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system time should be after unix epoch") + .as_nanos(); + std::env::temp_dir().join(format!("pc_injection_hook-{label}-{nonce}")) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::os::unix::fs::PermissionsExt; + + #[test] + fn 
fallback_primary_adds_mounts_and_ld_library_path() { + let temp_root = unique_temp_path("fallback-primary"); + let rootfs = temp_root.join("rootfs"); + let host_file = temp_root.join("host/libmpi.so.12.2"); + + fs::create_dir_all(&rootfs).unwrap(); + fs::create_dir_all(host_file.parent().unwrap()).unwrap(); + fs::write(&host_file, b"payload").unwrap(); + + let inputs = HookInputs { + rootfs: rootfs.clone(), + ldconfig: "ldconfig".into(), + primary_libs: vec![Library::parse_host(&host_file).unwrap()], + dependency_libs: Vec::new(), + extra_files: Vec::new(), + compatibility_policy: CompatibilityPolicy::Full, + }; + let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.12.4").unwrap()]; + + let edits = plan_config_edits(&inputs, &container_libs).unwrap(); + assert_eq!(edits.mounts.len(), 1); + assert_eq!( + edits.mounts[0].destination, + PathBuf::from("/run/pc-injection/libmpi.so.12.2") + ); + assert!(fs::symlink_metadata(edits.mounts[0].source.join("libmpi.so.12")).is_ok()); + assert!(fs::symlink_metadata(edits.mounts[0].source.join("libmpi.so.12.2")).is_ok()); + assert_eq!( + edits.ld_library_path_dirs, + vec![PathBuf::from("/run/pc-injection/libmpi.so.12.2")] + ); + assert_eq!(edits.warnings.len(), 1); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn compatible_primary_overwrites_existing_container_path() { + let temp_root = unique_temp_path("overwrite-primary"); + let rootfs = temp_root.join("rootfs"); + let host_file = temp_root.join("host/libmpi.so.12.5"); + + fs::create_dir_all(rootfs.join("usr/lib64")).unwrap(); + fs::create_dir_all(host_file.parent().unwrap()).unwrap(); + fs::write(&host_file, b"payload").unwrap(); + + let inputs = HookInputs { + rootfs, + ldconfig: "ldconfig".into(), + primary_libs: vec![Library::parse_host(&host_file).unwrap()], + dependency_libs: Vec::new(), + extra_files: Vec::new(), + compatibility_policy: CompatibilityPolicy::Major, + }; + let container_libs = 
vec![Library::parse_host("/usr/lib64/libmpi.so.12.3").unwrap()]; + + let edits = plan_config_edits(&inputs, &container_libs).unwrap(); + assert_eq!( + edits.mounts, + vec![MountEdit { + source: host_file.clone(), + destination: PathBuf::from("/usr/lib64/libmpi.so.12.3"), + }] + ); + assert!(edits.ld_library_path_dirs.is_empty()); + assert!(edits.warnings.is_empty()); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn config_edits_merge_ld_library_path_and_mounts() { + let mut config = serde_json::json!({ + "root": { "path": "/rootfs" }, + "mounts": [], + "process": { + "env": ["FOO=BAR", "LD_LIBRARY_PATH=/usr/lib64"] + } + }); + let edits = ConfigEdits { + mounts: vec![MountEdit { + source: PathBuf::from("/host/libmpi.so.12"), + destination: PathBuf::from("/lib/libmpi.so.12"), + }], + ld_library_path_dirs: vec![PathBuf::from("/lib")], + warnings: Vec::new(), + }; + + apply_config_edits(&mut config, &edits).unwrap(); + let env = config["process"]["env"].as_array().unwrap(); + assert!(env + .iter() + .any(|value| value == "LD_LIBRARY_PATH=/lib:/usr/lib64")); + let mounts = config["mounts"].as_array().unwrap(); + assert_eq!(mounts.len(), 1); + assert_eq!(mounts[0]["type"], "bind"); + assert_eq!(mounts[0]["destination"], "/lib/libmpi.so.12"); + } + + #[test] + fn discovery_fails_for_unparseable_requested_library() { + let temp_root = unique_temp_path("requested-parse"); + let rootfs = temp_root.join("rootfs"); + let host_file = temp_root.join("host/libmpi.so.12"); + let ldconfig = temp_root.join("fake-ldconfig.sh"); + + fs::create_dir_all(&rootfs).unwrap(); + fs::create_dir_all(host_file.parent().unwrap()).unwrap(); + fs::write(&host_file, b"payload").unwrap(); + fs::write( + &ldconfig, + "#!/bin/sh\nprintf '%s\n' 'libmpi.so (libc6,x86-64) => /usr/lib/libmpi.so.bad.suffix'\n", + ) + .unwrap(); + fs::set_permissions(&ldconfig, fs::Permissions::from_mode(0o755)).unwrap(); + + let inputs = HookInputs { + rootfs, + ldconfig, + primary_libs: 
vec![Library::parse_host(&host_file).unwrap()], + dependency_libs: Vec::new(), + extra_files: Vec::new(), + compatibility_policy: CompatibilityPolicy::Major, + }; + + let error = discover_container_libraries(&inputs).unwrap_err(); + assert!(error + .to_string() + .contains("failed to parse container library")); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn extra_files_require_regular_sources() { + let temp_root = unique_temp_path("extra-files"); + let rootfs = temp_root.join("rootfs"); + let primary = temp_root.join("host/libmpi.so.12"); + let extra = temp_root.join("opt/tools/tool.sh"); + + fs::create_dir_all(rootfs.join("usr/lib")).unwrap(); + fs::create_dir_all(resolve_in_rootfs(&rootfs, extra.parent().unwrap())).unwrap(); + fs::create_dir_all(extra.parent().unwrap()).unwrap(); + fs::create_dir_all(primary.parent().unwrap()).unwrap(); + fs::write(&primary, b"payload").unwrap(); + fs::write(&extra, b"#!/bin/sh\n").unwrap(); + + let inputs = HookInputs { + rootfs, + ldconfig: "ldconfig".into(), + primary_libs: vec![Library::parse_host(&primary).unwrap()], + dependency_libs: Vec::new(), + extra_files: vec![extra.clone()], + compatibility_policy: CompatibilityPolicy::Major, + }; + let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.12.1").unwrap()]; + + let edits = plan_config_edits(&inputs, &container_libs).unwrap(); + assert!(edits.mounts.iter().any(|mount| mount.destination == extra)); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn relative_root_path_is_rejected() { + let error = resolve_rootfs("rootfs").unwrap_err(); + assert!(error + .to_string() + .contains("requires an absolute OCI root.path")); + } +} From 491ae4200df849934af5ded44d7cf95acb506509 Mon Sep 17 00:00:00 2001 From: felipecr Date: Mon, 27 Apr 2026 19:38:12 +0200 Subject: [PATCH 04/15] adding pc_injection to workspace --- .gitignore | 2 ++ Cargo.toml | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore 
index 74bc6b3..d125f28 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,5 @@ target .DS_Store *.swp *.lock +test/vagrant/ubuntu-24.04/.vagrant/ +test/vagrant/ubuntu-24.04/.cache/ diff --git a/Cargo.toml b/Cargo.toml index e40bdce..20b59de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,10 @@ [workspace] -members = ["crates/pce_hook", "crates/mps_hook", "crates/ldcache_hook"] +members = [ + "crates/pce_hook", + "crates/mps_hook", + "crates/ldcache_hook", + "crates/pc_injection_hook", +] resolver = "2" [profile.release] From e89c8dadae163a950e4a00c5eb836bbd42119864 Mon Sep 17 00:00:00 2001 From: felipecr Date: Mon, 27 Apr 2026 19:42:41 +0200 Subject: [PATCH 05/15] tests for injection v1 --- test/pc-injection-podman.bats | 116 ++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 test/pc-injection-podman.bats diff --git a/test/pc-injection-podman.bats b/test/pc-injection-podman.bats new file mode 100644 index 0000000..5cc352d --- /dev/null +++ b/test/pc-injection-podman.bats @@ -0,0 +1,116 @@ +#!/usr/bin/env bats +bats_require_minimum_version 1.5.0 +source /usr/local/lib/bats/bats-support/load.bash +source /usr/local/lib/bats/bats-assert/load.bash + +make_pc_injection_hook_dir() { + local primary_lib="$1" + local dependency_lib="$2" + local ldconfig_path="$3" + local hooks_dir + hooks_dir="$(mktemp -d)" + + local repo bin + repo="$(git rev-parse --show-toplevel 2>/dev/null || pwd)" + bin="$repo/target/release/pc_injection_hook" + + if [[ ! -x "$bin" ]]; then + echo "pc_injection_hook binary not found at $bin." 
>&2 + rm -rf "$hooks_dir" + return 1 + fi + + cat >"$hooks_dir/pc-injection.json" </dev/null + + run command -v gcc + assert_success + gcc_path="$output" + + run command -v ldconfig + assert_success + ldconfig_path="$output" + + run bash -lc ' + while read -r line; do + case "$line" in + *"libz.so.1 "*) + set -- $line + printf "%s\n" "${!#}" + exit 0 + ;; + esac + done < <(ldconfig -p) + exit 1 + ' + assert_success + assert_output --partial "/" + primary_lib="$output" + + workdir="$(mktemp -d)" + src="$workdir/libpcitest.c" + dependency_lib="$workdir/libpcitest.so.1.0.0" + + cat >"$src" <<'EOF' +int pcitest_value(void) { return 42; } +EOF + + run "$gcc_path" -shared -fPIC -Wl,-soname,libpcitest.so.1 -o "$dependency_lib" "$src" + assert_success + + hooks_dir="$(make_pc_injection_hook_dir "$primary_lib" "$dependency_lib" "$ldconfig_path")" + [ -n "$hooks_dir" ] + + run podman --hooks-dir="$hooks_dir" run --rm \ + --annotation pc-injection.enable=false \ + "$IMAGE" bash -lc ' + [ -z "${LD_LIBRARY_PATH:-}" ] && + [ ! 
-e /run/pc-injection/libpcitest.so.1.0.0 ] + ' + assert_success + + run podman --hooks-dir="$hooks_dir" run --rm \ + --annotation pc-injection.enable=true \ + "$IMAGE" bash -lc ' + printf "LD_LIBRARY_PATH=%s\n" "${LD_LIBRARY_PATH:-}" + test -d /run/pc-injection/libpcitest.so.1.0.0 + test -L /run/pc-injection/libpcitest.so.1.0.0/libpcitest.so.1 + test -L /run/pc-injection/libpcitest.so.1.0.0/libpcitest.so.1.0.0 + ' + + { + printf '%s\n' "$output" + printf '%s\n' "$stderr" + } >&3 + + assert_success + assert_output --partial "LD_LIBRARY_PATH=/run/pc-injection/libpcitest.so.1.0.0" + + rm -rf "$workdir" "$hooks_dir" +} From 22e8807803da1288d0e55836ee98310dd4a02865 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 00:40:07 +0200 Subject: [PATCH 06/15] primary replaces all hits, dependencies go over LD_LIB_PATH --- crates/pc_injection_hook/src/main.rs | 334 ++++++++++----------------- 1 file changed, 124 insertions(+), 210 deletions(-) diff --git a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs index ce1b163..3b94a3e 100644 --- a/crates/pc_injection_hook/src/main.rs +++ b/crates/pc_injection_hook/src/main.rs @@ -91,7 +91,7 @@ struct HookInputs { primary_libs: Vec, dependency_libs: Vec, extra_files: Vec, - compatibility_policy: CompatibilityPolicy, + _compatibility_policy: CompatibilityPolicy, } #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -187,7 +187,7 @@ fn load_inputs(config: &Value) -> Result { primary_libs: parse_required_library_list("INJECTION_PRIMARY_LIBS")?, dependency_libs: parse_optional_library_list("INJECTION_DEPENDENCY_LIBS")?, extra_files: parse_optional_path_list("INJECTION_EXTRA_FILES"), - compatibility_policy: CompatibilityPolicy::from_env("INJECTION_COMPATIBILITY")?, + _compatibility_policy: CompatibilityPolicy::from_env("INJECTION_COMPATIBILITY")?, }) } @@ -306,40 +306,6 @@ impl Library { self.linker_name == other.linker_name && self.abi.major == other.abi.major } - fn is_full_compatible_with(&self, other: 
&Self) -> bool { - self.is_major_compatible_with(other) && self.abi.minor >= other.abi.minor - } - - fn is_strict_compatible_with(&self, other: &Self) -> bool { - self.is_major_compatible_with(other) && self.abi.minor == other.abi.minor - } - - fn pick_best_candidate<'a>(&self, candidates: &'a [Library]) -> Option<&'a Library> { - if candidates.is_empty() { - return None; - } - - if let Some(exact) = candidates - .iter() - .find(|candidate| candidate.real_name == self.real_name) - { - return Some(exact); - } - - let host_key = self.version_key(); - let older_or_equal = candidates - .iter() - .filter(|candidate| candidate.version_key() <= host_key) - .max_by_key(|candidate| candidate.version_key()); - if older_or_equal.is_some() { - return older_or_equal; - } - - candidates - .iter() - .min_by_key(|candidate| candidate.version_key()) - } - fn link_chain_names(file_name: &str) -> Result> { let (linker_name, abi, _) = parse_library_name(file_name)?; let mut names = vec![linker_name]; @@ -353,10 +319,6 @@ impl Library { } Ok(names) } - - fn version_key(&self) -> (Option, Option, Option) { - (self.abi.major, self.abi.minor, self.abi.patch) - } } // we try to resolve the "real" lib name, fallback is filename @@ -459,12 +421,6 @@ fn parse_component(component: &str) -> Result> { } fn discover_container_libraries(inputs: &HookInputs) -> Result { - let requested_linkers = inputs - .primary_libs - .iter() - .chain(&inputs.dependency_libs) - .map(|lib| lib.linker_name().to_string()) - .collect::>(); let mut libraries = Vec::new(); let mut warnings = Vec::new(); @@ -472,20 +428,6 @@ fn discover_container_libraries(inputs: &HookInputs) -> Result match Library::parse_container(path.clone(), &inputs.rootfs) { Ok(lib) => libraries.push(lib), Err(error) => { - let linker_hint = file_name_to_string(&path) - .ok() - .and_then(|name| linker_name_hint(&name)); - if let Some(linker_name) = linker_hint { - if requested_linkers.contains(&linker_name) { - return Err(Error::message(format!( 
- "failed to parse container library {} for requested linker {}: {}", - path.display(), - linker_name, - error - ))); - } - } - push_warning( &mut warnings, format!( @@ -504,10 +446,6 @@ fn discover_container_libraries(inputs: &HookInputs) -> Result }) } -fn linker_name_hint(name: &str) -> Option { - name.find(".so").map(|index| name[..index + 3].to_string()) -} - fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result { let container_index = index_container_libraries(container_libs); let fallback_dir = PathBuf::from("/run/pc-injection"); @@ -548,12 +486,7 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< ))); } - let decision = choose_primary_mounts( - host, - &candidates, - inputs.compatibility_policy, - &fallback_dir, - )?; + let decision = choose_primary_mounts(host, &candidates, &fallback_dir)?; append_decision_mounts( &mut mounts, &mut ld_library_path_dirs, @@ -627,85 +560,62 @@ fn append_decision_mounts( fn choose_primary_mounts( host: &Library, candidates: &[Library], - compatibility: CompatibilityPolicy, fallback_dir: &Path, ) -> Result { - if candidates.is_empty() { - return fallback_mount_decision(host, fallback_dir, None); - } - - let best = select_best_primary_candidate(host, candidates, compatibility)?; - - match compatibility { - CompatibilityPolicy::Major => { - if host.is_major_compatible_with(&best) { - overwrite_mount_decision(host, best) - } else { - Err(Error::message(major_mismatch_message(host, &best))) - } - } - CompatibilityPolicy::Full => { - if host.is_full_compatible_with(&best) { - overwrite_mount_decision(host, best) - } else if host.is_major_compatible_with(&best) { - fallback_mount_decision( - host, - fallback_dir, - Some(partial_compatibility_message(host, &best)), - ) - } else { - Err(Error::message(major_mismatch_message(host, &best))) - } - } - CompatibilityPolicy::Strict => { - if host.is_strict_compatible_with(&best) { - overwrite_mount_decision(host, best) - } else { - 
Err(Error::message(strict_mismatch_message(host, &best))) - } - } - } + choose_same_major_mounts(host, candidates, fallback_dir) } fn choose_dependency_mounts( host: &Library, - candidates: &[Library], + _candidates: &[Library], fallback_dir: &Path, ) -> Result { - if candidates.is_empty() { - return fallback_mount_decision(host, fallback_dir, None); - } + fallback_mount_decision( + host, + fallback_dir, + Some(format!( + "injecting dependency library {} through LD_LIBRARY_PATH fallback", + host.path().display() + )), + ) +} - let best = select_best_dependency_candidate(host, candidates)?; +fn choose_same_major_mounts( + host: &Library, + candidates: &[Library], + fallback_dir: &Path, +) -> Result { + let same_major_candidates = candidates + .iter() + .filter(|candidate| host.is_major_compatible_with(candidate)) + .cloned() + .collect::>(); - if host.is_full_compatible_with(&best) { - overwrite_mount_decision(host, best) - } else if host.is_major_compatible_with(&best) { - fallback_mount_decision( - host, - fallback_dir, - Some(partial_compatibility_message(host, &best)), - ) - } else { - fallback_mount_decision( + if same_major_candidates.is_empty() { + return fallback_mount_decision( host, fallback_dir, Some(format!( - "{}; mounting {} into {} with LD_LIBRARY_PATH fallback", - major_mismatch_message(host, &best), + "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH fallback", + host.real_name(), host.path().display(), fallback_dir.display() )), - ) + ); } + + overwrite_mount_decision(host, &same_major_candidates) } -fn overwrite_mount_decision(host: &Library, container: Library) -> Result { +fn overwrite_mount_decision(host: &Library, containers: &[Library]) -> Result { Ok(MountDecision { - mounts: vec![MountEdit { - source: host.path().to_path_buf(), - destination: container.path().to_path_buf(), - }], + mounts: containers + .iter() + .map(|container| MountEdit { + source: host.path().to_path_buf(), + destination: 
container.path().to_path_buf(), + }) + .collect(), ld_library_path_dir: None, warning: None, }) @@ -766,72 +676,6 @@ fn fallback_link_names(file_name: &str) -> Result> { Ok(names) } -fn select_best_primary_candidate( - host: &Library, - candidates: &[Library], - compatibility: CompatibilityPolicy, -) -> Result { - let compatible_candidates = candidates - .iter() - .filter(|candidate| match compatibility { - CompatibilityPolicy::Major => host.is_major_compatible_with(candidate), - CompatibilityPolicy::Full => host.is_major_compatible_with(candidate), - CompatibilityPolicy::Strict => host.is_strict_compatible_with(candidate), - }) - .cloned() - .collect::>(); - - pick_best_candidate(host, candidates, &compatible_candidates) -} - -fn select_best_dependency_candidate(host: &Library, candidates: &[Library]) -> Result { - let compatible_candidates = candidates - .iter() - .filter(|candidate| host.is_major_compatible_with(candidate)) - .cloned() - .collect::>(); - - pick_best_candidate(host, candidates, &compatible_candidates) -} - -fn pick_best_candidate( - host: &Library, - candidates: &[Library], - compatible_candidates: &[Library], -) -> Result { - host.pick_best_candidate(if compatible_candidates.is_empty() { - candidates - } else { - compatible_candidates - }) - .cloned() - .ok_or_else(|| Error::message("internal error: candidate selection returned no library")) -} - -fn major_mismatch_message(host: &Library, container: &Library) -> String { - format!( - "primary library {} is not ABI compatible with container library {}", - host.real_name(), - container.real_name() - ) -} - -fn partial_compatibility_message(host: &Library, container: &Library) -> String { - format!( - "partial ABI compatibility detected: host library {} is older than container library {}", - host.real_name(), - container.real_name() - ) -} - -fn strict_mismatch_message(host: &Library, container: &Library) -> String { - format!( - "primary library {} is not strictly ABI compatible with container 
library {}", - host.real_name(), - container.real_name() - ) -} - fn index_container_libraries(container_libs: &[Library]) -> HashMap> { let mut index = HashMap::>::new(); for lib in container_libs { @@ -1197,9 +1041,9 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), - compatibility_policy: CompatibilityPolicy::Full, + _compatibility_policy: CompatibilityPolicy::Full, }; - let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.12.4").unwrap()]; + let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.13.4").unwrap()]; let edits = plan_config_edits(&inputs, &container_libs).unwrap(); assert_eq!(edits.mounts.len(), 1); @@ -1225,6 +1069,7 @@ mod tests { let host_file = temp_root.join("host/libmpi.so.12.5"); fs::create_dir_all(rootfs.join("usr/lib64")).unwrap(); + fs::create_dir_all(rootfs.join("opt/vendor")).unwrap(); fs::create_dir_all(host_file.parent().unwrap()).unwrap(); fs::write(&host_file, b"payload").unwrap(); @@ -1234,17 +1079,27 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), - compatibility_policy: CompatibilityPolicy::Major, + _compatibility_policy: CompatibilityPolicy::Major, }; - let container_libs = vec![Library::parse_host("/usr/lib64/libmpi.so.12.3").unwrap()]; + let container_libs = vec![ + Library::parse_host("/usr/lib64/libmpi.so.12.3").unwrap(), + Library::parse_host("/opt/vendor/libmpi.so.12.7").unwrap(), + Library::parse_host("/usr/lib64/libmpi.so.11.9").unwrap(), + ]; let edits = plan_config_edits(&inputs, &container_libs).unwrap(); assert_eq!( edits.mounts, - vec![MountEdit { - source: host_file.clone(), - destination: PathBuf::from("/usr/lib64/libmpi.so.12.3"), - }] + vec![ + MountEdit { + source: host_file.clone(), + destination: PathBuf::from("/usr/lib64/libmpi.so.12.3"), + }, + MountEdit { + source: host_file.clone(), + destination: 
PathBuf::from("/opt/vendor/libmpi.so.12.7"), + }, + ] ); assert!(edits.ld_library_path_dirs.is_empty()); assert!(edits.warnings.is_empty()); @@ -1252,6 +1107,65 @@ mod tests { fs::remove_dir_all(&temp_root).unwrap(); } + #[test] + fn dependency_always_uses_fallback_mount_after_primary_activation() { + let temp_root = unique_temp_path("overwrite-dependency"); + let rootfs = temp_root.join("rootfs"); + let primary = temp_root.join("host/libmpi.so.12.5"); + let dependency = temp_root.join("host/libhwloc.so.15.2"); + + fs::create_dir_all(rootfs.join("usr/lib64")).unwrap(); + fs::create_dir_all(rootfs.join("opt/vendor")).unwrap(); + fs::create_dir_all(primary.parent().unwrap()).unwrap(); + fs::write(&primary, b"payload").unwrap(); + fs::write(&dependency, b"payload").unwrap(); + + let inputs = HookInputs { + rootfs, + ldconfig: "ldconfig".into(), + primary_libs: vec![Library::parse_host(&primary).unwrap()], + dependency_libs: vec![Library::parse_host(&dependency).unwrap()], + extra_files: Vec::new(), + _compatibility_policy: CompatibilityPolicy::Full, + }; + let container_libs = vec![ + Library::parse_host("/usr/lib64/libmpi.so.12.3").unwrap(), + Library::parse_host("/usr/lib64/libhwloc.so.15.0").unwrap(), + Library::parse_host("/opt/vendor/libhwloc.so.15.9").unwrap(), + Library::parse_host("/usr/lib64/libhwloc.so.14.8").unwrap(), + ]; + + let edits = plan_config_edits(&inputs, &container_libs).unwrap(); + assert_eq!( + edits.mounts, + vec![ + MountEdit { + source: primary.clone(), + destination: PathBuf::from("/usr/lib64/libmpi.so.12.3"), + }, + MountEdit { + source: edits.mounts[1].source.clone(), + destination: PathBuf::from("/run/pc-injection/libhwloc.so.15.2"), + }, + ] + ); + assert_eq!( + edits.ld_library_path_dirs, + vec![PathBuf::from("/run/pc-injection/libhwloc.so.15.2")] + ); + assert_eq!( + edits.warnings, + vec![format!( + "injecting dependency library {} through LD_LIBRARY_PATH fallback", + dependency.display() + )] + ); + 
assert!(fs::symlink_metadata(edits.mounts[1].source.join("libhwloc.so.15")).is_ok()); + assert!(fs::symlink_metadata(edits.mounts[1].source.join("libhwloc.so.15.2")).is_ok()); + + fs::remove_dir_all(&temp_root).unwrap(); + } + #[test] fn config_edits_merge_ld_library_path_and_mounts() { let mut config = serde_json::json!({ @@ -1282,7 +1196,7 @@ mod tests { } #[test] - fn discovery_fails_for_unparseable_requested_library() { + fn discovery_warns_for_unparseable_container_library() { let temp_root = unique_temp_path("requested-parse"); let rootfs = temp_root.join("rootfs"); let host_file = temp_root.join("host/libmpi.so.12"); @@ -1304,13 +1218,13 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), - compatibility_policy: CompatibilityPolicy::Major, + _compatibility_policy: CompatibilityPolicy::Major, }; - let error = discover_container_libraries(&inputs).unwrap_err(); - assert!(error - .to_string() - .contains("failed to parse container library")); + let discovery = discover_container_libraries(&inputs).unwrap(); + assert!(discovery.libraries.is_empty()); + assert_eq!(discovery.warnings.len(), 1); + assert!(discovery.warnings[0].contains("skipping unparseable container library")); fs::remove_dir_all(&temp_root).unwrap(); } @@ -1335,7 +1249,7 @@ mod tests { primary_libs: vec![Library::parse_host(&primary).unwrap()], dependency_libs: Vec::new(), extra_files: vec![extra.clone()], - compatibility_policy: CompatibilityPolicy::Major, + _compatibility_policy: CompatibilityPolicy::Major, }; let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.12.1").unwrap()]; From 66547f52a8ebed916a9670dec9ee129dd167ba7e Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 02:29:55 +0200 Subject: [PATCH 07/15] updated warning for mismatched libs --- crates/pc_injection_hook/src/main.rs | 121 +++++++++++++++++++++++---- 1 file changed, 104 insertions(+), 17 deletions(-) diff --git 
a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs index 3b94a3e..8b686a0 100644 --- a/crates/pc_injection_hook/src/main.rs +++ b/crates/pc_injection_hook/src/main.rs @@ -150,7 +150,7 @@ struct MountEdit { struct MountDecision { mounts: Vec, ld_library_path_dir: Option, - warning: Option, + warnings: Vec, } fn read_stdin_json_value() -> Result { @@ -453,6 +453,7 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< let mut warnings = Vec::new(); let mut ld_library_path_dirs = Vec::new(); + // at least 1 library to inject needs to exist in container if !inputs .primary_libs .iter() @@ -463,6 +464,7 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< )); } + // check injection has major ABI for lib in &inputs.primary_libs { validate_regular_source_file(lib.path(), "primary library")?; if !lib.has_major_version() { @@ -473,6 +475,7 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< } } + // here we decide if we replace or add for host in &inputs.primary_libs { let candidates = container_index .get(host.linker_name()) @@ -550,7 +553,7 @@ fn append_decision_mounts( ld_library_path_dirs.push(dir); } - if let Some(warning) = decision.warning { + for warning in decision.warnings { push_warning(warnings, warning); } @@ -573,10 +576,10 @@ fn choose_dependency_mounts( fallback_mount_decision( host, fallback_dir, - Some(format!( + vec![format!( "injecting dependency library {} through LD_LIBRARY_PATH fallback", host.path().display() - )), + )], ) } @@ -585,6 +588,21 @@ fn choose_same_major_mounts( candidates: &[Library], fallback_dir: &Path, ) -> Result { + let mut warnings = Vec::new(); + let mismatched_candidates = candidates + .iter() + .filter(|candidate| candidate.linker_name() == host.linker_name()) + .filter(|candidate| !host.is_major_compatible_with(candidate)) + .map(|candidate| candidate.real_name().to_string()) + .collect::>(); + if 
!mismatched_candidates.is_empty() { + warnings.push(format!( + "skipping same-name container libraries with different major ABI for {}: {}", + host.real_name(), + mismatched_candidates.join(", ") + )); + } + let same_major_candidates = candidates .iter() .filter(|candidate| host.is_major_compatible_with(candidate)) @@ -592,22 +610,28 @@ fn choose_same_major_mounts( .collect::>(); if same_major_candidates.is_empty() { + warnings.push(format!( + "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH", + host.real_name(), + host.path().display(), + fallback_dir.display() + )); return fallback_mount_decision( host, fallback_dir, - Some(format!( - "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH fallback", - host.real_name(), - host.path().display(), - fallback_dir.display() - )), + warnings, ); } - overwrite_mount_decision(host, &same_major_candidates) + overwrite_mount_decision(host, &same_major_candidates, warnings) } -fn overwrite_mount_decision(host: &Library, containers: &[Library]) -> Result { +// Here we build the mountEdit to overwrite lib with host +fn overwrite_mount_decision( + host: &Library, + containers: &[Library], + warnings: Vec, +) -> Result { Ok(MountDecision { mounts: containers .iter() @@ -617,14 +641,16 @@ fn overwrite_mount_decision(host: &Library, containers: &[Library]) -> Result, + warnings: Vec, ) -> Result { let names = fallback_link_names(host.file_name())?; let source = create_fallback_staging_dir(host.path(), &names)?; @@ -636,7 +662,7 @@ fn fallback_mount_decision( destination, }], ld_library_path_dir, - warning, + warnings, }) } @@ -729,6 +755,7 @@ fn validate_regular_source_file(source: &Path, label: &str) -> Result<()> { Ok(()) } +// INJECTION_EXTRA_FILES are raw file mounts and we need those to be exact fn validate_extra_source_file(source: &Path) -> Result<()> { let source_metadata = fs::symlink_metadata(source).map_err(|e| { Error::io( @@ 
-1057,7 +1084,18 @@ mod tests { edits.ld_library_path_dirs, vec![PathBuf::from("/run/pc-injection/libmpi.so.12.2")] ); - assert_eq!(edits.warnings.len(), 1); + assert_eq!( + edits.warnings, + vec![ + "skipping same-name container libraries with different major ABI for libmpi.so.12.2: libmpi.so.13.4".to_string(), + format!( + "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH fallback", + "libmpi.so.12.2", + host_file.display(), + "/run/pc-injection" + ), + ] + ); fs::remove_dir_all(&temp_root).unwrap(); } @@ -1102,7 +1140,56 @@ mod tests { ] ); assert!(edits.ld_library_path_dirs.is_empty()); - assert!(edits.warnings.is_empty()); + assert_eq!( + edits.warnings, + vec![ + "skipping same-name container libraries with different major ABI for libmpi.so.12.5: libmpi.so.11.9".to_string() + ] + ); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn primary_warns_for_major_mismatch_and_only_overwrites_same_major_candidates() { + let temp_root = unique_temp_path("warn-major-mismatch"); + let rootfs = temp_root.join("rootfs"); + let host_file = temp_root.join("host/libmpi.so.12.5"); + + fs::create_dir_all(rootfs.join("usr/lib64")).unwrap(); + fs::create_dir_all(rootfs.join("opt/vendor")).unwrap(); + fs::create_dir_all(host_file.parent().unwrap()).unwrap(); + fs::write(&host_file, b"payload").unwrap(); + + let inputs = HookInputs { + rootfs, + ldconfig: "ldconfig".into(), + primary_libs: vec![Library::parse_host(&host_file).unwrap()], + dependency_libs: Vec::new(), + extra_files: Vec::new(), + _compatibility_policy: CompatibilityPolicy::Major, + }; + let container_libs = vec![ + Library::parse_host("/usr/lib64/libmpi.so.12.3").unwrap(), + Library::parse_host("/opt/vendor/libmpi.so.11.7").unwrap(), + Library::parse_host("/usr/lib64/libmpi.so.13.1").unwrap(), + ]; + + let edits = plan_config_edits(&inputs, &container_libs).unwrap(); + assert_eq!( + edits.mounts, + vec![MountEdit { + source: host_file.clone(), + 
destination: PathBuf::from("/usr/lib64/libmpi.so.12.3"), + }] + ); + assert!(edits.ld_library_path_dirs.is_empty()); + assert_eq!( + edits.warnings, + vec![ + "skipping same-name container libraries with different major ABI for libmpi.so.12.5: libmpi.so.11.7, libmpi.so.13.1".to_string() + ] + ); fs::remove_dir_all(&temp_root).unwrap(); } From 4a34da4c24a97c57d39a90ebed8fa496d331b7e0 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 12:46:46 +0200 Subject: [PATCH 08/15] simplification of mount handling --- crates/pc_injection_hook/src/main.rs | 93 ++++++---------------------- 1 file changed, 19 insertions(+), 74 deletions(-) diff --git a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs index 8b686a0..160d35c 100644 --- a/crates/pc_injection_hook/src/main.rs +++ b/crates/pc_injection_hook/src/main.rs @@ -494,7 +494,6 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< &mut mounts, &mut ld_library_path_dirs, &mut warnings, - &inputs.rootfs, decision, )?; } @@ -511,20 +510,20 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< &mut mounts, &mut ld_library_path_dirs, &mut warnings, - &inputs.rootfs, decision, )?; } for file in &inputs.extra_files { validate_extra_source_file(file)?; - validate_mount_destination(&inputs.rootfs, file)?; + validate_mount_destination(file)?; mounts.push(MountEdit { source: file.clone(), destination: file.clone(), }); } + // Ensure we do not have duplicated decisions dedupe_mounts(&mut mounts)?; dedupe_paths(&mut ld_library_path_dirs); @@ -539,12 +538,11 @@ fn append_decision_mounts( mounts: &mut Vec, ld_library_path_dirs: &mut Vec, warnings: &mut Vec, - rootfs: &Path, decision: MountDecision, ) -> Result<()> { for mount in decision.mounts { if !mount.destination.starts_with("/run/pc-injection") { - validate_mount_destination(rootfs, &mount.destination)?; + validate_mount_destination(&mount.destination)?; } mounts.push(mount); } @@ -782,98 
+780,40 @@ fn validate_extra_source_file(source: &Path) -> Result<()> { Ok(()) } -fn validate_mount_destination(rootfs: &Path, destination: &Path) -> Result<()> { - let rootfs_real = fs::canonicalize(rootfs) - .map_err(|e| Error::io(format!("failed to resolve rootfs {}", rootfs.display()), e))?; - let relative = normalize_container_relative_path(destination)?; - let parent = relative.parent().unwrap_or_else(|| Path::new("")); - let parent_real = resolve_existing_directory_in_rootfs(&rootfs_real, parent)?; - if !parent_real.starts_with(&rootfs_real) { +fn validate_mount_destination(destination: &Path) -> Result<()> { + if !destination.is_absolute() { return Err(Error::message(format!( - "mount destination escapes the rootfs: {}", + "mount destination must be absolute: {}", destination.display() ))); } - let target = parent_real.join(relative.file_name().ok_or_else(|| { - Error::message(format!( - "mount destination has no file name: {}", - destination.display() - )) - })?); - if let Ok(metadata) = fs::symlink_metadata(&target) { - let file_type = metadata.file_type(); - if file_type.is_dir() && !file_type.is_symlink() { - return Err(Error::message(format!( - "mount destination already exists as a directory: {}", - destination.display() - ))); - } - } - - Ok(()) -} - -fn normalize_container_relative_path(destination: &Path) -> Result { - let relative = match destination.strip_prefix("/") { - Ok(relative) => relative, - Err(_) => destination, - }; - - let mut normalized = PathBuf::new(); - for component in relative.components() { + for component in destination.components() { match component { - Component::Normal(part) => normalized.push(part), + Component::Normal(_) | Component::RootDir => {} Component::CurDir | Component::ParentDir => { return Err(Error::message(format!( "mount destination must not contain '.' or '..' 
components: {}", destination.display() ))); } - Component::RootDir | Component::Prefix(_) => { + Component::Prefix(_) => { return Err(Error::message(format!( - "mount destination must be a normalized container path: {}", + "mount destination must be a Unix-style absolute path: {}", destination.display() ))); } } } - if normalized.file_name().is_none() { + if destination.file_name().is_none() { return Err(Error::message(format!( "mount destination has no valid file name: {}", destination.display() ))); } - Ok(normalized) -} - -fn resolve_existing_directory_in_rootfs(rootfs: &Path, relative: &Path) -> Result { - let mut current = rootfs.to_path_buf(); - for component in relative.components() { - current.push(component.as_os_str()); - let metadata = fs::symlink_metadata(¤t) - .map_err(|e| Error::io(format!("failed to inspect {}", current.display()), e))?; - let file_type = metadata.file_type(); - if !file_type.is_dir() && !file_type.is_symlink() { - return Err(Error::message(format!( - "mount destination parent is not a directory: {}", - current.display() - ))); - } - - current = fs::canonicalize(¤t) - .map_err(|e| Error::io(format!("failed to resolve {}", current.display()), e))?; - if !current.starts_with(rootfs) { - return Err(Error::message(format!( - "mount destination escapes the rootfs through {}", - current.display() - ))); - } - } - - Ok(current) + Ok(()) } fn resolve_in_rootfs(rootfs: &Path, container_path: &Path) -> PathBuf { @@ -920,6 +860,7 @@ fn dedupe_paths(paths: &mut Vec) { paths.retain(|path| seen.insert(path.clone())); } +// Now we apply edit to OCI config fn apply_config_edits(config: &mut Value, edits: &ConfigEdits) -> Result<()> { let obj = config .as_object_mut() @@ -971,6 +912,7 @@ fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Resu .map(|path| path.display().to_string()) .collect::>(); + // ensuring OCI json has config.process.env entry let process_val = obj .entry("process".to_string()) .or_insert_with(|| 
Value::Object(Map::new())); @@ -979,13 +921,13 @@ fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Resu .ok_or_else(|| Error::message("validation error: 'process' exists but is not an object"))?; let env_arr = ensure_array_field(process_obj, "env")?; + // check if we already got an env with LD_LIBRARY_PATH let existing_index = env_arr.iter().rposition(|value| { value .as_str() .and_then(|entry| entry.split_once('=').map(|(key, _)| key)) .is_some_and(|key| key == "LD_LIBRARY_PATH") }); - let existing_entries = existing_index .and_then(|idx| env_arr[idx].as_str()) .and_then(|entry| entry.split_once('=').map(|(_, value)| value.to_string())) @@ -993,11 +935,13 @@ fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Resu let mut merged = Vec::new(); let mut seen = HashSet::new(); + // Add new libs first for dir in &dirs_as_strings { if seen.insert(dir.clone()) { merged.push(dir.clone()); } } + // Append existing entries for dir in existing_entries .split(':') .filter(|entry| !entry.is_empty()) @@ -1007,6 +951,7 @@ fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Resu } } + // Replace or append into env_var let value = format!("LD_LIBRARY_PATH={}", merged.join(":")); match existing_index { Some(idx) => env_arr[idx] = Value::String(value), @@ -1089,7 +1034,7 @@ mod tests { vec![ "skipping same-name container libraries with different major ABI for libmpi.so.12.2: libmpi.so.13.4".to_string(), format!( - "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH fallback", + "no same-major container match found for host library {}; mounting {} into {} with LD_LIBRARY_PATH", "libmpi.so.12.2", host_file.display(), "/run/pc-injection" From bddceeff87ae8f73b0b99d64a95edcb40ae4dd9e Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 17:44:00 +0200 Subject: [PATCH 09/15] rust format fix --- crates/ldcache_hook/src/main.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git 
a/crates/ldcache_hook/src/main.rs b/crates/ldcache_hook/src/main.rs index 9d57f5c..2db3cf8 100644 --- a/crates/ldcache_hook/src/main.rs +++ b/crates/ldcache_hook/src/main.rs @@ -1,7 +1,6 @@ use serde::Deserialize; use std::{ - env, - fs, + env, fs, io::{self, Read}, path::{Path, PathBuf}, process::{self, Command, Stdio}, @@ -130,4 +129,3 @@ fn summarize_cache(rootfs: &Path) { } } } - From 1065c65e5318a639df51697b96453f5b8ccab152 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 17:44:38 +0200 Subject: [PATCH 10/15] added new options to match network injection capability --- crates/pc_injection_hook/README.md | 15 + crates/pc_injection_hook/src/main.rs | 396 ++++++++++++++++++++++++++- 2 files changed, 403 insertions(+), 8 deletions(-) diff --git a/crates/pc_injection_hook/README.md b/crates/pc_injection_hook/README.md index 6e0af61..c579100 100644 --- a/crates/pc_injection_hook/README.md +++ b/crates/pc_injection_hook/README.md @@ -12,9 +12,11 @@ to add bind mounts to inject host libs. * primary libraries from `INJECTION_PRIMARY_LIBS` * optional dependency libraries from `INJECTION_DEPENDENCY_LIBS` * optional extra files from `INJECTION_EXTRA_FILES` + * optional extra OCI mounts from `INJECTION_EXTRA_MOUNTS` * Appends the required mounts to the OCI config and writes the updated JSON to `stdout`. * Adds `LD_LIBRARY_PATH` when the plan injects a runtime library directory instead of overwriting an existing container library path. +* Merges optional environment variables from `INJECTION_EXTRA_ENV` into `process.env`. ## Notes @@ -23,3 +25,16 @@ to add bind mounts to inject host libs. * When the plan introduces new lib injection paths, the hook also updates `LD_LIBRARY_PATH` because a prestart `ldconfig -r ` run does not see runtime-only bind mounts. New lib injection are exposed through a host-side staging directory mounted at `/run/pc-injection/`. + +## Optional hook env vars + +* `INJECTION_EXTRA_ENV`: semicolon-separated `KEY=VALUE` entries. 
+* `INJECTION_EXTRA_MOUNTS`: semicolon-separated mount entries in + `source:destination:type:option1,option2,...` format. + +Example: + +```text +INJECTION_EXTRA_ENV=MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000;FOO=bar +INJECTION_EXTRA_MOUNTS=/var/spool/slurmd:/var/spool/slurmd:none:x-create=dir,bind,rw,nosuid,noexec,nodev,private;/var/lib/hugetlbfs:/var/lib/hugetlbfs:bind:rbind,rw,nosuid,nodev,private +``` diff --git a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs index 160d35c..948e39b 100644 --- a/crates/pc_injection_hook/src/main.rs +++ b/crates/pc_injection_hook/src/main.rs @@ -1,4 +1,4 @@ -use serde_json::{Map, Value}; +use serde_json::{json, Map, Value}; use std::collections::{HashMap, HashSet}; use std::env; use std::error::Error as StdError; @@ -91,6 +91,8 @@ struct HookInputs { primary_libs: Vec, dependency_libs: Vec, extra_files: Vec, + extra_mounts: Vec, + extra_env: Vec, _compatibility_policy: CompatibilityPolicy, } @@ -137,6 +139,8 @@ struct DiscoveryOutcome { struct ConfigEdits { mounts: Vec, ld_library_path_dirs: Vec, + extra_mounts: Vec, + extra_env: Vec, warnings: Vec, } @@ -146,6 +150,14 @@ struct MountEdit { destination: PathBuf, } +#[derive(Clone, Debug, Eq, PartialEq)] +struct ExtraMountEdit { + source: PathBuf, + destination: PathBuf, + mount_type: String, + options: Vec, +} + #[derive(Clone, Debug, Eq, PartialEq)] struct MountDecision { mounts: Vec, @@ -187,6 +199,8 @@ fn load_inputs(config: &Value) -> Result { primary_libs: parse_required_library_list("INJECTION_PRIMARY_LIBS")?, dependency_libs: parse_optional_library_list("INJECTION_DEPENDENCY_LIBS")?, extra_files: parse_optional_path_list("INJECTION_EXTRA_FILES"), + extra_mounts: parse_optional_mount_specs("INJECTION_EXTRA_MOUNTS")?, + extra_env: parse_optional_env_specs("INJECTION_EXTRA_ENV")?, _compatibility_policy: CompatibilityPolicy::from_env("INJECTION_COMPATIBILITY")?, }) } @@ -235,6 +249,123 @@ fn parse_optional_path_list(var: &'static 
str) -> Vec { } } +fn parse_optional_env_specs(var: &'static str) -> Result> { + let Some(raw) = env::var_os(var) else { + return Ok(Vec::new()); + }; + + if raw.is_empty() { + return Ok(Vec::new()); + } + + let raw = raw + .to_str() + .ok_or_else(|| Error::message(format!("{var} must contain valid UTF-8")))?; + + if raw.trim().is_empty() { + return Ok(Vec::new()); + } + + let entries = raw + .split(';') + .filter(|entry| !entry.trim().is_empty()) + .map(|entry| entry.trim().to_string()) + .collect::>(); + + validate_env_strings(&entries)?; + Ok(entries) +} + +fn parse_optional_mount_specs(var: &'static str) -> Result> { + let Some(raw) = env::var_os(var) else { + return Ok(Vec::new()); + }; + + if raw.is_empty() { + return Ok(Vec::new()); + } + + let raw = raw + .to_str() + .ok_or_else(|| Error::message(format!("{var} must contain valid UTF-8")))?; + + if raw.trim().is_empty() { + return Ok(Vec::new()); + } + + raw.split(';') + .filter(|entry| !entry.trim().is_empty()) + .map(|entry| parse_mount_spec_entry(var, entry.trim())) + .collect() +} + +fn parse_mount_spec_entry(var: &'static str, entry: &str) -> Result { + let parts = entry.splitn(4, ':').collect::>(); + if parts.len() != 4 { + return Err(Error::message(format!( + "{var} mount entries must use source:destination:type:options format: {entry}" + ))); + } + + let source = PathBuf::from(parts[0].trim()); + let destination = PathBuf::from(parts[1].trim()); + let mount_type = parts[2].trim(); + let options = if parts[3].trim().is_empty() { + Vec::new() + } else { + parts[3] + .split(',') + .map(str::trim) + .filter(|option| !option.is_empty()) + .map(ToString::to_string) + .collect::>() + }; + + validate_mount_source_path(&source, "extra mount source")?; + validate_mount_destination(&destination)?; + validate_mount_options(&options)?; + + let mount_type = match mount_type { + "" | "none" | "bind" => "bind".to_string(), + other => { + return Err(Error::message(format!( + "unsupported extra mount type '{other}', 
only bind-style mounts are supported" + ))) + } + }; + + Ok(ExtraMountEdit { + source, + destination, + mount_type, + options: strip_non_oci_mount_options(options), + }) +} + +fn strip_non_oci_mount_options(options: Vec) -> Vec { + options + .into_iter() + .filter(|option| option != "x-create=dir") + .collect() +} + +fn validate_mount_options(options: &[String]) -> Result<()> { + for option in options { + match option.as_str() { + "bind" | "rbind" | "ro" | "rw" | "nosuid" | "suid" | "nodev" | "dev" | "noexec" + | "exec" | "private" | "rprivate" | "slave" | "rslave" | "shared" | "rshared" + | "x-create=dir" => {} + other => { + return Err(Error::message(format!( + "unsupported extra mount option '{other}'" + ))) + } + } + } + + Ok(()) +} + impl CompatibilityPolicy { fn from_env(var: &'static str) -> Result { match env::var(var) { @@ -523,6 +654,11 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< }); } + let mut extra_mounts = inputs.extra_mounts.clone(); + dedupe_extra_mounts(&mut extra_mounts)?; + validate_mount_conflicts(&mounts, &extra_mounts)?; + let extra_env = inputs.extra_env.clone(); + // Ensure we do not have duplicated decisions dedupe_mounts(&mut mounts)?; dedupe_paths(&mut ld_library_path_dirs); @@ -530,6 +666,8 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< Ok(ConfigEdits { mounts, ld_library_path_dirs, + extra_mounts, + extra_env, warnings, }) } @@ -614,11 +752,7 @@ fn choose_same_major_mounts( host.path().display(), fallback_dir.display() )); - return fallback_mount_decision( - host, - fallback_dir, - warnings, - ); + return fallback_mount_decision(host, fallback_dir, warnings); } overwrite_mount_decision(host, &same_major_candidates, warnings) @@ -643,7 +777,6 @@ fn overwrite_mount_decision( }) } - // inject a library through a temporal dir mount containing the right library names as symlinks to the host file, mounts that directory into the container, and tells the dynamic linker 
to search there fn fallback_mount_decision( host: &Library, @@ -753,6 +886,28 @@ fn validate_regular_source_file(source: &Path, label: &str) -> Result<()> { Ok(()) } +fn validate_mount_source_path(source: &Path, label: &str) -> Result<()> { + if !source.is_absolute() { + return Err(Error::message(format!( + "{label} must be absolute: {}", + source.display() + ))); + } + + let source_metadata = fs::symlink_metadata(source) + .map_err(|e| Error::io(format!("failed to stat {label} {}", source.display()), e))?; + let file_type = source_metadata.file_type(); + + if !(file_type.is_file() || file_type.is_dir()) { + return Err(Error::message(format!( + "{label} must be a regular file or directory: {}", + source.display() + ))); + } + + Ok(()) +} + // INJECTION_EXTRA_FILES are raw file mounts and we need those to be exact fn validate_extra_source_file(source: &Path) -> Result<()> { let source_metadata = fs::symlink_metadata(source).map_err(|e| { @@ -855,6 +1010,59 @@ fn dedupe_mounts(mounts: &mut Vec) -> Result<()> { Ok(()) } +fn dedupe_extra_mounts(mounts: &mut Vec) -> Result<()> { + let mut seen = HashMap::)>::new(); + let mut deduped = Vec::new(); + + for mount in mounts.drain(..) 
{ + match seen.get(&mount.destination) { + Some((existing_source, existing_type, existing_options)) + if existing_source != &mount.source + || existing_type != &mount.mount_type + || existing_options != &mount.options => + { + return Err(Error::message(format!( + "conflicting planned extra mounts for {}", + mount.destination.display() + ))) + } + Some(_) => {} + None => { + seen.insert( + mount.destination.clone(), + ( + mount.source.clone(), + mount.mount_type.clone(), + mount.options.clone(), + ), + ); + deduped.push(mount); + } + } + } + + *mounts = deduped; + Ok(()) +} + +fn validate_mount_conflicts(mounts: &[MountEdit], extra_mounts: &[ExtraMountEdit]) -> Result<()> { + let planned_mounts: HashSet<_> = mounts + .iter() + .map(|mount| mount.destination.clone()) + .collect(); + + for mount in extra_mounts { + if planned_mounts.contains(&mount.destination) { + return Err(Error::message(format!( + "conflicting planned mounts for {}: destination already used by library injection", + mount.destination.display() + ))); + } + } + + Ok(()) +} + fn dedupe_paths(paths: &mut Vec) { let mut seen = HashSet::new(); paths.retain(|path| seen.insert(path.clone())); @@ -874,6 +1082,14 @@ fn apply_config_edits(config: &mut Value, edits: &ConfigEdits) -> Result<()> { merge_ld_library_path(obj, &edits.ld_library_path_dirs)?; } + if !edits.extra_mounts.is_empty() { + append_extra_mounts(obj, &edits.extra_mounts)?; + } + + if !edits.extra_env.is_empty() { + merge_process_env_strings(obj, &edits.extra_env)?; + } + Ok(()) } @@ -906,6 +1122,33 @@ fn append_mounts(obj: &mut Map, mounts_to_add: &[MountEdit]) -> R Ok(()) } +fn append_extra_mounts( + obj: &mut Map, + mounts_to_add: &[ExtraMountEdit], +) -> Result<()> { + let mounts = ensure_array_field(obj, "mounts")?; + + for mount in mounts_to_add { + let mut out = Map::new(); + out.insert( + "destination".to_string(), + Value::String(mount.destination.display().to_string()), + ); + out.insert("type".to_string(), 
Value::String(mount.mount_type.clone())); + out.insert( + "source".to_string(), + Value::String(mount.source.display().to_string()), + ); + out.insert( + "options".to_string(), + Value::Array(mount.options.iter().cloned().map(Value::String).collect()), + ); + mounts.push(Value::Object(out)); + } + + Ok(()) +} + fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Result<()> { let dirs_as_strings = dirs .iter() @@ -961,6 +1204,56 @@ fn merge_ld_library_path(obj: &mut Map, dirs: &[PathBuf]) -> Resu Ok(()) } +fn validate_env_strings(entries: &[String]) -> Result<()> { + for entry in entries { + validate_kv_format(entry)?; + } + + Ok(()) +} + +fn validate_kv_format(entry: &str) -> Result<()> { + if let Some((key, _)) = entry.split_once('=') { + if key.is_empty() { + return Err(Error::message("empty environment variable name before '='")); + } + Ok(()) + } else { + Err(Error::message(format!( + "invalid env entry (expected KEY=VALUE): {entry}" + ))) + } +} + +fn merge_process_env_strings(obj: &mut Map, env_entries: &[String]) -> Result<()> { + let process_val = obj + .entry("process".to_string()) + .or_insert_with(|| json!({})); + let process_obj = process_val + .as_object_mut() + .ok_or_else(|| Error::message("validation error: 'process' exists but is not an object"))?; + let env_arr = ensure_array_field(process_obj, "env")?; + + for new in env_entries { + let (new_key, _) = new + .split_once('=') + .expect("environment entries must be validated before merging"); + + if let Some(idx) = env_arr.iter().rposition(|value| { + value + .as_str() + .and_then(|entry| entry.split_once('=').map(|(key, _)| key)) + .is_some_and(|key| key == new_key) + }) { + env_arr[idx] = Value::String(new.clone()); + } else { + env_arr.push(Value::String(new.clone())); + } + } + + Ok(()) +} + fn ensure_array_field<'a>( obj: &'a mut Map, field: &str, @@ -1013,6 +1306,8 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: 
Vec::new(), + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Full, }; let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.13.4").unwrap()]; @@ -1062,6 +1357,8 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Major, }; let container_libs = vec![ @@ -1112,6 +1409,8 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Major, }; let container_libs = vec![ @@ -1158,6 +1457,8 @@ mod tests { primary_libs: vec![Library::parse_host(&primary).unwrap()], dependency_libs: vec![Library::parse_host(&dependency).unwrap()], extra_files: Vec::new(), + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Full, }; let container_libs = vec![ @@ -1213,6 +1514,18 @@ mod tests { destination: PathBuf::from("/lib/libmpi.so.12"), }], ld_library_path_dirs: vec![PathBuf::from("/lib")], + extra_mounts: vec![ExtraMountEdit { + source: PathBuf::from("/var/spool/slurmd"), + destination: PathBuf::from("/var/spool/slurmd"), + mount_type: "bind".to_string(), + options: vec![ + "bind".to_string(), + "rw".to_string(), + "nosuid".to_string(), + "nodev".to_string(), + ], + }], + extra_env: vec!["MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000".to_string()], warnings: Vec::new(), }; @@ -1221,10 +1534,18 @@ mod tests { assert!(env .iter() .any(|value| value == "LD_LIBRARY_PATH=/lib:/usr/lib64")); + assert!(env + .iter() + .any(|value| { value == "MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000" })); let mounts = config["mounts"].as_array().unwrap(); - assert_eq!(mounts.len(), 1); + assert_eq!(mounts.len(), 2); 
assert_eq!(mounts[0]["type"], "bind"); assert_eq!(mounts[0]["destination"], "/lib/libmpi.so.12"); + assert_eq!(mounts[1]["destination"], "/var/spool/slurmd"); + assert_eq!( + mounts[1]["options"], + serde_json::json!(["bind", "rw", "nosuid", "nodev"]) + ); } #[test] @@ -1250,6 +1571,8 @@ mod tests { primary_libs: vec![Library::parse_host(&host_file).unwrap()], dependency_libs: Vec::new(), extra_files: Vec::new(), + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Major, }; @@ -1281,6 +1604,8 @@ mod tests { primary_libs: vec![Library::parse_host(&primary).unwrap()], dependency_libs: Vec::new(), extra_files: vec![extra.clone()], + extra_mounts: Vec::new(), + extra_env: Vec::new(), _compatibility_policy: CompatibilityPolicy::Major, }; let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.12.1").unwrap()]; @@ -1298,4 +1623,59 @@ mod tests { .to_string() .contains("requires an absolute OCI root.path")); } + + #[test] + fn parse_optional_env_specs_accepts_semicolon_separated_entries() { + std::env::set_var( + "INJECTION_EXTRA_ENV", + "FOO=bar;MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000", + ); + + let entries = parse_optional_env_specs("INJECTION_EXTRA_ENV").unwrap(); + assert_eq!( + entries, + vec![ + "FOO=bar".to_string(), + "MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000".to_string() + ] + ); + + std::env::remove_var("INJECTION_EXTRA_ENV"); + } + + #[test] + fn parse_optional_mount_specs_normalizes_bind_mounts() { + let temp_root = unique_temp_path("extra-mount-spec"); + let mount_source = temp_root.join("var/spool/slurmd"); + fs::create_dir_all(&mount_source).unwrap(); + + std::env::set_var( + "INJECTION_EXTRA_MOUNTS", + format!( + "{}:/var/spool/slurmd:none:x-create=dir,bind,rw,nosuid,noexec,nodev,private", + mount_source.display() + ), + ); + + let mounts = parse_optional_mount_specs("INJECTION_EXTRA_MOUNTS").unwrap(); + assert_eq!( + mounts, + vec![ExtraMountEdit { + source: 
mount_source.clone(), + destination: PathBuf::from("/var/spool/slurmd"), + mount_type: "bind".to_string(), + options: vec![ + "bind".to_string(), + "rw".to_string(), + "nosuid".to_string(), + "noexec".to_string(), + "nodev".to_string(), + "private".to_string(), + ], + }] + ); + + std::env::remove_var("INJECTION_EXTRA_MOUNTS"); + fs::remove_dir_all(&temp_root).unwrap(); + } } From 26f1c1f6b71d9b82541c39a8f351b264ec977587 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 17:57:39 +0200 Subject: [PATCH 11/15] test covers new options --- test/pc-injection-podman.bats | 105 ++++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/test/pc-injection-podman.bats b/test/pc-injection-podman.bats index 5cc352d..3e0380b 100644 --- a/test/pc-injection-podman.bats +++ b/test/pc-injection-podman.bats @@ -44,6 +44,51 @@ EOF printf '%s\n' "$hooks_dir" } +make_pc_injection_hook_dir_with_extras() { + local primary_lib="$1" + local dependency_lib="$2" + local ldconfig_path="$3" + local extra_mounts="$4" + local extra_env="$5" + local hooks_dir + hooks_dir="$(mktemp -d)" + + local repo bin + repo="$(git rev-parse --show-toplevel 2>/dev/null || pwd)" + bin="$repo/target/release/pc_injection_hook" + + if [[ ! -x "$bin" ]]; then + echo "pc_injection_hook binary not found at $bin." 
>&2 + rm -rf "$hooks_dir" + return 1 + fi + + cat >"$hooks_dir/pc-injection.json" </dev/null + + run command -v ldconfig + assert_success + ldconfig_path="$output" + + run bash -lc ' + while read -r line; do + case "$line" in + *"libz.so.1 "*) + set -- $line + printf "%s\n" "${!#}" + exit 0 + ;; + esac + done < <(ldconfig -p) + exit 1 + ' + assert_success + assert_output --partial "/" + primary_lib="$output" + + workdir="$(mktemp -d)" + extra_mount_src="$workdir/slurmd" + mkdir -p "$extra_mount_src" + printf 'from-host\n' >"$extra_mount_src/marker" + + extra_mounts="$extra_mount_src:/var/spool/slurmd:bind:bind,rw,nosuid,noexec,nodev,private" + extra_env="MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD=100000000" + + hooks_dir="$( + make_pc_injection_hook_dir_with_extras \ + "$primary_lib" \ + "$primary_lib" \ + "$ldconfig_path" \ + "$extra_mounts" \ + "$extra_env" + )" + [ -n "$hooks_dir" ] + + run podman --hooks-dir="$hooks_dir" run --rm \ + --annotation pc-injection.enable=true \ + "$IMAGE" bash -lc ' + [ "$MPIR_CVAR_CH4_OFI_MULTI_NIC_STRIPING_THRESHOLD" = "100000000" ] && + [ -f /var/spool/slurmd/marker ] + ' + + { + printf '%s\n' "$output" + printf '%s\n' "$stderr" + } >&3 + + assert_success + + rm -rf "$workdir" "$hooks_dir" +} From 503ba4e5872f02ee650ce64896dea4a6711d8445 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 18:05:43 +0200 Subject: [PATCH 12/15] test helper handles deps as options, cleaner --- test/pc-injection-podman.bats | 52 +++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/test/pc-injection-podman.bats b/test/pc-injection-podman.bats index 3e0380b..a5058b4 100644 --- a/test/pc-injection-podman.bats +++ b/test/pc-injection-podman.bats @@ -20,7 +20,8 @@ make_pc_injection_hook_dir() { return 1 fi - cat >"$hooks_dir/pc-injection.json" <"$hooks_dir/pc-injection.json" <"$hooks_dir/pc-injection.json" <"$hooks_dir/pc-injection.json" <"$hooks_dir/pc-injection.json" 
<"$hooks_dir/pc-injection.json" < Date: Tue, 28 Apr 2026 18:51:38 +0200 Subject: [PATCH 13/15] minor detail tweak to readme --- crates/pc_injection_hook/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/pc_injection_hook/README.md b/crates/pc_injection_hook/README.md index c579100..d20165f 100644 --- a/crates/pc_injection_hook/README.md +++ b/crates/pc_injection_hook/README.md @@ -20,8 +20,6 @@ to add bind mounts to inject host libs. ## Notes -* This hook pairs with `ldcache_hook` when it overwrites existing library paths that are - already present in the container cache! * When the plan introduces new lib injection paths, the hook also updates `LD_LIBRARY_PATH` because a prestart `ldconfig -r ` run does not see runtime-only bind mounts. New lib injection are exposed through a host-side staging directory mounted at `/run/pc-injection/`. From 68744819f81ce6c06bafa0b23e9a381f168617c7 Mon Sep 17 00:00:00 2001 From: felipecr Date: Tue, 28 Apr 2026 19:03:50 +0200 Subject: [PATCH 14/15] tweak to readme --- crates/pc_injection_hook/README.md | 35 +++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/crates/pc_injection_hook/README.md b/crates/pc_injection_hook/README.md index d20165f..f32f87c 100644 --- a/crates/pc_injection_hook/README.md +++ b/crates/pc_injection_hook/README.md @@ -1,22 +1,27 @@ -# pc_injection_hook +# Precreate Injection Hook Precreate hook that plans library injection from the container rootfs and rewrites the OCI config to add bind mounts to inject host libs. -## What it does - -* Reads the OCI runtime config from `stdin`. -* Finds the container rootfs from `root.path`. -* Discovers container libraries with `ldconfig -r -p`. 
-* Plans bind mounts for:
-  * primary libraries from `INJECTION_PRIMARY_LIBS`
-  * optional dependency libraries from `INJECTION_DEPENDENCY_LIBS`
-  * optional extra files from `INJECTION_EXTRA_FILES`
-  * optional extra OCI mounts from `INJECTION_EXTRA_MOUNTS`
-* Appends the required mounts to the OCI config and writes the updated JSON to `stdout`.
-* Adds `LD_LIBRARY_PATH` when the plan injects a runtime library directory instead of
-  overwriting an existing container library path.
-* Merges optional environment variables from `INJECTION_EXTRA_ENV` into `process.env`.
+## Architecture Overview
+
+This hook is architected as a small compiler for OCI specs.
+
+Its lifecycle in main.rs is a five-stage pipeline:
+
+* Read the incoming OCI config JSON from stdin.
+* Load hook inputs from the config plus environment variables.
+* Discover what libraries the container already exposes.
+* Plan a set of safe config edits.
+* Apply those edits and emit a rewritten OCI config to stdout.
+
+The core data model is:
+* HookInputs is the input contract.
+* Library is the semantic unit of logic: path, parsed linker name, real name, and ABI version.
+* ConfigEdits is the planned output: mounts, LD\_LIBRARY\_PATH additions, extra mounts, extra env, and warnings.
+
+For each input library, the planning layer makes one decision: overwrite an existing container library path, or inject through a directory and extend LD\_LIBRARY\_PATH.
+It always chooses replacement when the ABI major version is respected; otherwise it falls back to directory placement.
## Notes From e0f4181d06c0ebb8c7ac93bf2c47ecd245d4450d Mon Sep 17 00:00:00 2001 From: felipecr Date: Wed, 29 Apr 2026 23:35:42 +0200 Subject: [PATCH 15/15] fix primary lib contraint & real lib injection --- crates/pc_injection_hook/src/main.rs | 162 +++++++++++++++++++++------ test/pc-injection-podman.bats | 66 ++++++++++- 2 files changed, 192 insertions(+), 36 deletions(-) diff --git a/crates/pc_injection_hook/src/main.rs b/crates/pc_injection_hook/src/main.rs index 948e39b..46bfd8d 100644 --- a/crates/pc_injection_hook/src/main.rs +++ b/crates/pc_injection_hook/src/main.rs @@ -584,17 +584,6 @@ fn plan_config_edits(inputs: &HookInputs, container_libs: &[Library]) -> Result< let mut warnings = Vec::new(); let mut ld_library_path_dirs = Vec::new(); - // at least 1 library to inject needs to exist in container - if !inputs - .primary_libs - .iter() - .any(|lib| container_index.contains_key(lib.linker_name())) - { - return Err(Error::message( - "failed to activate library injection: no primary libraries found in the container linker cache", - )); - } - // check injection has major ABI for lib in &inputs.primary_libs { validate_regular_source_file(lib.path(), "primary library")?; @@ -783,28 +772,76 @@ fn fallback_mount_decision( _dir: &Path, warnings: Vec, ) -> Result { - let names = fallback_link_names(host.file_name())?; - let source = create_fallback_staging_dir(host.path(), &names)?; + let fallback = plan_fallback_staging(host.path(), host.file_name())?; let destination = PathBuf::from("/run/pc-injection").join(host.file_name()); let ld_library_path_dir = Some(destination.clone()); Ok(MountDecision { - mounts: vec![MountEdit { - source, - destination, - }], + mounts: vec![ + MountEdit { + source: fallback.staging_dir, + destination: destination.clone(), + }, + MountEdit { + source: fallback.real_source, + destination: destination.join(fallback.real_file_name), + }, + ], ld_library_path_dir, warnings, }) } -fn create_fallback_staging_dir(source: &Path, names: 
&[String]) -> Result { +#[derive(Clone, Debug, Eq, PartialEq)] +struct FallbackStaging { + staging_dir: PathBuf, + real_source: PathBuf, + real_file_name: String, +} + +fn plan_fallback_staging(source: &Path, requested_name: &str) -> Result { + let real_source = canonical_library_source(source)?; + let real_file_name = file_name_to_string(&real_source)?; + let alias_names = fallback_alias_names(requested_name, &real_file_name)?; + let staging_dir = create_fallback_staging_dir(&alias_names, &real_file_name)?; + + Ok(FallbackStaging { + staging_dir, + real_source, + real_file_name, + }) +} + +fn canonical_library_source(source: &Path) -> Result { + let metadata = fs::symlink_metadata(source).map_err(|e| { + Error::io( + format!("failed to stat library source {}", source.display()), + e, + ) + })?; + + if metadata.file_type().is_symlink() { + fs::canonicalize(source).map_err(|e| { + Error::io( + format!( + "failed to resolve canonical library source {}", + source.display() + ), + e, + ) + }) + } else { + Ok(source.to_path_buf()) + } +} + +fn create_fallback_staging_dir(alias_names: &[String], real_file_name: &str) -> Result { let staging_dir = unique_temp_path("staging"); fs::create_dir_all(&staging_dir) .map_err(|e| Error::io(format!("failed to create {}", staging_dir.display()), e))?; - for name in names { + for name in alias_names { let link = staging_dir.join(name); - symlink(source, &link).map_err(|e| { + symlink(real_file_name, &link).map_err(|e| { Error::io( format!("failed to create fallback symlink {}", link.display()), e, @@ -815,24 +852,44 @@ fn create_fallback_staging_dir(source: &Path, names: &[String]) -> Result Result> { - let chain = Library::link_chain_names(file_name)?; - if chain.len() <= 1 { - return Ok(chain); - } - +fn fallback_alias_names(requested_name: &str, real_file_name: &str) -> Result> { let mut names = Vec::new(); - if let Some(soname) = chain.get(1) { - names.push(soname.clone()); - } - if let Some(real_name) = chain.last() { - if 
names.last() != Some(real_name) { - names.push(real_name.clone()); + let mut seen = HashSet::new(); + + for chain in [ + fallback_link_names(requested_name)?, + fallback_link_names(real_file_name)?, + ] { + for name in chain { + if name != real_file_name && seen.insert(name.clone()) { + names.push(name); + } } } + Ok(names) } +fn fallback_link_names(file_name: &str) -> Result> { + let chain = Library::link_chain_names(file_name)?; + match chain.len() { + 0 => Ok(Vec::new()), + 1 => Ok(chain), + _ => { + let mut names = Vec::new(); + if let Some(soname) = chain.get(1) { + names.push(soname.clone()); + } + if let Some(real_name) = chain.last() { + if names.last() != Some(real_name) { + names.push(real_name.clone()); + } + } + Ok(names) + } + } +} + fn index_container_libraries(container_libs: &[Library]) -> HashMap> { let mut index = HashMap::>::new(); for lib in container_libs { @@ -1313,13 +1370,18 @@ mod tests { let container_libs = vec![Library::parse_host("/usr/lib/libmpi.so.13.4").unwrap()]; let edits = plan_config_edits(&inputs, &container_libs).unwrap(); - assert_eq!(edits.mounts.len(), 1); + assert_eq!(edits.mounts.len(), 2); assert_eq!( edits.mounts[0].destination, PathBuf::from("/run/pc-injection/libmpi.so.12.2") ); + assert_eq!(edits.mounts[1].source, host_file); + assert_eq!( + edits.mounts[1].destination, + PathBuf::from("/run/pc-injection/libmpi.so.12.2/libmpi.so.12.2") + ); assert!(fs::symlink_metadata(edits.mounts[0].source.join("libmpi.so.12")).is_ok()); - assert!(fs::symlink_metadata(edits.mounts[0].source.join("libmpi.so.12.2")).is_ok()); + assert!(fs::symlink_metadata(edits.mounts[0].source.join("libmpi.so.12.2")).is_err()); assert_eq!( edits.ld_library_path_dirs, vec![PathBuf::from("/run/pc-injection/libmpi.so.12.2")] @@ -1480,6 +1542,12 @@ mod tests { source: edits.mounts[1].source.clone(), destination: PathBuf::from("/run/pc-injection/libhwloc.so.15.2"), }, + MountEdit { + source: dependency.clone(), + destination: PathBuf::from( + 
"/run/pc-injection/libhwloc.so.15.2/libhwloc.so.15.2" + ), + }, ] ); assert_eq!( @@ -1494,7 +1562,31 @@ mod tests { )] ); assert!(fs::symlink_metadata(edits.mounts[1].source.join("libhwloc.so.15")).is_ok()); - assert!(fs::symlink_metadata(edits.mounts[1].source.join("libhwloc.so.15.2")).is_ok()); + assert!(fs::symlink_metadata(edits.mounts[1].source.join("libhwloc.so.15.2")).is_err()); + + fs::remove_dir_all(&temp_root).unwrap(); + } + + #[test] + fn symlink_dependency_stages_real_file_and_relative_alias() { + let temp_root = unique_temp_path("symlink-dependency"); + let primary = temp_root.join("host/libmpi.so.12.5"); + let dependency_real = temp_root.join("host/libcxi.so.1.5.0"); + let dependency_link = temp_root.join("host/libcxi.so.1"); + + fs::create_dir_all(primary.parent().unwrap()).unwrap(); + fs::write(&primary, b"payload").unwrap(); + fs::write(&dependency_real, b"payload").unwrap(); + symlink("libcxi.so.1.5.0", &dependency_link).unwrap(); + + let fallback = plan_fallback_staging(&dependency_link, "libcxi.so.1").unwrap(); + assert_eq!(fallback.real_source, dependency_real); + assert_eq!(fallback.real_file_name, "libcxi.so.1.5.0"); + assert_eq!( + fs::read_link(fallback.staging_dir.join("libcxi.so.1")).unwrap(), + PathBuf::from("libcxi.so.1.5.0") + ); + assert!(fs::symlink_metadata(fallback.staging_dir.join("libcxi.so.1.5.0")).is_err()); fs::remove_dir_all(&temp_root).unwrap(); } diff --git a/test/pc-injection-podman.bats b/test/pc-injection-podman.bats index a5058b4..d1bd726 100644 --- a/test/pc-injection-podman.bats +++ b/test/pc-injection-podman.bats @@ -192,7 +192,7 @@ EOF printf "LD_LIBRARY_PATH=%s\n" "${LD_LIBRARY_PATH:-}" test -d /run/pc-injection/libpcitest.so.1.0.0 test -L /run/pc-injection/libpcitest.so.1.0.0/libpcitest.so.1 - test -L /run/pc-injection/libpcitest.so.1.0.0/libpcitest.so.1.0.0 + test -f /run/pc-injection/libpcitest.so.1.0.0/libpcitest.so.1.0.0 ' { @@ -206,6 +206,70 @@ EOF rm -rf "$workdir" "$hooks_dir" } +@test "pc_injection_hook 
stages symlinked dependency as real file plus alias in Podman" { + : "${IMAGE:=ubuntu:24.04}" + + podman pull "$IMAGE" >/dev/null + + run command -v gcc + assert_success + gcc_path="$output" + + run command -v ldconfig + assert_success + ldconfig_path="$output" + + run bash -lc ' + while read -r line; do + case "$line" in + *"libz.so.1 "*) + set -- $line + printf "%s\n" "${!#}" + exit 0 + ;; + esac + done < <(ldconfig -p) + exit 1 + ' + assert_success + assert_output --partial "/" + primary_lib="$output" + + workdir="$(mktemp -d)" + src="$workdir/libpcisymlink.c" + dependency_real="$workdir/libpcisymlink.so.1.0.0" + dependency_link="$workdir/libpcisymlink.so.1" + + cat >"$src" <<'EOF' +int pcisymlink_value(void) { return 42; } +EOF + + run "$gcc_path" -shared -fPIC -Wl,-soname,libpcisymlink.so.1 -o "$dependency_real" "$src" + assert_success + ln -s "$(basename "$dependency_real")" "$dependency_link" + + hooks_dir="$(make_pc_injection_hook_dir "$primary_lib" "$dependency_link" "$ldconfig_path")" + [ -n "$hooks_dir" ] + + run podman --hooks-dir="$hooks_dir" run --rm \ + --annotation pc-injection.enable=true \ + "$IMAGE" bash -lc ' + test "$LD_LIBRARY_PATH" = "/run/pc-injection/libpcisymlink.so.1" && + test -L /run/pc-injection/libpcisymlink.so.1/libpcisymlink.so.1 && + test -f /run/pc-injection/libpcisymlink.so.1/libpcisymlink.so.1.0.0 && + test "$(readlink /run/pc-injection/libpcisymlink.so.1/libpcisymlink.so.1)" = "libpcisymlink.so.1.0.0" + ' + + { + printf '%s\n' "$output" + printf '%s\n' "$stderr" + } >&3 + + assert_success + + rm -rf "$workdir" "$hooks_dir" +} + @test "pc_injection_hook adds extra env and mount in Podman" { : "${IMAGE:=ubuntu:24.04}"