From 748d061ec48fd93fb99b9a50c516546492d38d55 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:21:56 +0700 Subject: [PATCH 01/37] chore(backup): scaffold nexus-backup crate Co-Authored-By: Claude Sonnet 4.6 --- Cargo.toml | 1 + crates/nexus-backup/Cargo.toml | 22 ++++++++++++++++++++++ crates/nexus-backup/src/lib.rs | 13 +++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 crates/nexus-backup/Cargo.toml create mode 100644 crates/nexus-backup/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 7c8568c..51622fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ members = [ "apps/agent", "apps/guest-agent", "apps/manager", "apps/installer", +"crates/nexus-backup", "crates/nexus-storage", "crates/nexus-types", ] diff --git a/crates/nexus-backup/Cargo.toml b/crates/nexus-backup/Cargo.toml new file mode 100644 index 0000000..697fc1a --- /dev/null +++ b/crates/nexus-backup/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "nexus-backup" +version = "0.1.0" +edition = "2021" + +[dependencies] +blake3 = "1" +chacha20poly1305 = { version = "0.10", features = ["alloc"] } +fastcdc = "3" +zstd = "0.13" +bincode = "1" +serde = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } +uuid = { workspace = true } +hex = "0.4" +rand = "0.8" +async-trait = "0.1" + +[dev-dependencies] +tokio = { workspace = true } +proptest = "1" diff --git a/crates/nexus-backup/src/lib.rs b/crates/nexus-backup/src/lib.rs new file mode 100644 index 0000000..d42c365 --- /dev/null +++ b/crates/nexus-backup/src/lib.rs @@ -0,0 +1,13 @@ +//! Pure-Rust backup transforms: FastCDC chunking, BLAKE3 hashing, +//! XChaCha20-Poly1305 convergent encryption, manifest serialization. +//! No I/O. Both manager and agent depend on this crate. 
+ +pub mod chunker; +pub mod cipher; +pub mod error; +pub mod manifest; + +pub use chunker::{Chunk, Chunker, ChunkerParams}; +pub use cipher::{decrypt_chunk, decrypt_manifest, encrypt_chunk, encrypt_manifest, ChunkKey}; +pub use error::BackupError; +pub use manifest::{chunk_object_key, manifest_object_key, ChunkRef, Manifest, MANIFEST_VERSION}; From b0778bc91d36ed47df93a302b87f1c4a0865645a Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:22:12 +0700 Subject: [PATCH 02/37] feat(backup): add BackupError Co-Authored-By: Claude Sonnet 4.6 --- crates/nexus-backup/src/error.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 crates/nexus-backup/src/error.rs diff --git a/crates/nexus-backup/src/error.rs b/crates/nexus-backup/src/error.rs new file mode 100644 index 0000000..2704866 --- /dev/null +++ b/crates/nexus-backup/src/error.rs @@ -0,0 +1,25 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum BackupError { + #[error("chunker: {0}")] + Chunker(String), + + #[error("cipher: {0}")] + Cipher(String), + + #[error("manifest: {0}")] + Manifest(String), + + #[error("authentication failed (Poly1305 MAC mismatch)")] + AuthFailed, + + #[error("manifest version mismatch: got {got}, expected {expected}")] + ManifestVersion { got: u32, expected: u32 }, + + #[error("io: {0}")] + Io(#[from] std::io::Error), + + #[error("other: {0}")] + Other(#[source] Box), +} From 9516b04465aa51a217e6ac54d7791ef714681c04 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:23:50 +0700 Subject: [PATCH 03/37] feat(backup): XChaCha20-Poly1305 convergent encryption Co-Authored-By: Claude Sonnet 4.6 --- crates/nexus-backup/src/cipher.rs | 140 ++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 crates/nexus-backup/src/cipher.rs diff --git a/crates/nexus-backup/src/cipher.rs b/crates/nexus-backup/src/cipher.rs new file mode 100644 index 0000000..b150109 --- /dev/null +++ 
b/crates/nexus-backup/src/cipher.rs @@ -0,0 +1,140 @@ +use chacha20poly1305::{aead::Aead, KeyInit, XChaCha20Poly1305, XNonce}; + +use crate::error::BackupError; + +/// 32-byte XChaCha20-Poly1305 key. Per-target. Manager generates it, +/// encrypts with envelope key for storage, sends in-memory to the agent +/// during backup/restore RPC. +pub struct ChunkKey([u8; 32]); + +impl ChunkKey { + pub fn from_bytes(bytes: [u8; 32]) -> Self { Self(bytes) } + pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } +} + +impl Drop for ChunkKey { + fn drop(&mut self) { + for b in &mut self.0 { + unsafe { std::ptr::write_volatile(b, 0); } + } + } +} + +/// Convergent encryption: nonce derived from BLAKE3(plaintext) so identical +/// plaintexts encrypt to identical ciphertexts under the same key. Returns +/// the ciphertext (which already includes the Poly1305 tag). +pub fn encrypt_chunk(key: &ChunkKey, plaintext: &[u8]) -> Result, BackupError> { + let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); + let plaintext_hash = blake3::hash(plaintext); + let nonce = XNonce::from_slice(&plaintext_hash.as_bytes()[..24]); + cipher.encrypt(nonce, plaintext) + .map_err(|e| BackupError::Cipher(format!("encrypt: {e}"))) +} + +/// Decrypt a chunk. The caller must supply the original plaintext hash +/// (recovered from the manifest) so we can reconstruct the nonce. +/// Returns the plaintext on success, AuthFailed on tag mismatch. +pub fn decrypt_chunk( + key: &ChunkKey, + ciphertext: &[u8], + plaintext_hash: &[u8; 32], +) -> Result, BackupError> { + let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); + let nonce = XNonce::from_slice(&plaintext_hash[..24]); + cipher.decrypt(nonce, ciphertext) + .map_err(|_| BackupError::AuthFailed) +} + +/// Encrypt the manifest with a random nonce. Returns nonce-prepended +/// ciphertext: `[nonce(24) | ciphertext+tag]`. 
+pub fn encrypt_manifest(key: &ChunkKey, plaintext: &[u8]) -> Result, BackupError> { + use rand::RngCore; + let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); + let mut nonce_bytes = [0u8; 24]; + rand::thread_rng().fill_bytes(&mut nonce_bytes); + let nonce = XNonce::from_slice(&nonce_bytes); + let ciphertext = cipher.encrypt(nonce, plaintext) + .map_err(|e| BackupError::Cipher(format!("encrypt manifest: {e}")))?; + let mut out = Vec::with_capacity(24 + ciphertext.len()); + out.extend_from_slice(&nonce_bytes); + out.extend_from_slice(&ciphertext); + Ok(out) +} + +/// Inverse of `encrypt_manifest`: input is `[nonce(24) | ciphertext+tag]`. +pub fn decrypt_manifest(key: &ChunkKey, blob: &[u8]) -> Result, BackupError> { + if blob.len() < 24 { + return Err(BackupError::Cipher("manifest blob too short".into())); + } + let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); + let nonce = XNonce::from_slice(&blob[..24]); + cipher.decrypt(nonce, &blob[24..]) + .map_err(|_| BackupError::AuthFailed) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn key() -> ChunkKey { ChunkKey::from_bytes([0x42u8; 32]) } + + #[test] + fn convergent_chunk_round_trip() { + let k = key(); + let plain = b"hello, backup pipeline"; + let plain_hash: [u8; 32] = *blake3::hash(plain).as_bytes(); + let cipher = encrypt_chunk(&k, plain).unwrap(); + let recovered = decrypt_chunk(&k, &cipher, &plain_hash).unwrap(); + assert_eq!(recovered, plain); + } + + #[test] + fn convergent_same_plaintext_same_ciphertext() { + let k = key(); + let plain = b"identical plaintext"; + let c1 = encrypt_chunk(&k, plain).unwrap(); + let c2 = encrypt_chunk(&k, plain).unwrap(); + assert_eq!(c1, c2, "convergent encryption must be deterministic"); + } + + #[test] + fn different_plaintext_different_ciphertext() { + let k = key(); + let c1 = encrypt_chunk(&k, b"alpha").unwrap(); + let c2 = encrypt_chunk(&k, b"bravo").unwrap(); + assert_ne!(c1, c2); + } + + #[test] + fn manifest_round_trip_with_random_nonce() { + 
let k = key(); + let plain = b"manifest payload bytes"; + let blob1 = encrypt_manifest(&k, plain).unwrap(); + let blob2 = encrypt_manifest(&k, plain).unwrap(); + assert_ne!(blob1, blob2, "manifest nonce must be random — successive encrypts differ"); + let r1 = decrypt_manifest(&k, &blob1).unwrap(); + assert_eq!(r1, plain); + } + + #[test] + fn tampered_chunk_fails_auth() { + let k = key(); + let plain = b"sensitive content"; + let plain_hash: [u8; 32] = *blake3::hash(plain).as_bytes(); + let mut cipher = encrypt_chunk(&k, plain).unwrap(); + cipher[0] ^= 0x01; + let err = decrypt_chunk(&k, &cipher, &plain_hash).unwrap_err(); + assert!(matches!(err, BackupError::AuthFailed)); + } + + #[test] + fn wrong_key_fails_auth() { + let k1 = key(); + let k2 = ChunkKey::from_bytes([0x99u8; 32]); + let plain = b"abc"; + let plain_hash: [u8; 32] = *blake3::hash(plain).as_bytes(); + let cipher = encrypt_chunk(&k1, plain).unwrap(); + let err = decrypt_chunk(&k2, &cipher, &plain_hash).unwrap_err(); + assert!(matches!(err, BackupError::AuthFailed)); + } +} From 080ebb9e33498e69821b15ed53cda6158a334de3 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:24:00 +0700 Subject: [PATCH 04/37] feat(backup): FastCDC chunker over AsyncRead Co-Authored-By: Claude Sonnet 4.6 --- crates/nexus-backup/src/chunker.rs | 140 +++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 crates/nexus-backup/src/chunker.rs diff --git a/crates/nexus-backup/src/chunker.rs b/crates/nexus-backup/src/chunker.rs new file mode 100644 index 0000000..12629b4 --- /dev/null +++ b/crates/nexus-backup/src/chunker.rs @@ -0,0 +1,140 @@ +use crate::error::BackupError; +use tokio::io::{AsyncRead, AsyncReadExt}; + +#[derive(Debug, Clone, Copy)] +pub struct ChunkerParams { + pub min_size: u32, + pub avg_size: u32, + pub max_size: u32, +} + +impl Default for ChunkerParams { + fn default() -> Self { + Self { min_size: 4 * 1024, avg_size: 64 * 1024, max_size: 1024 * 1024 } + } +} + +pub 
struct Chunk { + pub plaintext_offset: u64, + pub plaintext_length: u32, + pub plaintext_bytes: Vec, +} + +pub struct Chunker { + reader: R, + params: ChunkerParams, + buf: Vec, + offset: u64, + eof: bool, +} + +impl Chunker { + pub fn new(reader: R, params: ChunkerParams) -> Self { + Self { + reader, + params, + buf: Vec::with_capacity(params.max_size as usize * 2), + offset: 0, + eof: false, + } + } + + async fn fill_until(&mut self, target: usize) -> Result<(), BackupError> { + while self.buf.len() < target && !self.eof { + let mut tmp = vec![0u8; (target - self.buf.len()).max(64 * 1024)]; + let n = self.reader.read(&mut tmp).await?; + if n == 0 { self.eof = true; break; } + tmp.truncate(n); + self.buf.extend_from_slice(&tmp); + } + Ok(()) + } + + pub async fn next_chunk(&mut self) -> Result, BackupError> { + self.fill_until(self.params.max_size as usize).await?; + if self.buf.is_empty() { return Ok(None); } + + let cdc = fastcdc::v2020::FastCDC::new( + &self.buf, + self.params.min_size, + self.params.avg_size, + self.params.max_size, + ); + let first = cdc.into_iter().next(); + let cut_at = match first { + Some(chunk_meta) => chunk_meta.length, + None => self.buf.len(), + }; + + let bytes: Vec = self.buf.drain(..cut_at).collect(); + let chunk = Chunk { + plaintext_offset: self.offset, + plaintext_length: bytes.len() as u32, + plaintext_bytes: bytes, + }; + self.offset += chunk.plaintext_length as u64; + Ok(Some(chunk)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::io::BufReader; + + fn deterministic_payload(size: usize) -> Vec { + let mut v = vec![0u8; size]; + let mut s: u64 = 0xdeadbeefu64; + for byte in v.iter_mut() { + s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407); + *byte = (s >> 33) as u8; + } + v + } + + #[tokio::test] + async fn chunks_emit_in_order_and_cover_input() { + let payload = deterministic_payload(1_500_000); + let reader = BufReader::new(&payload[..]); + let mut c = Chunker::new(reader, 
ChunkerParams::default()); + let mut total = 0u64; + let mut last_offset: i64 = -1; + while let Some(chunk) = c.next_chunk().await.unwrap() { + assert!(chunk.plaintext_offset as i64 > last_offset); + assert_eq!(chunk.plaintext_bytes.len(), chunk.plaintext_length as usize); + assert_eq!(chunk.plaintext_offset, total); + total += chunk.plaintext_length as u64; + last_offset = chunk.plaintext_offset as i64; + } + assert_eq!(total, payload.len() as u64); + } + + #[tokio::test] + async fn deterministic_chunking_same_input() { + let payload = deterministic_payload(800_000); + let mut c1 = Chunker::new(BufReader::new(&payload[..]), ChunkerParams::default()); + let mut c2 = Chunker::new(BufReader::new(&payload[..]), ChunkerParams::default()); + + let mut h1 = Vec::new(); + let mut h2 = Vec::new(); + while let Some(chunk) = c1.next_chunk().await.unwrap() { h1.push(blake3::hash(&chunk.plaintext_bytes)); } + while let Some(chunk) = c2.next_chunk().await.unwrap() { h2.push(blake3::hash(&chunk.plaintext_bytes)); } + assert_eq!(h1, h2, "FastCDC must be deterministic for the same input"); + } + + #[tokio::test] + async fn empty_input_yields_no_chunks() { + let payload: Vec = Vec::new(); + let mut c = Chunker::new(BufReader::new(&payload[..]), ChunkerParams::default()); + assert!(c.next_chunk().await.unwrap().is_none()); + } + + #[tokio::test] + async fn small_input_yields_single_chunk() { + let payload = deterministic_payload(2 * 1024); + let mut c = Chunker::new(BufReader::new(&payload[..]), ChunkerParams::default()); + let chunk = c.next_chunk().await.unwrap().expect("one chunk"); + assert_eq!(chunk.plaintext_length as usize, payload.len()); + assert!(c.next_chunk().await.unwrap().is_none()); + } +} From 50823617a58616f4ce2c7a981b233fe1653d4746 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:24:09 +0700 Subject: [PATCH 05/37] feat(backup): Manifest + ChunkRef serialization (bincode+zstd) Co-Authored-By: Claude Sonnet 4.6 --- 
crates/nexus-backup/src/manifest.rs | 126 ++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 crates/nexus-backup/src/manifest.rs diff --git a/crates/nexus-backup/src/manifest.rs b/crates/nexus-backup/src/manifest.rs new file mode 100644 index 0000000..597113f --- /dev/null +++ b/crates/nexus-backup/src/manifest.rs @@ -0,0 +1,126 @@ +use crate::error::BackupError; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +pub const MANIFEST_VERSION: u32 = 1; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ChunkRef { + pub plaintext_offset: u64, + pub plaintext_length: u32, + pub plaintext_hash: [u8; 32], + pub chunk_id: [u8; 32], + pub ciphertext_length: u32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct Manifest { + pub version: u32, + pub backup_id: Uuid, + pub source_volume_id: Uuid, + pub source_snapshot_id: Option, + pub total_plaintext_size: u64, + pub created_at_unix_seconds: i64, + pub chunks: Vec, +} + +impl Manifest { + pub fn serialize_compressed(&self) -> Result, BackupError> { + let bytes = bincode::serialize(self) + .map_err(|e| BackupError::Manifest(format!("bincode: {e}")))?; + let compressed = zstd::stream::encode_all(&bytes[..], 3) + .map_err(|e| BackupError::Manifest(format!("zstd: {e}")))?; + Ok(compressed) + } + + pub fn deserialize_compressed(blob: &[u8]) -> Result { + let bytes = zstd::stream::decode_all(blob) + .map_err(|e| BackupError::Manifest(format!("zstd decode: {e}")))?; + let manifest: Manifest = bincode::deserialize(&bytes) + .map_err(|e| BackupError::Manifest(format!("bincode decode: {e}")))?; + if manifest.version != MANIFEST_VERSION { + return Err(BackupError::ManifestVersion { got: manifest.version, expected: MANIFEST_VERSION }); + } + Ok(manifest) + } +} + +pub fn chunk_object_key(prefix: &str, chunk_id: &[u8; 32]) -> String { + let hex = hex::encode(chunk_id); + if prefix.is_empty() { + format!("chunks/{}/{}", &hex[..2], hex) + } else 
{ + format!("{}/chunks/{}/{}", prefix.trim_end_matches('/'), &hex[..2], hex) + } +} + +pub fn manifest_object_key(prefix: &str, backup_id: &Uuid) -> String { + if prefix.is_empty() { + format!("manifests/{}.bin", backup_id) + } else { + format!("{}/manifests/{}.bin", prefix.trim_end_matches('/'), backup_id) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_manifest() -> Manifest { + Manifest { + version: MANIFEST_VERSION, + backup_id: Uuid::nil(), + source_volume_id: Uuid::nil(), + source_snapshot_id: None, + total_plaintext_size: 12345, + created_at_unix_seconds: 1735689600, + chunks: vec![ + ChunkRef { + plaintext_offset: 0, plaintext_length: 4096, + plaintext_hash: [1u8; 32], chunk_id: [2u8; 32], + ciphertext_length: 4128, + }, + ChunkRef { + plaintext_offset: 4096, plaintext_length: 8192, + plaintext_hash: [3u8; 32], chunk_id: [4u8; 32], + ciphertext_length: 8224, + }, + ], + } + } + + #[test] + fn manifest_round_trip() { + let m = sample_manifest(); + let blob = m.serialize_compressed().unwrap(); + let recovered = Manifest::deserialize_compressed(&blob).unwrap(); + assert_eq!(m, recovered); + } + + #[test] + fn manifest_version_mismatch_rejected() { + let mut m = sample_manifest(); + m.version = 999; + let blob = m.serialize_compressed().unwrap(); + let err = Manifest::deserialize_compressed(&blob).unwrap_err(); + assert!(matches!(err, BackupError::ManifestVersion { got: 999, expected: 1 })); + } + + #[test] + fn chunk_key_format() { + let mut id = [0u8; 32]; + id[0] = 0xab; + id[1] = 0xcd; + let key = chunk_object_key("", &id); + assert!(key.starts_with("chunks/ab/abcd")); + let key2 = chunk_object_key("backup-prefix/", &id); + assert!(key2.starts_with("backup-prefix/chunks/ab/abcd")); + } + + #[test] + fn manifest_key_format() { + let id = Uuid::nil(); + assert_eq!(manifest_object_key("", &id), format!("manifests/{}.bin", id)); + assert_eq!(manifest_object_key("p/", &id), format!("p/manifests/{}.bin", id)); + } +} From 
0cc02e0ba3dc27e6417daf4494276b25576c66d5 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:24:39 +0700 Subject: [PATCH 06/37] chore(backup): cargo fmt nexus-backup Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 285 +++++++++++++++++++++++++--- crates/nexus-backup/src/chunker.rs | 27 ++- crates/nexus-backup/src/cipher.rs | 33 +++- crates/nexus-backup/src/manifest.rs | 52 +++-- 4 files changed, 341 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c5f4c69..05d02b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", - "cpufeatures", + "cpufeatures 0.2.17", ] [[package]] @@ -191,10 +191,22 @@ checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" dependencies = [ "base64ct", "blake2", - "cpufeatures", + "cpufeatures 0.2.17", "password-hash", ] +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -347,13 +359,22 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" 
dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "cexpr", "clang-sys", "itertools 0.12.1", @@ -370,6 +391,21 @@ dependencies = [ "which", ] +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bit_field" version = "0.10.3" @@ -384,11 +420,11 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.4" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -400,6 +436,20 @@ dependencies = [ "digest", ] +[[package]] +name = "blake3" +version = "1.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aa83c34e62843d924f905e0f5c866eb1dd6545fc4d719e803d9ba6030371fce" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", + "cpufeatures 0.3.0", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -499,6 +549,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" dependencies = [ "find-msvc-tools", + "jobserver", + "libc", "shlex", ] @@ -523,6 +575,30 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.9.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures 0.2.17", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.42" @@ -545,6 +621,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -648,6 +725,12 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "constant_time_eq" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -663,6 +746,15 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + [[package]] name = "crc" version = "3.3.0" @@ -727,7 +819,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "crossterm_winapi", "mio", "parking_lot", @@ -791,7 +883,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", 
"curve25519-dalek-derive", "digest", "fiat-crypto", @@ -1133,6 +1225,12 @@ dependencies = [ "zune-inflate", ] +[[package]] +name = "fastcdc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf51ceb43e96afbfe4dd5c6f6082af5dfd60e220820b8123792d61963f2ce6bc" + [[package]] name = "fastrand" version = "2.3.0" @@ -1894,7 +1992,7 @@ version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "cfg-if", "libc", ] @@ -1954,6 +2052,16 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + [[package]] name = "jpeg-decoder" version = "0.3.2" @@ -2022,7 +2130,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "libc", "redox_syscall", ] @@ -2277,6 +2385,25 @@ dependencies = [ "version_check", ] +[[package]] +name = "nexus-backup" +version = "0.1.0" +dependencies = [ + "async-trait", + "bincode", + "blake3", + "chacha20poly1305", + "fastcdc", + "hex", + "proptest", + "rand 0.8.5", + "serde", + "thiserror 1.0.69", + "tokio", + "uuid", + "zstd", +] + [[package]] name = "nexus-storage" version = "0.1.0" @@ -2307,7 +2434,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "cfg-if", 
"cfg_aliases", "libc", @@ -2535,7 +2662,7 @@ version = "0.10.76" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "cfg-if", "foreign-types", "libc", @@ -2729,6 +2856,17 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures 0.2.17", + "opaque-debug", + "universal-hash", +] + [[package]] name = "polyval" version = "0.6.2" @@ -2736,7 +2874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "opaque-debug", "universal-hash", ] @@ -2832,6 +2970,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proptest" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.11.1", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "qoi" version = "0.4.1" @@ -2841,6 +2998,12 @@ dependencies = [ "bytemuck", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quick-xml" version = "0.33.0" @@ -2980,13 +3143,22 @@ dependencies = [ "getrandom 0.3.3", ] +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + [[package]] name = "ratatui" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "cassowary", "compact_str", "crossterm", @@ -3027,7 +3199,7 @@ version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", ] [[package]] @@ -3230,7 +3402,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "errno", "libc", "linux-raw-sys 0.4.15", @@ -3243,7 +3415,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "errno", "libc", "linux-raw-sys 0.11.0", @@ -3291,6 +3463,18 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" +[[package]] +name = "rusty-fork" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.20" @@ -3397,9 +3581,9 @@ checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.226" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", @@ -3417,18 +3601,18 @@ dependencies = [ [[package]] name = "serde_core" -version = "1.0.226" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.226" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -3578,7 +3762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -3589,7 +3773,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -3799,7 +3983,7 @@ checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.9.4", + "bitflags 2.11.1", "byteorder", "bytes", "chrono", @@ -3843,7 +4027,7 @@ checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.9.4", + "bitflags 2.11.1", "byteorder", "chrono", "crc", @@ -4323,7 +4507,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "bytes", "http", "http-body", @@ -4339,7 +4523,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.11.1", "bytes", "futures-util", "http", @@ -4474,6 +4658,12 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.8.1" @@ -4703,6 +4893,15 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -5345,6 +5544,34 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] + [[package]] name = "zune-inflate" version = "0.2.54" diff --git a/crates/nexus-backup/src/chunker.rs b/crates/nexus-backup/src/chunker.rs index 12629b4..f552ab0 100644 --- a/crates/nexus-backup/src/chunker.rs +++ b/crates/nexus-backup/src/chunker.rs @@ -10,7 +10,11 @@ pub struct ChunkerParams { impl Default for ChunkerParams { fn default() -> Self { - Self { min_size: 4 * 1024, avg_size: 64 * 1024, max_size: 1024 * 1024 } + Self { + min_size: 4 * 1024, + avg_size: 64 * 1024, + max_size: 1024 * 1024, + } } } @@ -43,7 +47,10 @@ impl Chunker { while self.buf.len() < target && !self.eof { let mut tmp = vec![0u8; (target - self.buf.len()).max(64 * 1024)]; let n = self.reader.read(&mut tmp).await?; - if n == 0 { self.eof = true; break; } + if n == 0 { + self.eof = true; + break; + } tmp.truncate(n); self.buf.extend_from_slice(&tmp); } @@ -52,7 +59,9 @@ impl Chunker { pub async fn next_chunk(&mut self) -> Result, BackupError> { self.fill_until(self.params.max_size as usize).await?; - if self.buf.is_empty() { return Ok(None); } + if self.buf.is_empty() { + return Ok(None); + } let cdc = fastcdc::v2020::FastCDC::new( &self.buf, @@ -86,7 +95,9 @@ mod tests { let mut v = vec![0u8; size]; let mut s: u64 = 0xdeadbeefu64; for byte in v.iter_mut() { - s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407); + s = s + .wrapping_mul(6364136223846793005) + .wrapping_add(1442695040888963407); *byte = (s >> 33) as u8; } v @@ -117,8 +128,12 @@ mod tests { let mut h1 = Vec::new(); let mut h2 = Vec::new(); - while let Some(chunk) = c1.next_chunk().await.unwrap() { h1.push(blake3::hash(&chunk.plaintext_bytes)); } - while let Some(chunk) = c2.next_chunk().await.unwrap() { h2.push(blake3::hash(&chunk.plaintext_bytes)); } + while let Some(chunk) = c1.next_chunk().await.unwrap() { + h1.push(blake3::hash(&chunk.plaintext_bytes)); + } + while let Some(chunk) = 
c2.next_chunk().await.unwrap() { + h2.push(blake3::hash(&chunk.plaintext_bytes)); + } assert_eq!(h1, h2, "FastCDC must be deterministic for the same input"); } diff --git a/crates/nexus-backup/src/cipher.rs b/crates/nexus-backup/src/cipher.rs index b150109..beab088 100644 --- a/crates/nexus-backup/src/cipher.rs +++ b/crates/nexus-backup/src/cipher.rs @@ -8,14 +8,20 @@ use crate::error::BackupError; pub struct ChunkKey([u8; 32]); impl ChunkKey { - pub fn from_bytes(bytes: [u8; 32]) -> Self { Self(bytes) } - pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } + pub fn from_bytes(bytes: [u8; 32]) -> Self { + Self(bytes) + } + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } } impl Drop for ChunkKey { fn drop(&mut self) { for b in &mut self.0 { - unsafe { std::ptr::write_volatile(b, 0); } + unsafe { + std::ptr::write_volatile(b, 0); + } } } } @@ -27,7 +33,8 @@ pub fn encrypt_chunk(key: &ChunkKey, plaintext: &[u8]) -> Result, Backup let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); let plaintext_hash = blake3::hash(plaintext); let nonce = XNonce::from_slice(&plaintext_hash.as_bytes()[..24]); - cipher.encrypt(nonce, plaintext) + cipher + .encrypt(nonce, plaintext) .map_err(|e| BackupError::Cipher(format!("encrypt: {e}"))) } @@ -41,7 +48,8 @@ pub fn decrypt_chunk( ) -> Result, BackupError> { let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); let nonce = XNonce::from_slice(&plaintext_hash[..24]); - cipher.decrypt(nonce, ciphertext) + cipher + .decrypt(nonce, ciphertext) .map_err(|_| BackupError::AuthFailed) } @@ -53,7 +61,8 @@ pub fn encrypt_manifest(key: &ChunkKey, plaintext: &[u8]) -> Result, Bac let mut nonce_bytes = [0u8; 24]; rand::thread_rng().fill_bytes(&mut nonce_bytes); let nonce = XNonce::from_slice(&nonce_bytes); - let ciphertext = cipher.encrypt(nonce, plaintext) + let ciphertext = cipher + .encrypt(nonce, plaintext) .map_err(|e| BackupError::Cipher(format!("encrypt manifest: {e}")))?; let mut out = Vec::with_capacity(24 + ciphertext.len()); 
out.extend_from_slice(&nonce_bytes); @@ -68,7 +77,8 @@ pub fn decrypt_manifest(key: &ChunkKey, blob: &[u8]) -> Result, BackupEr } let cipher = XChaCha20Poly1305::new(key.as_bytes().into()); let nonce = XNonce::from_slice(&blob[..24]); - cipher.decrypt(nonce, &blob[24..]) + cipher + .decrypt(nonce, &blob[24..]) .map_err(|_| BackupError::AuthFailed) } @@ -76,7 +86,9 @@ pub fn decrypt_manifest(key: &ChunkKey, blob: &[u8]) -> Result, BackupEr mod tests { use super::*; - fn key() -> ChunkKey { ChunkKey::from_bytes([0x42u8; 32]) } + fn key() -> ChunkKey { + ChunkKey::from_bytes([0x42u8; 32]) + } #[test] fn convergent_chunk_round_trip() { @@ -111,7 +123,10 @@ mod tests { let plain = b"manifest payload bytes"; let blob1 = encrypt_manifest(&k, plain).unwrap(); let blob2 = encrypt_manifest(&k, plain).unwrap(); - assert_ne!(blob1, blob2, "manifest nonce must be random — successive encrypts differ"); + assert_ne!( + blob1, blob2, + "manifest nonce must be random — successive encrypts differ" + ); let r1 = decrypt_manifest(&k, &blob1).unwrap(); assert_eq!(r1, plain); } diff --git a/crates/nexus-backup/src/manifest.rs b/crates/nexus-backup/src/manifest.rs index 597113f..7a1df33 100644 --- a/crates/nexus-backup/src/manifest.rs +++ b/crates/nexus-backup/src/manifest.rs @@ -26,8 +26,8 @@ pub struct Manifest { impl Manifest { pub fn serialize_compressed(&self) -> Result, BackupError> { - let bytes = bincode::serialize(self) - .map_err(|e| BackupError::Manifest(format!("bincode: {e}")))?; + let bytes = + bincode::serialize(self).map_err(|e| BackupError::Manifest(format!("bincode: {e}")))?; let compressed = zstd::stream::encode_all(&bytes[..], 3) .map_err(|e| BackupError::Manifest(format!("zstd: {e}")))?; Ok(compressed) @@ -39,7 +39,10 @@ impl Manifest { let manifest: Manifest = bincode::deserialize(&bytes) .map_err(|e| BackupError::Manifest(format!("bincode decode: {e}")))?; if manifest.version != MANIFEST_VERSION { - return Err(BackupError::ManifestVersion { got: manifest.version, 
expected: MANIFEST_VERSION }); + return Err(BackupError::ManifestVersion { + got: manifest.version, + expected: MANIFEST_VERSION, + }); } Ok(manifest) } @@ -50,7 +53,12 @@ pub fn chunk_object_key(prefix: &str, chunk_id: &[u8; 32]) -> String { if prefix.is_empty() { format!("chunks/{}/{}", &hex[..2], hex) } else { - format!("{}/chunks/{}/{}", prefix.trim_end_matches('/'), &hex[..2], hex) + format!( + "{}/chunks/{}/{}", + prefix.trim_end_matches('/'), + &hex[..2], + hex + ) } } @@ -58,7 +66,11 @@ pub fn manifest_object_key(prefix: &str, backup_id: &Uuid) -> String { if prefix.is_empty() { format!("manifests/{}.bin", backup_id) } else { - format!("{}/manifests/{}.bin", prefix.trim_end_matches('/'), backup_id) + format!( + "{}/manifests/{}.bin", + prefix.trim_end_matches('/'), + backup_id + ) } } @@ -76,13 +88,17 @@ mod tests { created_at_unix_seconds: 1735689600, chunks: vec![ ChunkRef { - plaintext_offset: 0, plaintext_length: 4096, - plaintext_hash: [1u8; 32], chunk_id: [2u8; 32], + plaintext_offset: 0, + plaintext_length: 4096, + plaintext_hash: [1u8; 32], + chunk_id: [2u8; 32], ciphertext_length: 4128, }, ChunkRef { - plaintext_offset: 4096, plaintext_length: 8192, - plaintext_hash: [3u8; 32], chunk_id: [4u8; 32], + plaintext_offset: 4096, + plaintext_length: 8192, + plaintext_hash: [3u8; 32], + chunk_id: [4u8; 32], ciphertext_length: 8224, }, ], @@ -103,7 +119,13 @@ mod tests { m.version = 999; let blob = m.serialize_compressed().unwrap(); let err = Manifest::deserialize_compressed(&blob).unwrap_err(); - assert!(matches!(err, BackupError::ManifestVersion { got: 999, expected: 1 })); + assert!(matches!( + err, + BackupError::ManifestVersion { + got: 999, + expected: 1 + } + )); } #[test] @@ -120,7 +142,13 @@ mod tests { #[test] fn manifest_key_format() { let id = Uuid::nil(); - assert_eq!(manifest_object_key("", &id), format!("manifests/{}.bin", id)); - assert_eq!(manifest_object_key("p/", &id), format!("p/manifests/{}.bin", id)); + assert_eq!( + 
manifest_object_key("", &id), + format!("manifests/{}.bin", id) + ); + assert_eq!( + manifest_object_key("p/", &id), + format!("p/manifests/{}.bin", id) + ); } } From aa4ba356d0734bd277e277965091cfde28c4268f Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:28:52 +0700 Subject: [PATCH 07/37] feat(backup): add HostBackend::read_snapshot trait method (impls follow) Extends the HostBackend trait with read_snapshot(&VolumeSnapshotHandle) returning Box, for use by the backup pipeline to stream snapshot bytes. Adds tokio to nexus-storage [dependencies] (was dev-only) so the trait can name tokio::io::AsyncRead. Co-Authored-By: Claude Sonnet 4.6 --- crates/nexus-storage/Cargo.toml | 1 + crates/nexus-storage/src/host.rs | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/crates/nexus-storage/Cargo.toml b/crates/nexus-storage/Cargo.toml index 73e14eb..41828dc 100644 --- a/crates/nexus-storage/Cargo.toml +++ b/crates/nexus-storage/Cargo.toml @@ -10,6 +10,7 @@ thiserror = { workspace = true } uuid = { workspace = true } chrono = { workspace = true } async-trait = "0.1" +tokio = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/crates/nexus-storage/src/host.rs b/crates/nexus-storage/src/host.rs index 92ad9fa..fa2abc9 100644 --- a/crates/nexus-storage/src/host.rs +++ b/crates/nexus-storage/src/host.rs @@ -1,5 +1,5 @@ use crate::error::StorageError; -use crate::handle::{AttachedPath, VolumeHandle}; +use crate::handle::{AttachedPath, VolumeHandle, VolumeSnapshotHandle}; use crate::types::BackendKind; use async_trait::async_trait; use std::path::Path; @@ -33,4 +33,19 @@ pub trait HostBackend: Send + Sync { source: &Path, target_size_bytes: u64, ) -> Result<(), StorageError>; + + /// Open a snapshot for reading. Returns a stream of bytes representing + /// the volume contents at snapshot time. Used by the backup pipeline. 
+ /// + /// Implementations: + /// - LocalFile: open the snapshot file from disk. + /// - Iscsi/TrueNasIscsi: attach the snapshot LUN read-only and return + /// a File handle over the block device. + /// + /// Returns `StorageError::NotSupported("read_snapshot")` if the backend + /// can't expose a snapshot for streaming reads. + async fn read_snapshot( + &self, + snap: &VolumeSnapshotHandle, + ) -> Result, StorageError>; } From fc368a3f6e46537f150ce4c4a11861d508c02bbd Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:30:31 +0700 Subject: [PATCH 08/37] feat(backup): LocalFileHostBackend::read_snapshot opens snapshot file Implements HostBackend::read_snapshot for LocalFileHostBackend: treats the snapshot locator as a file path and opens it with tokio::fs::File. Adds a tokio test verifying round-trip file contents via AsyncReadExt. Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/local_file.rs | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/apps/agent/src/features/storage/local_file.rs b/apps/agent/src/features/storage/local_file.rs index f1ae9c0..4ba64f4 100644 --- a/apps/agent/src/features/storage/local_file.rs +++ b/apps/agent/src/features/storage/local_file.rs @@ -1,4 +1,4 @@ -use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle}; +use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle}; use std::path::{Path, PathBuf}; /// Agent-side LocalFile backend. Trivial: the locator IS the file path. @@ -45,6 +45,15 @@ impl HostBackend for LocalFileHostBackend { dst.flush().await?; Ok(()) } + + async fn read_snapshot( + &self, + snap: &VolumeSnapshotHandle, + ) -> Result, StorageError> { + let path = std::path::PathBuf::from(&snap.locator); + let f = tokio::fs::File::open(&path).await?; + Ok(Box::new(f)) + } } #[cfg(test)] @@ -83,4 +92,29 @@ mod tests { // File extended to target size (sparse tail OK). 
assert_eq!(std::fs::metadata(&dst).unwrap().len(), 16 * 1024); } + + #[tokio::test] + async fn read_snapshot_returns_file_contents() { + use nexus_storage::{BackendInstanceId, VolumeSnapshotHandle}; + use tokio::io::AsyncReadExt; + use uuid::Uuid; + + let dir = tempfile::tempdir().unwrap(); + let p = dir.path().join("snap.img"); + std::fs::write(&p, b"snapshot-bytes").unwrap(); + + let snap = VolumeSnapshotHandle { + snapshot_id: Uuid::new_v4(), + source_volume_id: Uuid::new_v4(), + backend_id: BackendInstanceId(Uuid::new_v4()), + backend_kind: BackendKind::LocalFile, + locator: p.display().to_string(), + }; + + let backend = LocalFileHostBackend; + let mut reader = backend.read_snapshot(&snap).await.unwrap(); + let mut buf = Vec::new(); + reader.read_to_end(&mut buf).await.unwrap(); + assert_eq!(buf, b"snapshot-bytes"); + } } From ab23f6804c80b89a4392e89beac6d8d39688cbd0 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:30:37 +0700 Subject: [PATCH 09/37] feat(backup): IscsiHostBackend::read_snapshot via iscsiadm login Implements HostBackend::read_snapshot for IscsiHostBackend: parses the snapshot locator JSON, logs in via iscsiadm (same as attach), then polls for the by-path block device to appear (up to 3 s) before returning a tokio::fs::File handle for streaming reads. Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/iscsi.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/apps/agent/src/features/storage/iscsi.rs b/apps/agent/src/features/storage/iscsi.rs index 18e54cf..ecdf98a 100644 --- a/apps/agent/src/features/storage/iscsi.rs +++ b/apps/agent/src/features/storage/iscsi.rs @@ -6,7 +6,7 @@ //! Locator format (JSON): {"iqn":"...","lun":N,"dataset":"...","portal":"..."} //! `dataset` is ignored on the host side; it's a control-plane concern. 
-use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle}; +use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle}; use serde::Deserialize; use std::path::{Path, PathBuf}; @@ -130,6 +130,28 @@ impl HostBackend for IscsiHostBackend { let _ = target_size_bytes; Ok(()) } + + async fn read_snapshot( + &self, + snap: &VolumeSnapshotHandle, + ) -> Result, StorageError> { + // Snapshot's locator has the same JSON shape as a volume's locator — + // {iqn, lun, portal, dataset?} — but the LUN refers to the read-only + // snapshot extent. + let loc = Self::parse_locator(&snap.locator)?; + Self::iscsiadm_login(&loc).await?; + let dev = Self::block_device_path(&loc); + for _ in 0..30 { + if dev.exists() { + let f = tokio::fs::File::open(&dev).await?; + return Ok(Box::new(f)); + } + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + Err(StorageError::Backend( + format!("snapshot device {} did not appear after iscsi login", dev.display()).into(), + )) + } } #[cfg(test)] From 151f949b67ac7adea41f6bf783264ae5a443e07c Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:30:57 +0700 Subject: [PATCH 10/37] chore: cargo fmt - wrap long import lines in storage backends Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/iscsi.rs | 10 ++++++++-- apps/agent/src/features/storage/local_file.rs | 4 +++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/apps/agent/src/features/storage/iscsi.rs b/apps/agent/src/features/storage/iscsi.rs index ecdf98a..8516c46 100644 --- a/apps/agent/src/features/storage/iscsi.rs +++ b/apps/agent/src/features/storage/iscsi.rs @@ -6,7 +6,9 @@ //! Locator format (JSON): {"iqn":"...","lun":N,"dataset":"...","portal":"..."} //! `dataset` is ignored on the host side; it's a control-plane concern. 
-use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle}; +use nexus_storage::{ + AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle, +}; use serde::Deserialize; use std::path::{Path, PathBuf}; @@ -149,7 +151,11 @@ impl HostBackend for IscsiHostBackend { tokio::time::sleep(std::time::Duration::from_millis(100)).await; } Err(StorageError::Backend( - format!("snapshot device {} did not appear after iscsi login", dev.display()).into(), + format!( + "snapshot device {} did not appear after iscsi login", + dev.display() + ) + .into(), )) } } diff --git a/apps/agent/src/features/storage/local_file.rs b/apps/agent/src/features/storage/local_file.rs index 4ba64f4..1dfac38 100644 --- a/apps/agent/src/features/storage/local_file.rs +++ b/apps/agent/src/features/storage/local_file.rs @@ -1,4 +1,6 @@ -use nexus_storage::{AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle}; +use nexus_storage::{ + AttachedPath, BackendKind, HostBackend, StorageError, VolumeHandle, VolumeSnapshotHandle, +}; use std::path::{Path, PathBuf}; /// Agent-side LocalFile backend. Trivial: the locator IS the file path. 
From 5a5810effabbc5b4f933d09c231c8bd5a27537e0 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:36:55 +0700 Subject: [PATCH 11/37] =?UTF-8?q?feat(backup):=20migration=200036=20?= =?UTF-8?q?=E2=80=94=20backup=5Ftarget,=20backup,=20backup=5Fgc=5Frun?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- apps/manager/migrations/0036_backup.sql | 62 +++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 apps/manager/migrations/0036_backup.sql diff --git a/apps/manager/migrations/0036_backup.sql b/apps/manager/migrations/0036_backup.sql new file mode 100644 index 0000000..22fb0b1 --- /dev/null +++ b/apps/manager/migrations/0036_backup.sql @@ -0,0 +1,62 @@ +-- 0036_backup.sql — Chunked encrypted backup pipeline. + +CREATE TABLE IF NOT EXISTS backup_target ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL UNIQUE, + endpoint TEXT NOT NULL, + region TEXT, + bucket TEXT NOT NULL, + prefix TEXT NOT NULL DEFAULT '', + access_key_id TEXT NOT NULL, + encrypted_secret_access_key BYTEA NOT NULL, + encrypted_target_key BYTEA NOT NULL, + gc_hour SMALLINT NOT NULL DEFAULT 3 CHECK (gc_hour BETWEEN 0 AND 23), + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + deleted_at TIMESTAMPTZ +); + +CREATE TABLE IF NOT EXISTS backup ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + source_volume_id UUID REFERENCES volume(id) ON DELETE SET NULL, + source_snapshot_id UUID, + target_id UUID NOT NULL REFERENCES backup_target(id), + manifest_object_key TEXT, + size_bytes BIGINT NOT NULL DEFAULT 0, + unique_bytes BIGINT NOT NULL DEFAULT 0, + chunk_count BIGINT NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT 'running' + CHECK (status IN ('running', 'completed', 'failed', 'pruning')), + error_message TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + completed_at TIMESTAMPTZ, + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); +CREATE INDEX IF NOT EXISTS idx_backup_volume ON 
backup(source_volume_id); +CREATE INDEX IF NOT EXISTS idx_backup_target ON backup(target_id); +CREATE INDEX IF NOT EXISTS idx_backup_status_updated ON backup(status, updated_at) + WHERE status = 'running'; + +ALTER TABLE volume ADD COLUMN IF NOT EXISTS backup_cron TEXT; +ALTER TABLE volume ADD COLUMN IF NOT EXISTS backup_retain_count INT; +ALTER TABLE volume ADD COLUMN IF NOT EXISTS backup_target_id UUID + REFERENCES backup_target(id); + +CREATE TABLE IF NOT EXISTS backup_gc_run ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + target_id UUID NOT NULL REFERENCES backup_target(id) ON DELETE CASCADE, + started_at TIMESTAMPTZ NOT NULL DEFAULT now(), + completed_at TIMESTAMPTZ, + bytes_freed BIGINT NOT NULL DEFAULT 0, + chunks_deleted BIGINT NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT 'running' + CHECK (status IN ('running', 'completed', 'failed')), + error_message TEXT +); +CREATE INDEX IF NOT EXISTS idx_backup_gc_run_target ON backup_gc_run(target_id, started_at DESC); + +COMMENT ON COLUMN backup_target.encrypted_secret_access_key IS + 'AES-GCM(envelope_key) over the S3 secret access key.'; +COMMENT ON COLUMN backup_target.encrypted_target_key IS + 'AES-GCM(envelope_key) over the per-target XChaCha20-Poly1305 key used for chunk + manifest encryption.'; +COMMENT ON COLUMN backup.unique_bytes IS + 'Post-dedup ciphertext bytes that this backup actually wrote (chunks not skipped by HEAD).'; From 280842eadc1425ee7fd4fea042aa702fd1cd87f2 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:36:59 +0700 Subject: [PATCH 12/37] feat(backup): wire types BackupTarget, Backup, BackupSchedule --- crates/nexus-types/src/lib.rs | 85 +++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/crates/nexus-types/src/lib.rs b/crates/nexus-types/src/lib.rs index d6adb3a..1b55b66 100644 --- a/crates/nexus-types/src/lib.rs +++ b/crates/nexus-types/src/lib.rs @@ -1747,3 +1747,88 @@ pub struct StorageBackend { #[serde(default, 
skip_serializing_if = "Option::is_none")] pub deleted_at: Option>, } + +// ── Backup pipeline ────────────────────────────────────────────────────── + +#[derive( + Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, utoipa::ToSchema, +)] +#[serde(rename_all = "lowercase")] +pub enum BackupStatus { + Running, + Completed, + Failed, + Pruning, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] +pub struct BackupTarget { + pub id: uuid::Uuid, + pub name: String, + pub endpoint: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + pub bucket: String, + #[serde(default)] + pub prefix: String, + pub access_key_id: String, + /// gc_hour 0-23 (UTC). + pub gc_hour: u8, + pub created_at: chrono::DateTime, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub deleted_at: Option>, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] +pub struct CreateBackupTargetRequest { + pub name: String, + pub endpoint: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + pub bucket: String, + #[serde(default)] + pub prefix: String, + pub access_key_id: String, + pub secret_access_key: String, + #[serde(default = "default_backup_gc_hour")] + pub gc_hour: u8, +} + +fn default_backup_gc_hour() -> u8 { + 3 +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] +pub struct Backup { + pub id: uuid::Uuid, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub source_volume_id: Option, + pub target_id: uuid::Uuid, + pub size_bytes: i64, + pub unique_bytes: i64, + pub chunk_count: i64, + pub status: BackupStatus, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error_message: Option, + pub created_at: chrono::DateTime, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub completed_at: Option>, +} + +#[derive(Debug, Clone, serde::Serialize, 
serde::Deserialize, utoipa::ToSchema)] +pub struct BackupSchedule { + /// Standard 5-field cron expression in UTC. + pub cron: String, + pub retain_count: i32, + pub target_id: uuid::Uuid, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] +pub struct CreateBackupRequest { + pub target_id: uuid::Uuid, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] +pub struct RestoreRequest { + pub target_backend_id: uuid::Uuid, +} From f3616c7e4f74a582169edce459d0b58397830c27 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:41:21 +0700 Subject: [PATCH 13/37] chore(backup): add aws-sdk-s3, cron, nexus-backup deps to manager + agent Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 822 +++++++++++++++++++++++++++++++++++++--- apps/agent/Cargo.toml | 6 + apps/manager/Cargo.toml | 7 + 3 files changed, 777 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05d02b4..0980c64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,15 +58,21 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "aws-config", + "aws-credential-types", + "aws-sdk-s3", + "aws-types", "axum", "bytes", "chrono", "futures", + "hex", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-util", "hyperlocal", "libc", + "nexus-backup", "nexus-storage", "nexus-types", "num_cpus", @@ -249,6 +255,390 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-config" +version = "1.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8fc176d53d6fe85017f230405e3255cedb4a02221cb55ed6d76dccbbb099b2" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + 
"fastrand", + "http 1.3.1", + "time", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e26bbf46abc608f2dc61fd6cb3b7b0665497cc259a21520151ed98f8b37d2c79" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-lc-rs" +version = "1.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ec6fb3fe69024a75fa7e1bfb48aa6cf59706a101658ea01bfd33b2b248a038f" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50037ee5e1e41e7b8f9d161680a725bd1626cb6f8c7e901f91f942850852fe7" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "aws-runtime" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0f92058d22a46adf53ec57a6a96f34447daf02bff52e8fb956c66bcd5c6ac12" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "bytes-utils", + "fastrand", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "http-body 1.0.1", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.123.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c018f22146966fdd493a664f62ee2483dff256b42a08c125ab6a084bde7b77fe" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + 
"aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac", + "http 0.2.12", + "http 1.3.1", + "http-body 1.0.1", + "lru 0.16.4", + "percent-encoding", + "regex-lite", + "sha2", + "tracing", + "url", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.98.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89c4f19655ab0856375e169865c91264de965bd74c407c7f1e403184b1049409" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + "http 1.3.1", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f6ae9b71597dc5fd115d52849d7a5556ad9265885ad3492ea8d73b93bbc46e" +dependencies = [ + "aws-credential-types", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.3.1", + "percent-encoding", + "sha2", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cba48474f1d6807384d06fec085b909f5807e16653c5af5c45dfe89539f0b70" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.64.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a764fa7222922f6c0af8eea478b0ef1ba5ce1222af97e01f33ca5e957bd7f3b9" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "md-5", + "pin-project-lite", + "sha1", + "sha2", + "tracing", 
+] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c0b3e587fbaa5d7f7e870544508af8ce82ea47cd30376e69e1e37c4ac746f79" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.63.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af4a8a5fe3e4ac7ee871237c340bbce13e982d37543b65700f4419e039f5d78e" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0709f0083aa19b704132684bc26d3c868e06bd428ccc4373b0b55c3e8748a58b" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.27", + "h2 0.4.12", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.7.0", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.32", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.3", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.62.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b3a779093e18cad88bbae08dc4261e1d95018c4c5b9356a52bcae7c0b6e9bb" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3f39d5bb871aaf461d59144557f16d5927a5248a983a40654d9cf3b9ba183b" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-query" 
+version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f76a580e3d8f8961e5d48763214025a2af65c2fa4cd1fb7f270a0e107a71b0" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd3dfc18c1ce097cf81fced7192731e63809829c6cbf933c1ec47452d08e1aa" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c55e0837e9b8526f49e0b9bfa9ee18ddee70e853f5bc09c5d11ebceddcb0fec" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.3.1", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "576b0d6991c9c32bc14fc340582ef148311f924d41815f641a308b5d11e8e7cd" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce02add1aa3677d022f8adf81dcbe3046a95f17a1b1e8979c145cd21d3d22b3" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c50f3cdf47caa8d01f2be4a6663ea02418e892f9bbfd82c7b9a3a37eaccdd3a" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + [[package]] name = "axum" version = "0.7.9" @@ -261,10 +651,10 @@ dependencies = [ "base64 0.22.1", "bytes", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-util", "itoa", "matchit", @@ -297,8 +687,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -353,6 +743,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.8.0" @@ -471,9 +871,9 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http", + "http 1.3.1", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-named-pipe", "hyper-util", "hyperlocal", @@ -527,6 +927,16 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "cassowary" version = "0.3.0" @@ -675,6 +1085,15 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -731,6 +1150,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -770,6 +1199,18 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crc-fast" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d" +dependencies = [ + "crc", + "digest", + "rustversion", + "spin 0.10.0", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -779,6 +1220,17 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cron" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8c3e73077b4b4a6ab1ea5047c37c57aee77657bc8ecd6f29b0af082d0b0c07" +dependencies = [ + "chrono", + "nom", + "once_cell", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -1089,6 +1541,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "dunce" +version = "1.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.20" @@ -1286,7 +1744,7 @@ checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", - "spin", + "spin 0.9.8", ] [[package]] @@ -1301,6 +1759,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1325,6 +1789,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.31" @@ -1519,6 +1989,25 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.11.4", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.12" @@ -1530,7 +2019,7 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http", + "http 1.3.1", "indexmap 2.11.4", "slab", "tokio", @@ -1563,7 +2052,7 @@ checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", ] [[package]] @@ -1571,6 +2060,11 @@ name = "hashbrown" version = "0.16.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash 0.2.0", +] [[package]] name = "hashlink" @@ -1637,6 +2131,17 @@ dependencies = [ "windows-link", ] +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http" version = "1.3.1" @@ -1648,6 +2153,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -1655,7 +2171,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http", + "http 1.3.1", ] [[package]] @@ -1666,8 +2182,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "pin-project-lite", ] @@ -1683,6 +2199,30 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + 
"pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.7.0" @@ -1693,9 +2233,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.12", + "http 1.3.1", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -1713,7 +2253,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" dependencies = [ "hex", - "hyper", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -1721,19 +2261,35 @@ dependencies = [ "winapi", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http", - "hyper", + "http 1.3.1", + "hyper 1.7.0", "hyper-util", - "rustls", + "rustls 0.23.32", + "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.3", "tower-service", "webpki-roots", ] @@ -1749,14 +2305,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "http", - "http-body", - "hyper", + "http 1.3.1", + "http-body 1.0.1", + "hyper 1.7.0", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -1770,7 +2326,7 @@ checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" dependencies = [ "hex", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -2087,7 +2643,7 @@ version = 
"1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -2188,6 +2744,15 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "lru" +version = "0.16.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f66e8d5d03f609abc3a39e6f08e4164ebf1447a732906d39eb9b99b7919ef39" +dependencies = [ + "hashbrown 0.16.0", +] + [[package]] name = "lru-slab" version = "0.1.2" @@ -2202,10 +2767,16 @@ dependencies = [ "anyhow", "argon2", "async-trait", + "aws-config", + "aws-credential-types", + "aws-sdk-s3", + "aws-smithy-types", + "aws-types", "axum", "base64 0.22.1", "bollard", "chrono", + "cron", "dotenvy", "ed25519-dalek", "futures", @@ -2214,6 +2785,7 @@ dependencies = [ "image", "metrics", "mockito", + "nexus-backup", "nexus-storage", "nexus-types", "openidconnect", @@ -2353,10 +2925,10 @@ dependencies = [ "bytes", "colored", "futures-core", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-util", "log", "pin-project-lite", @@ -2377,11 +2949,11 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 1.3.1", "httparse", "memchr", "mime", - "spin", + "spin 0.9.8", "version_check", ] @@ -2587,7 +3159,7 @@ dependencies = [ "base64 0.22.1", "chrono", "getrandom 0.2.16", - "http", + "http 1.3.1", "rand 0.8.5", "reqwest", "serde", @@ -2636,7 +3208,7 @@ dependencies = [ "dyn-clone", "ed25519-dalek", "hmac", - "http", + "http 1.3.1", "itertools 0.10.5", "log", "oauth2", @@ -2688,6 +3260,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "openssl-src" version = "300.6.0+3.6.2" @@ -2719,6 +3297,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "outref" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" + [[package]] name = "p256" version = "0.13.2" @@ -3026,8 +3610,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls", - "socket2", + "rustls 0.23.32", + "socket2 0.6.0", "thiserror 2.0.16", "tokio", "tracing", @@ -3046,7 +3630,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash 2.1.1", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.16", @@ -3064,7 +3648,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.0", "tracing", "windows-sys 0.59.0", ] @@ -3165,7 +3749,7 @@ dependencies = [ "indoc", "instability", "itertools 0.13.0", - "lru", + "lru 0.12.5", "paste", "strum", "unicode-segmentation", @@ -3245,6 +3829,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" + [[package]] name = "regex-syntax" version = "0.8.6" @@ -3262,25 +3852,25 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.7.0", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.3", "tower", "tower-http 0.6.6", "tower-service", @@ -3422,20 +4012,45 @@ dependencies = [ "windows-sys 0.61.1", ] +[[package]] +name = 
"rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ + "aws-lc-rs", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.13", "subtle", "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe 0.2.1", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -3446,12 +4061,23 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -3494,7 +4120,7 @@ dependencies = [ "derive_builder", "flate2", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "pkg-config", "quick-xml", @@ -3523,6 +4149,15 @@ dependencies = [ "sdd", ] +[[package]] +name = "schannel" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" +dependencies = [ + "windows-sys 0.61.1", +] + 
[[package]] name = "schemars" version = "0.9.0" @@ -3553,6 +4188,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sdd" version = "3.0.10" @@ -3573,6 +4218,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" +dependencies = [ + "bitflags 2.11.1", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.27" @@ -3859,6 +4527,16 @@ dependencies = [ "serde", ] +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.0" @@ -3878,6 +4556,12 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.7.3" @@ -4340,7 +5024,7 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", - "socket2", + "socket2 0.6.0", 
"tokio-macros", "windows-sys 0.59.0", ] @@ -4356,13 +5040,23 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ - "rustls", + "rustls 0.23.32", "tokio", ] @@ -4509,8 +5203,8 @@ checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "bitflags 2.11.1", "bytes", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", "pin-project-lite", "tower-layer", @@ -4526,8 +5220,8 @@ dependencies = [ "bitflags 2.11.1", "bytes", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "iri-string", "pin-project-lite", "tower", @@ -4624,7 +5318,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.3.1", "httparse", "log", "rand 0.8.5", @@ -4643,7 +5337,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.3.1", "httparse", "log", "rand 0.8.5", @@ -4893,6 +5587,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "wait-timeout" version = "0.2.1" @@ -5399,9 +6099,9 @@ dependencies = [ "base64 0.22.1", "deadpool", "futures", - "http", + "http 1.3.1", "http-body-util", - "hyper", + "hyper 1.7.0", "hyper-util", "log", "once_cell", @@ -5424,6 +6124,12 @@ version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "yoke" version = "0.8.0" diff --git a/apps/agent/Cargo.toml b/apps/agent/Cargo.toml index 1647a34..64f8fa4 100644 --- a/apps/agent/Cargo.toml +++ b/apps/agent/Cargo.toml @@ -28,6 +28,12 @@ reqwest = { workspace = true } uuid = { workspace = true } futures = { workspace = true } libc = "0.2" +nexus-backup = { path = "../../crates/nexus-backup" } +aws-sdk-s3 = { version = "1", default-features = false, features = ["rustls", "rt-tokio"] } +aws-credential-types = "1" +aws-config = { version = "1", default-features = false, features = ["rustls", "rt-tokio"] } +aws-types = "1" +hex = "0.4" [dev-dependencies] tempfile = "3" diff --git a/apps/manager/Cargo.toml b/apps/manager/Cargo.toml index 433c922..a1ca935 100644 --- a/apps/manager/Cargo.toml +++ b/apps/manager/Cargo.toml @@ -50,6 +50,13 @@ samael = "0.0.17" openssl = { version = "0.10", features = ["vendored"] } aes-gcm = "0.10" url = "2" +nexus-backup = { path = "../../crates/nexus-backup" } +aws-sdk-s3 = { version = "1", default-features = false, features = ["rustls", "rt-tokio"] } +aws-credential-types = "1" +aws-config = { version = "1", default-features = false, features = ["rustls", "rt-tokio"] } +aws-types = "1" +aws-smithy-types = "1" +cron = "0.12" [dev-dependencies] wiremock = "0.6" From ce7f7c748bac558260133fc84b3215db9cab7ca9 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:44:00 +0700 Subject: [PATCH 14/37] feat(backup): envelope wrap/unwrap for target secrets Co-Authored-By: Claude Sonnet 4.6 --- .../src/features/backup_targets/envelope.rs | 96 +++++++++++++++++++ .../src/features/backup_targets/mod.rs | 1 + 
apps/manager/src/features/mod.rs | 1 + 3 files changed, 98 insertions(+) create mode 100644 apps/manager/src/features/backup_targets/envelope.rs create mode 100644 apps/manager/src/features/backup_targets/mod.rs diff --git a/apps/manager/src/features/backup_targets/envelope.rs b/apps/manager/src/features/backup_targets/envelope.rs new file mode 100644 index 0000000..52f97fd --- /dev/null +++ b/apps/manager/src/features/backup_targets/envelope.rs @@ -0,0 +1,96 @@ +//! AES-GCM(envelope_key) wrap/unwrap for backup target secrets. +//! Reuses the same MANAGER_ENVELOPE_KEY env var the SSO module uses. + +use aes_gcm::{aead::Aead, Aes256Gcm, Key, KeyInit, Nonce}; +use anyhow::{anyhow, Context, Result}; + +const NONCE_LEN: usize = 12; + +fn cipher() -> Result { + let raw = std::env::var("MANAGER_ENVELOPE_KEY") + .context("MANAGER_ENVELOPE_KEY not set")?; + let bytes = hex::decode(raw) + .context("MANAGER_ENVELOPE_KEY must be hex-encoded")?; + if bytes.len() != 32 { + return Err(anyhow!( + "MANAGER_ENVELOPE_KEY must be 32 bytes (64 hex chars), got {}", + bytes.len() + )); + } + let key = Key::::from_slice(&bytes); + Ok(Aes256Gcm::new(key)) +} + +pub fn wrap(plaintext: &[u8]) -> Result> { + use rand::RngCore; + let c = cipher()?; + let mut nonce_bytes = [0u8; NONCE_LEN]; + rand::thread_rng().fill_bytes(&mut nonce_bytes); + let nonce = Nonce::from_slice(&nonce_bytes); + let ct = c.encrypt(nonce, plaintext).map_err(|e| anyhow!("aes-gcm encrypt: {e}"))?; + let mut out = Vec::with_capacity(NONCE_LEN + ct.len()); + out.extend_from_slice(&nonce_bytes); + out.extend_from_slice(&ct); + Ok(out) +} + +pub fn unwrap_to_string(blob: &[u8]) -> Result { + let bytes = unwrap(blob)?; + String::from_utf8(bytes).context("decrypted secret is not utf-8") +} + +pub fn unwrap_to_array(blob: &[u8]) -> Result<[u8; N]> { + let bytes = unwrap(blob)?; + if bytes.len() != N { + return Err(anyhow!("decrypted blob is {} bytes, expected {}", bytes.len(), N)); + } + let mut out = [0u8; N]; + 
out.copy_from_slice(&bytes); + Ok(out) +} + +fn unwrap(blob: &[u8]) -> Result> { + if blob.len() < NONCE_LEN { + return Err(anyhow!("envelope blob too short")); + } + let c = cipher()?; + let nonce = Nonce::from_slice(&blob[..NONCE_LEN]); + c.decrypt(nonce, &blob[NONCE_LEN..]).map_err(|_| anyhow!("envelope decrypt: auth failed")) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn with_key(f: F) { + std::env::set_var("MANAGER_ENVELOPE_KEY", "00".repeat(32)); + f(); + } + + #[test] + fn wrap_unwrap_string() { + with_key(|| { + let blob = wrap(b"secret-access-key").unwrap(); + let s = unwrap_to_string(&blob).unwrap(); + assert_eq!(s, "secret-access-key"); + }); + } + + #[test] + fn wrap_unwrap_array() { + with_key(|| { + let blob = wrap(&[0xAAu8; 32]).unwrap(); + let a: [u8; 32] = unwrap_to_array(&blob).unwrap(); + assert_eq!(a, [0xAAu8; 32]); + }); + } + + #[test] + fn tampered_blob_rejected() { + with_key(|| { + let mut blob = wrap(b"hello").unwrap(); + blob[20] ^= 1; + assert!(unwrap_to_string(&blob).is_err()); + }); + } +} diff --git a/apps/manager/src/features/backup_targets/mod.rs b/apps/manager/src/features/backup_targets/mod.rs new file mode 100644 index 0000000..788ab12 --- /dev/null +++ b/apps/manager/src/features/backup_targets/mod.rs @@ -0,0 +1 @@ +pub mod envelope; diff --git a/apps/manager/src/features/mod.rs b/apps/manager/src/features/mod.rs index f622fcf..38d530e 100644 --- a/apps/manager/src/features/mod.rs +++ b/apps/manager/src/features/mod.rs @@ -2,6 +2,7 @@ use crate::AppState; use axum::{Extension, Json, Router}; use serde::Serialize; +pub mod backup_targets; pub mod containers; pub mod functions; pub mod hosts; From 0f9555d6cbb760092de1085de46c77909f84d6c4 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:49:07 +0700 Subject: [PATCH 15/37] feat(backup): backup_targets repo + CRUD routes Co-Authored-By: Claude Sonnet 4.6 --- .../src/features/backup_targets/envelope.rs | 22 +- .../src/features/backup_targets/mod.rs | 19 ++ 
.../src/features/backup_targets/repo.rs | 102 +++++++++ .../src/features/backup_targets/routes.rs | 193 ++++++++++++++++++ apps/manager/src/features/mod.rs | 1 + 5 files changed, 330 insertions(+), 7 deletions(-) create mode 100644 apps/manager/src/features/backup_targets/repo.rs create mode 100644 apps/manager/src/features/backup_targets/routes.rs diff --git a/apps/manager/src/features/backup_targets/envelope.rs b/apps/manager/src/features/backup_targets/envelope.rs index 52f97fd..9c91ecb 100644 --- a/apps/manager/src/features/backup_targets/envelope.rs +++ b/apps/manager/src/features/backup_targets/envelope.rs @@ -7,10 +7,8 @@ use anyhow::{anyhow, Context, Result}; const NONCE_LEN: usize = 12; fn cipher() -> Result { - let raw = std::env::var("MANAGER_ENVELOPE_KEY") - .context("MANAGER_ENVELOPE_KEY not set")?; - let bytes = hex::decode(raw) - .context("MANAGER_ENVELOPE_KEY must be hex-encoded")?; + let raw = std::env::var("MANAGER_ENVELOPE_KEY").context("MANAGER_ENVELOPE_KEY not set")?; + let bytes = hex::decode(raw).context("MANAGER_ENVELOPE_KEY must be hex-encoded")?; if bytes.len() != 32 { return Err(anyhow!( "MANAGER_ENVELOPE_KEY must be 32 bytes (64 hex chars), got {}", @@ -27,35 +25,45 @@ pub fn wrap(plaintext: &[u8]) -> Result> { let mut nonce_bytes = [0u8; NONCE_LEN]; rand::thread_rng().fill_bytes(&mut nonce_bytes); let nonce = Nonce::from_slice(&nonce_bytes); - let ct = c.encrypt(nonce, plaintext).map_err(|e| anyhow!("aes-gcm encrypt: {e}"))?; + let ct = c + .encrypt(nonce, plaintext) + .map_err(|e| anyhow!("aes-gcm encrypt: {e}"))?; let mut out = Vec::with_capacity(NONCE_LEN + ct.len()); out.extend_from_slice(&nonce_bytes); out.extend_from_slice(&ct); Ok(out) } +#[allow(dead_code)] pub fn unwrap_to_string(blob: &[u8]) -> Result { let bytes = unwrap(blob)?; String::from_utf8(bytes).context("decrypted secret is not utf-8") } +#[allow(dead_code)] pub fn unwrap_to_array(blob: &[u8]) -> Result<[u8; N]> { let bytes = unwrap(blob)?; if bytes.len() != N { - 
return Err(anyhow!("decrypted blob is {} bytes, expected {}", bytes.len(), N)); + return Err(anyhow!( + "decrypted blob is {} bytes, expected {}", + bytes.len(), + N + )); } let mut out = [0u8; N]; out.copy_from_slice(&bytes); Ok(out) } +#[allow(dead_code)] fn unwrap(blob: &[u8]) -> Result> { if blob.len() < NONCE_LEN { return Err(anyhow!("envelope blob too short")); } let c = cipher()?; let nonce = Nonce::from_slice(&blob[..NONCE_LEN]); - c.decrypt(nonce, &blob[NONCE_LEN..]).map_err(|_| anyhow!("envelope decrypt: auth failed")) + c.decrypt(nonce, &blob[NONCE_LEN..]) + .map_err(|_| anyhow!("envelope decrypt: auth failed")) } #[cfg(test)] diff --git a/apps/manager/src/features/backup_targets/mod.rs b/apps/manager/src/features/backup_targets/mod.rs index 788ab12..0031914 100644 --- a/apps/manager/src/features/backup_targets/mod.rs +++ b/apps/manager/src/features/backup_targets/mod.rs @@ -1 +1,20 @@ pub mod envelope; +pub mod repo; +pub mod routes; + +use axum::{ + routing::{get, post}, + Router, +}; + +pub fn router() -> Router { + Router::new() + .route("/", post(routes::create).get(routes::list)) + .route( + "/:id", + get(routes::get_one) + .patch(routes::update) + .delete(routes::soft_delete), + ) + .route("/:id/gc", post(routes::trigger_gc)) +} diff --git a/apps/manager/src/features/backup_targets/repo.rs b/apps/manager/src/features/backup_targets/repo.rs new file mode 100644 index 0000000..e0f7967 --- /dev/null +++ b/apps/manager/src/features/backup_targets/repo.rs @@ -0,0 +1,102 @@ +use chrono::{DateTime, Utc}; +use sqlx::PgPool; +use uuid::Uuid; + +#[derive(Clone)] +pub struct BackupTargetRepository { + pool: PgPool, +} + +#[derive(Debug, Clone, sqlx::FromRow)] +#[allow(dead_code)] +pub struct BackupTargetRow { + pub id: Uuid, + pub name: String, + pub endpoint: String, + pub region: Option, + pub bucket: String, + pub prefix: String, + pub access_key_id: String, + pub encrypted_secret_access_key: Vec, + pub encrypted_target_key: Vec, + pub gc_hour: i16, + pub 
created_at: DateTime, + pub deleted_at: Option>, +} + +pub struct CreateParams<'a> { + pub name: &'a str, + pub endpoint: &'a str, + pub region: Option<&'a str>, + pub bucket: &'a str, + pub prefix: &'a str, + pub access_key_id: &'a str, + pub encrypted_secret_access_key: &'a [u8], + pub encrypted_target_key: &'a [u8], + pub gc_hour: i16, +} + +impl BackupTargetRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + pub async fn list_active(&self) -> sqlx::Result> { + sqlx::query_as::<_, BackupTargetRow>( + r#"SELECT * FROM backup_target WHERE deleted_at IS NULL ORDER BY name"#, + ) + .fetch_all(&self.pool) + .await + } + + pub async fn get(&self, id: Uuid) -> sqlx::Result> { + sqlx::query_as::<_, BackupTargetRow>( + r#"SELECT * FROM backup_target WHERE id = $1 AND deleted_at IS NULL"#, + ) + .bind(id) + .fetch_optional(&self.pool) + .await + } + + pub async fn create(&self, p: CreateParams<'_>) -> sqlx::Result { + sqlx::query_as::<_, BackupTargetRow>( + r#" + INSERT INTO backup_target + (name, endpoint, region, bucket, prefix, access_key_id, + encrypted_secret_access_key, encrypted_target_key, gc_hour) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + RETURNING * + "#, + ) + .bind(p.name) + .bind(p.endpoint) + .bind(p.region) + .bind(p.bucket) + .bind(p.prefix) + .bind(p.access_key_id) + .bind(p.encrypted_secret_access_key) + .bind(p.encrypted_target_key) + .bind(p.gc_hour) + .fetch_one(&self.pool) + .await + } + + pub async fn soft_delete(&self, id: Uuid) -> sqlx::Result<()> { + sqlx::query( + r#"UPDATE backup_target SET deleted_at = now() WHERE id = $1 AND deleted_at IS NULL"#, + ) + .bind(id) + .execute(&self.pool) + .await?; + Ok(()) + } + + pub async fn count_backups_for_target(&self, id: Uuid) -> sqlx::Result { + sqlx::query_scalar::<_, i64>( + r#"SELECT COUNT(*) FROM backup WHERE target_id = $1 AND status IN ('running','completed')"#, + ) + .bind(id) + .fetch_one(&self.pool) + .await + } +} diff --git 
a/apps/manager/src/features/backup_targets/routes.rs b/apps/manager/src/features/backup_targets/routes.rs new file mode 100644 index 0000000..4f974a2 --- /dev/null +++ b/apps/manager/src/features/backup_targets/routes.rs @@ -0,0 +1,193 @@ +use crate::features::backup_targets::envelope; +use crate::features::backup_targets::repo::{ + BackupTargetRepository, BackupTargetRow, CreateParams, +}; +use crate::AppState; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, Extension, Json}; +use nexus_types::{BackupTarget, CreateBackupTargetRequest}; +use rand::RngCore; +use uuid::Uuid; + +fn row_to_wire(row: BackupTargetRow) -> BackupTarget { + BackupTarget { + id: row.id, + name: row.name, + endpoint: row.endpoint, + region: row.region, + bucket: row.bucket, + prefix: row.prefix, + access_key_id: row.access_key_id, + gc_hour: row.gc_hour as u8, + created_at: row.created_at, + deleted_at: row.deleted_at, + } +} + +#[derive(serde::Serialize, utoipa::ToSchema)] +pub struct BackupTargetListResponse { + pub items: Vec, +} + +pub async fn list(Extension(st): Extension) -> impl IntoResponse { + let repo = BackupTargetRepository::new(st.db.clone()); + match repo.list_active().await { + Ok(rows) => ( + StatusCode::OK, + Json(BackupTargetListResponse { + items: rows.into_iter().map(row_to_wire).collect(), + }), + ) + .into_response(), + Err(e) => { + tracing::error!("backup_targets list: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": "db"})), + ) + .into_response() + } + } +} + +pub async fn get_one( + Extension(st): Extension, + Path(id): Path, +) -> impl IntoResponse { + let repo = BackupTargetRepository::new(st.db.clone()); + match repo.get(id).await { + Ok(Some(row)) => (StatusCode::OK, Json(row_to_wire(row))).into_response(), + Ok(None) => ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error":"not found"})), + ) + .into_response(), + Err(e) => { + tracing::error!("backup_targets get: {e}"); + ( + 
StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub async fn create( + Extension(st): Extension, + Json(req): Json, +) -> impl IntoResponse { + let repo = BackupTargetRepository::new(st.db.clone()); + + let mut target_key = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut target_key); + + let enc_secret = match envelope::wrap(req.secret_access_key.as_bytes()) { + Ok(b) => b, + Err(e) => { + tracing::error!("envelope wrap secret: {e:#}"); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"envelope"})), + ) + .into_response(); + } + }; + let enc_target = match envelope::wrap(&target_key) { + Ok(b) => b, + Err(e) => { + tracing::error!("envelope wrap target_key: {e:#}"); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"envelope"})), + ) + .into_response(); + } + }; + + match repo + .create(CreateParams { + name: &req.name, + endpoint: &req.endpoint, + region: req.region.as_deref(), + bucket: &req.bucket, + prefix: &req.prefix, + access_key_id: &req.access_key_id, + encrypted_secret_access_key: &enc_secret, + encrypted_target_key: &enc_target, + gc_hour: req.gc_hour as i16, + }) + .await + { + Ok(row) => (StatusCode::CREATED, Json(row_to_wire(row))).into_response(), + Err(sqlx::Error::Database(e)) if e.code().as_deref() == Some("23505") => ( + StatusCode::CONFLICT, + Json(serde_json::json!({"error":"name already exists"})), + ) + .into_response(), + Err(e) => { + tracing::error!("backup_targets create: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub async fn update( + Extension(_st): Extension, + Path(_id): Path, + Json(_req): Json, +) -> impl IntoResponse { + ( + StatusCode::NOT_IMPLEMENTED, + Json(serde_json::json!({"error":"update not in v1"})), + ) +} + +pub async fn soft_delete( + Extension(st): Extension, + Path(id): Path, +) -> impl IntoResponse { + 
let repo = BackupTargetRepository::new(st.db.clone()); + match repo.count_backups_for_target(id).await { + Ok(n) if n > 0 => ( + StatusCode::CONFLICT, + Json(serde_json::json!({ + "error": format!("target has {n} backups; delete them first"), + })), + ) + .into_response(), + Ok(_) => match repo.soft_delete(id).await { + Ok(()) => (StatusCode::NO_CONTENT, ()).into_response(), + Err(e) => { + tracing::error!("backup_targets soft_delete: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + }, + Err(e) => { + tracing::error!("backup_targets count: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub async fn trigger_gc( + Extension(_st): Extension, + Path(_id): Path, +) -> impl IntoResponse { + // Wired by Task B.T17 once the GC task exists. + ( + StatusCode::ACCEPTED, + Json(serde_json::json!({"queued": true})), + ) +} diff --git a/apps/manager/src/features/mod.rs b/apps/manager/src/features/mod.rs index 38d530e..e48fde8 100644 --- a/apps/manager/src/features/mod.rs +++ b/apps/manager/src/features/mod.rs @@ -88,6 +88,7 @@ pub fn router(state: AppState) -> Router { .nest("/v1/metrics", metrics::router()) .nest("/v1/volumes", volumes::router()) .nest("/v1/storage_backends", storage_backends::router()) + .nest("/v1/backup_targets", backup_targets::router()) // SSO public routes (no auth — these ARE the auth flow) .nest("/v1/sso", sso::public_router()) // SSO admin routes (auth + admin required) From 7b490af9138720fb5e7588e7dfa4c6153dcf2fbf Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:54:07 +0700 Subject: [PATCH 16/37] feat(backup): manager agent_rpc helpers + RPC types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add BackupReq/BackupResp/RestoreReq/RestoreResp RPC types in a new features/backups/types.rs, and extend storage/agent_rpc.rs with agent_backup and 
agent_restore HTTP helpers (dead_code allowed — wired in T16). Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/mod.rs | 1 + apps/manager/src/features/backups/types.rs | 66 +++++++++++++++++++ apps/manager/src/features/mod.rs | 1 + .../manager/src/features/storage/agent_rpc.rs | 33 ++++++++++ 4 files changed, 101 insertions(+) create mode 100644 apps/manager/src/features/backups/mod.rs create mode 100644 apps/manager/src/features/backups/types.rs diff --git a/apps/manager/src/features/backups/mod.rs b/apps/manager/src/features/backups/mod.rs new file mode 100644 index 0000000..cd40856 --- /dev/null +++ b/apps/manager/src/features/backups/mod.rs @@ -0,0 +1 @@ +pub mod types; diff --git a/apps/manager/src/features/backups/types.rs b/apps/manager/src/features/backups/types.rs new file mode 100644 index 0000000..04e3f52 --- /dev/null +++ b/apps/manager/src/features/backups/types.rs @@ -0,0 +1,66 @@ +//! RPC request/response types between manager and agent for backup ops. 
+ +use nexus_storage::{AttachedPath, BackendKind, VolumeHandle, VolumeSnapshotHandle}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct BackupTargetConfig { + pub endpoint: String, + pub region: Option, + pub bucket: String, + pub prefix: String, + pub access_key_id: String, + pub secret_access_key: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] +pub struct ChunkerParams { + pub min_size: u32, + pub avg_size: u32, + pub max_size: u32, +} + +impl Default for ChunkerParams { + fn default() -> Self { + Self { + min_size: 4096, + avg_size: 65536, + max_size: 1048576, + } + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct BackupReq { + pub backup_id: Uuid, + pub snapshot: VolumeSnapshotHandle, + pub backend_kind: BackendKind, + pub target: BackupTargetConfig, + pub encryption_key: [u8; 32], + pub chunker_params: ChunkerParams, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct BackupResp { + pub manifest_object_key: String, + pub chunk_count: u64, + pub bytes_written: u64, + pub bytes_unique: u64, + pub duration_ms: u64, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct RestoreReq { + pub target_volume: VolumeHandle, + pub target_attached: AttachedPath, + pub manifest_object_key: String, + pub target: BackupTargetConfig, + pub encryption_key: [u8; 32], +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct RestoreResp { + pub bytes_written: u64, + pub duration_ms: u64, +} diff --git a/apps/manager/src/features/mod.rs b/apps/manager/src/features/mod.rs index e48fde8..2e02185 100644 --- a/apps/manager/src/features/mod.rs +++ b/apps/manager/src/features/mod.rs @@ -3,6 +3,7 @@ use axum::{Extension, Json, Router}; use serde::Serialize; pub mod backup_targets; +pub mod backups; pub mod containers; pub mod functions; pub mod hosts; diff --git a/apps/manager/src/features/storage/agent_rpc.rs b/apps/manager/src/features/storage/agent_rpc.rs index 
0bac287..2ae9ecc 100644 --- a/apps/manager/src/features/storage/agent_rpc.rs +++ b/apps/manager/src/features/storage/agent_rpc.rs @@ -1,3 +1,4 @@ +use crate::features::backups::types::{BackupReq, BackupResp, RestoreReq, RestoreResp}; use anyhow::{anyhow, Context, Result}; use nexus_storage::{AttachedPath, BackendKind, VolumeHandle}; use reqwest::Client; @@ -114,3 +115,35 @@ pub async fn agent_resize2fs(host_addr: &str, attached: &AttachedPath) -> Result } Ok(()) } + +#[allow(dead_code)] +pub async fn agent_backup(host_addr: &str, req: BackupReq) -> Result { + let resp = Client::new() + .post(agent_url(host_addr, "/v1/storage/backup")) + .json(&req) + .send() + .await + .with_context(|| format!("POST /v1/storage/backup to {host_addr}"))?; + if !resp.status().is_success() { + let s = resp.status(); + let body = resp.text().await.unwrap_or_default(); + return Err(anyhow!("agent backup: {s}: {body}")); + } + Ok(resp.json::().await?) +} + +#[allow(dead_code)] +pub async fn agent_restore(host_addr: &str, req: RestoreReq) -> Result { + let resp = Client::new() + .post(agent_url(host_addr, "/v1/storage/restore")) + .json(&req) + .send() + .await + .with_context(|| format!("POST /v1/storage/restore to {host_addr}"))?; + if !resp.status().is_success() { + let s = resp.status(); + let body = resp.text().await.unwrap_or_default(); + return Err(anyhow!("agent restore: {s}: {body}")); + } + Ok(resp.json::().await?) +} From 1c0d364d076b183ffde21797eab157682bc230ae Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:54:55 +0700 Subject: [PATCH 17/37] feat(backup): agent S3 client wrapper (head/put/get) Introduce apps/agent/src/features/storage/s3.rs with make_client() using aws-sdk-s3 1.x, force_path_style for MinIO/Ceph compatibility, and async head_object/put_object/get_object helpers used by the backup pipeline. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/mod.rs | 1 + apps/agent/src/features/storage/s3.rs | 93 ++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 apps/agent/src/features/storage/s3.rs diff --git a/apps/agent/src/features/storage/mod.rs b/apps/agent/src/features/storage/mod.rs index 6b57753..5bd67ae 100644 --- a/apps/agent/src/features/storage/mod.rs +++ b/apps/agent/src/features/storage/mod.rs @@ -2,3 +2,4 @@ pub mod iscsi; pub mod local_file; pub mod registry; pub mod routes; +pub mod s3; diff --git a/apps/agent/src/features/storage/s3.rs b/apps/agent/src/features/storage/s3.rs new file mode 100644 index 0000000..68e2f37 --- /dev/null +++ b/apps/agent/src/features/storage/s3.rs @@ -0,0 +1,93 @@ +//! Thin async wrapper over aws-sdk-s3 for the backup pipeline. + +use aws_credential_types::Credentials; +use aws_sdk_s3::{ + config::{Builder, Region}, + error::SdkError, + operation::head_object::HeadObjectError, + Client, +}; +use std::time::Duration; + +#[derive(Clone)] +pub struct BackupTargetConfig { + pub endpoint: String, + pub region: Option, + pub bucket: String, + pub prefix: String, + pub access_key_id: String, + pub secret_access_key: String, +} + +pub fn make_client(cfg: &BackupTargetConfig) -> Client { + let creds = Credentials::new( + cfg.access_key_id.clone(), + cfg.secret_access_key.clone(), + None, + None, + "nqrust-backup", + ); + let region = Region::new(cfg.region.clone().unwrap_or_else(|| "us-east-1".into())); + let cfg_built = Builder::new() + .behavior_version_latest() + .endpoint_url(&cfg.endpoint) + .credentials_provider(creds) + .region(region) + .force_path_style(true) + .timeout_config( + aws_sdk_s3::config::timeout::TimeoutConfig::builder() + .operation_timeout(Duration::from_secs(120)) + .build(), + ) + .build(); + Client::from_conf(cfg_built) +} + +#[derive(Debug, thiserror::Error)] +pub enum S3Error { + #[error("s3: {0}")] + Other(String), +} + +pub async fn head_object(client: 
&Client, bucket: &str, key: &str) -> Result { + match client.head_object().bucket(bucket).key(key).send().await { + Ok(_) => Ok(true), + Err(SdkError::ServiceError(svc)) if matches!(svc.err(), HeadObjectError::NotFound(_)) => { + Ok(false) + } + Err(e) => Err(S3Error::Other(format!("head: {e}"))), + } +} + +pub async fn put_object( + client: &Client, + bucket: &str, + key: &str, + body: Vec, +) -> Result<(), S3Error> { + client + .put_object() + .bucket(bucket) + .key(key) + .body(body.into()) + .send() + .await + .map_err(|e| S3Error::Other(format!("put: {e}")))?; + Ok(()) +} + +pub async fn get_object(client: &Client, bucket: &str, key: &str) -> Result, S3Error> { + let resp = client + .get_object() + .bucket(bucket) + .key(key) + .send() + .await + .map_err(|e| S3Error::Other(format!("get: {e}")))?; + let body = resp + .body + .collect() + .await + .map_err(|e| S3Error::Other(format!("get body: {e}")))?; + Ok(body.into_bytes().to_vec()) +} From 6aa566bfb285d644e61df96cb676ea1475a5ae98 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:57:02 +0700 Subject: [PATCH 18/37] feat(backup): agent backup + restore pipeline + HTTP routes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add FastCDC→encrypt→HEAD-or-PUT backup pipeline (backup.rs), extend routes.rs with POST /v1/storage/backup and /v1/storage/restore handlers, and add blake3 dep to agent for direct hashing in the pipeline. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/Cargo.toml | 1 + apps/agent/src/features/storage/backup.rs | 174 ++++++++++++++++++++++ apps/agent/src/features/storage/mod.rs | 1 + apps/agent/src/features/storage/routes.rs | 144 ++++++++++++++++++ 4 files changed, 320 insertions(+) create mode 100644 apps/agent/src/features/storage/backup.rs diff --git a/apps/agent/Cargo.toml b/apps/agent/Cargo.toml index 64f8fa4..e2f33bd 100644 --- a/apps/agent/Cargo.toml +++ b/apps/agent/Cargo.toml @@ -34,6 +34,7 @@ aws-credential-types = "1" aws-config = { version = "1", default-features = false, features = ["rustls", "rt-tokio"] } aws-types = "1" hex = "0.4" +blake3 = "1" [dev-dependencies] tempfile = "3" diff --git a/apps/agent/src/features/storage/backup.rs b/apps/agent/src/features/storage/backup.rs new file mode 100644 index 0000000..03173f4 --- /dev/null +++ b/apps/agent/src/features/storage/backup.rs @@ -0,0 +1,174 @@ +//! The chunker pipeline: read snapshot bytes → FastCDC → encrypt → HEAD-or-PUT. 
+ +use crate::features::storage::registry::HostBackendRegistry; +use crate::features::storage::s3::{self, BackupTargetConfig}; +use anyhow::{Context, Result}; +use chrono::Utc; +use nexus_backup::{ + chunk_object_key, decrypt_chunk, decrypt_manifest, encrypt_chunk, encrypt_manifest, + manifest_object_key, ChunkKey, ChunkRef, Chunker, ChunkerParams, Manifest, MANIFEST_VERSION, +}; +use nexus_storage::{AttachedPath, VolumeHandle, VolumeSnapshotHandle}; +use std::sync::Arc; +use std::time::Instant; +use tokio::io::{AsyncSeekExt, AsyncWriteExt}; +use uuid::Uuid; + +pub struct BackupParams { + pub backup_id: Uuid, + pub snapshot: VolumeSnapshotHandle, + pub target: BackupTargetConfig, + pub encryption_key: [u8; 32], + pub chunker_params: ChunkerParams, +} + +pub struct BackupOutcome { + pub manifest_object_key: String, + pub chunk_count: u64, + pub bytes_written: u64, + pub bytes_unique: u64, + pub duration_ms: u64, +} + +pub async fn run_backup( + registry: Arc, + params: BackupParams, +) -> Result { + let start = Instant::now(); + let backend = registry + .get(params.snapshot.backend_kind) + .ok_or_else(|| { + anyhow::anyhow!( + "no host backend for kind {:?}", + params.snapshot.backend_kind + ) + })? + .clone(); + + let mut reader = backend + .read_snapshot(¶ms.snapshot) + .await + .context("read_snapshot")?; + let mut chunker = Chunker::new(&mut reader, params.chunker_params); + let s3 = s3::make_client(¶ms.target); + let key = ChunkKey::from_bytes(params.encryption_key); + + let mut chunks: Vec = Vec::new(); + let mut bytes_written: u64 = 0; + let mut bytes_unique: u64 = 0; + let mut total_plaintext: u64 = 0; + + while let Some(chunk) = chunker.next_chunk().await? 
{ + let plaintext_hash: [u8; 32] = *blake3::hash(&chunk.plaintext_bytes).as_bytes(); + let ciphertext = encrypt_chunk(&key, &chunk.plaintext_bytes).context("encrypt_chunk")?; + let chunk_id: [u8; 32] = *blake3::hash(&ciphertext).as_bytes(); + let object_key = chunk_object_key(¶ms.target.prefix, &chunk_id); + + let exists = s3::head_object(&s3, ¶ms.target.bucket, &object_key) + .await + .context("HEAD chunk")?; + bytes_written += ciphertext.len() as u64; + if !exists { + let cipher_len = ciphertext.len() as u64; + s3::put_object( + &s3, + ¶ms.target.bucket, + &object_key, + ciphertext.clone(), + ) + .await + .context("PUT chunk")?; + bytes_unique += cipher_len; + } + + chunks.push(ChunkRef { + plaintext_offset: chunk.plaintext_offset, + plaintext_length: chunk.plaintext_length, + plaintext_hash, + chunk_id, + ciphertext_length: ciphertext.len() as u32, + }); + total_plaintext += chunk.plaintext_length as u64; + } + + let manifest = Manifest { + version: MANIFEST_VERSION, + backup_id: params.backup_id, + source_volume_id: params.snapshot.source_volume_id, + source_snapshot_id: Some(params.snapshot.snapshot_id), + total_plaintext_size: total_plaintext, + created_at_unix_seconds: Utc::now().timestamp(), + chunks: chunks.clone(), + }; + let manifest_compressed = manifest + .serialize_compressed() + .context("manifest serialize")?; + let manifest_blob = encrypt_manifest(&key, &manifest_compressed).context("encrypt manifest")?; + let mkey = manifest_object_key(¶ms.target.prefix, ¶ms.backup_id); + s3::put_object(&s3, ¶ms.target.bucket, &mkey, manifest_blob) + .await + .context("PUT manifest")?; + + Ok(BackupOutcome { + manifest_object_key: mkey, + chunk_count: chunks.len() as u64, + bytes_written, + bytes_unique, + duration_ms: start.elapsed().as_millis() as u64, + }) +} + +pub struct RestoreParams { + pub target_volume: VolumeHandle, + pub target_attached: AttachedPath, + pub manifest_object_key: String, + pub target: BackupTargetConfig, + pub encryption_key: [u8; 32], +} 
+ +pub struct RestoreOutcome { + pub bytes_written: u64, + pub duration_ms: u64, +} + +pub async fn run_restore(params: RestoreParams) -> Result { + let start = Instant::now(); + let s3 = s3::make_client(¶ms.target); + let key = ChunkKey::from_bytes(params.encryption_key); + + let blob = s3::get_object(&s3, ¶ms.target.bucket, ¶ms.manifest_object_key) + .await + .context("GET manifest")?; + let compressed = decrypt_manifest(&key, &blob).context("decrypt manifest")?; + let manifest = + Manifest::deserialize_compressed(&compressed).context("deserialize manifest")?; + + let mut dst = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(false) + .open(params.target_attached.path()) + .await?; + + let mut bytes_written: u64 = 0; + for chunk_ref in &manifest.chunks { + let object_key = chunk_object_key(¶ms.target.prefix, &chunk_ref.chunk_id); + let ciphertext = s3::get_object(&s3, ¶ms.target.bucket, &object_key) + .await + .with_context(|| format!("GET chunk {}", hex::encode(chunk_ref.chunk_id)))?; + let plaintext = decrypt_chunk(&key, &ciphertext, &chunk_ref.plaintext_hash) + .context("decrypt chunk")?; + dst.seek(std::io::SeekFrom::Start(chunk_ref.plaintext_offset)) + .await?; + dst.write_all(&plaintext).await?; + bytes_written += plaintext.len() as u64; + } + dst.flush().await?; + + let _ = params.target_volume; // suppress unused warning; kept for future logging + + Ok(RestoreOutcome { + bytes_written, + duration_ms: start.elapsed().as_millis() as u64, + }) +} diff --git a/apps/agent/src/features/storage/mod.rs b/apps/agent/src/features/storage/mod.rs index 5bd67ae..8d12ecc 100644 --- a/apps/agent/src/features/storage/mod.rs +++ b/apps/agent/src/features/storage/mod.rs @@ -1,3 +1,4 @@ +pub mod backup; pub mod iscsi; pub mod local_file; pub mod registry; diff --git a/apps/agent/src/features/storage/routes.rs b/apps/agent/src/features/storage/routes.rs index 778d246..d99a26f 100644 --- a/apps/agent/src/features/storage/routes.rs +++ 
b/apps/agent/src/features/storage/routes.rs @@ -1,4 +1,6 @@ +use crate::features::storage::backup::{run_backup, run_restore, BackupParams, RestoreParams}; use crate::features::storage::registry::HostBackendRegistry; +use crate::features::storage::s3::BackupTargetConfig as S3Config; use axum::{ extract::State, http::StatusCode, @@ -6,6 +8,7 @@ use axum::{ routing::{get, post}, Json, Router, }; +use nexus_backup::ChunkerParams as NexusChunkerParams; use nexus_storage::{AttachedPath, BackendKind, VolumeHandle}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -162,6 +165,145 @@ pub async fn supported_kinds(State(s): State>) -> impl IntoRes (StatusCode::OK, Json(serde_json::json!({"kinds": kinds}))).into_response() } +#[derive(Deserialize)] +pub struct BackupReq { + pub backup_id: uuid::Uuid, + pub snapshot: nexus_storage::VolumeSnapshotHandle, + #[allow(dead_code)] // wire field; backend_kind is read from snapshot.backend_kind + pub backend_kind: nexus_storage::BackendKind, + pub target: BackupTargetWire, + pub encryption_key: [u8; 32], + pub chunker_params: ChunkerParamsWire, +} + +#[derive(Deserialize)] +pub struct BackupTargetWire { + pub endpoint: String, + #[serde(default)] + pub region: Option, + pub bucket: String, + #[serde(default)] + pub prefix: String, + pub access_key_id: String, + pub secret_access_key: String, +} + +#[derive(Deserialize)] +pub struct ChunkerParamsWire { + pub min_size: u32, + pub avg_size: u32, + pub max_size: u32, +} + +#[derive(Serialize)] +pub struct BackupRespWire { + pub manifest_object_key: String, + pub chunk_count: u64, + pub bytes_written: u64, + pub bytes_unique: u64, + pub duration_ms: u64, +} + +pub async fn backup( + State(s): State>, + Json(req): Json, +) -> impl IntoResponse { + let target = S3Config { + endpoint: req.target.endpoint, + region: req.target.region, + bucket: req.target.bucket, + prefix: req.target.prefix, + access_key_id: req.target.access_key_id, + secret_access_key: 
req.target.secret_access_key, + }; + let params = BackupParams { + backup_id: req.backup_id, + snapshot: req.snapshot, + target, + encryption_key: req.encryption_key, + chunker_params: NexusChunkerParams { + min_size: req.chunker_params.min_size, + avg_size: req.chunker_params.avg_size, + max_size: req.chunker_params.max_size, + }, + }; + match run_backup(Arc::new(s.registry.clone()), params).await { + Ok(o) => ( + StatusCode::OK, + Json(BackupRespWire { + manifest_object_key: o.manifest_object_key, + chunk_count: o.chunk_count, + bytes_written: o.bytes_written, + bytes_unique: o.bytes_unique, + duration_ms: o.duration_ms, + }), + ) + .into_response(), + Err(e) => { + tracing::error!("agent backup failed: {e:#}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response() + } + } +} + +#[derive(Deserialize)] +pub struct RestoreReq { + pub target_volume: nexus_storage::VolumeHandle, + pub target_attached: nexus_storage::AttachedPath, + pub manifest_object_key: String, + pub target: BackupTargetWire, + pub encryption_key: [u8; 32], +} + +#[derive(Serialize)] +pub struct RestoreRespWire { + pub bytes_written: u64, + pub duration_ms: u64, +} + +pub async fn restore( + State(_s): State>, + Json(req): Json, +) -> impl IntoResponse { + let target = S3Config { + endpoint: req.target.endpoint, + region: req.target.region, + bucket: req.target.bucket, + prefix: req.target.prefix, + access_key_id: req.target.access_key_id, + secret_access_key: req.target.secret_access_key, + }; + let params = RestoreParams { + target_volume: req.target_volume, + target_attached: req.target_attached, + manifest_object_key: req.manifest_object_key, + target, + encryption_key: req.encryption_key, + }; + match run_restore(params).await { + Ok(o) => ( + StatusCode::OK, + Json(RestoreRespWire { + bytes_written: o.bytes_written, + duration_ms: o.duration_ms, + }), + ) + .into_response(), + Err(e) => { + tracing::error!("agent restore failed: 
{e:#}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response() + } + } +} + pub fn router(state: Arc) -> Router { Router::new() .route("/attach", post(attach)) @@ -169,5 +311,7 @@ pub fn router(state: Arc) -> Router { .route("/populate", post(populate)) .route("/resize2fs", post(resize2fs)) .route("/supported_kinds", get(supported_kinds)) + .route("/backup", post(backup)) + .route("/restore", post(restore)) .with_state(state) } From fcffebc61f4abc510a2e7f0569738125ee53088a Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 11:59:30 +0700 Subject: [PATCH 19/37] chore: cargo fmt + lock update after B.T15 Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 1 + apps/agent/src/features/storage/backup.rs | 18 ++++++------------ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0980c64..6015688 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -63,6 +63,7 @@ dependencies = [ "aws-sdk-s3", "aws-types", "axum", + "blake3", "bytes", "chrono", "futures", diff --git a/apps/agent/src/features/storage/backup.rs b/apps/agent/src/features/storage/backup.rs index 03173f4..9dbfc67 100644 --- a/apps/agent/src/features/storage/backup.rs +++ b/apps/agent/src/features/storage/backup.rs @@ -70,14 +70,9 @@ pub async fn run_backup( bytes_written += ciphertext.len() as u64; if !exists { let cipher_len = ciphertext.len() as u64; - s3::put_object( - &s3, - ¶ms.target.bucket, - &object_key, - ciphertext.clone(), - ) - .await - .context("PUT chunk")?; + s3::put_object(&s3, ¶ms.target.bucket, &object_key, ciphertext.clone()) + .await + .context("PUT chunk")?; bytes_unique += cipher_len; } @@ -140,8 +135,7 @@ pub async fn run_restore(params: RestoreParams) -> Result { .await .context("GET manifest")?; let compressed = decrypt_manifest(&key, &blob).context("decrypt manifest")?; - let manifest = - Manifest::deserialize_compressed(&compressed).context("deserialize manifest")?; + let 
manifest = Manifest::deserialize_compressed(&compressed).context("deserialize manifest")?; let mut dst = tokio::fs::OpenOptions::new() .write(true) @@ -156,8 +150,8 @@ pub async fn run_restore(params: RestoreParams) -> Result { let ciphertext = s3::get_object(&s3, ¶ms.target.bucket, &object_key) .await .with_context(|| format!("GET chunk {}", hex::encode(chunk_ref.chunk_id)))?; - let plaintext = decrypt_chunk(&key, &ciphertext, &chunk_ref.plaintext_hash) - .context("decrypt chunk")?; + let plaintext = + decrypt_chunk(&key, &ciphertext, &chunk_ref.plaintext_hash).context("decrypt chunk")?; dst.seek(std::io::SeekFrom::Start(chunk_ref.plaintext_offset)) .await?; dst.write_all(&plaintext).await?; From e4abe6dd77c9db9a7f4763dfa2e05149cb51f3e7 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:06:56 +0700 Subject: [PATCH 20/37] feat(backup): BackupRepository for DB access Adds BackupRepository with insert_running, mark_completed, mark_failed, get, list_for_volume, list_completed_oldest_first, delete_row, and list_stale_running (for future GC). 
Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/repo.rs | 144 ++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 apps/manager/src/features/backups/repo.rs diff --git a/apps/manager/src/features/backups/repo.rs b/apps/manager/src/features/backups/repo.rs new file mode 100644 index 0000000..8488370 --- /dev/null +++ b/apps/manager/src/features/backups/repo.rs @@ -0,0 +1,144 @@ +use chrono::{DateTime, Utc}; +use sqlx::PgPool; +use uuid::Uuid; + +#[derive(Clone)] +pub struct BackupRepository { + pool: PgPool, +} + +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct BackupRow { + pub id: Uuid, + pub source_volume_id: Option, + #[allow(dead_code)] + pub source_snapshot_id: Option, + pub target_id: Uuid, + pub manifest_object_key: Option, + pub size_bytes: i64, + pub unique_bytes: i64, + pub chunk_count: i64, + pub status: String, + pub error_message: Option, + pub created_at: DateTime, + pub completed_at: Option>, + #[allow(dead_code)] + pub updated_at: DateTime, +} + +impl BackupRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + pub async fn insert_running( + &self, + source_volume_id: Uuid, + source_snapshot_id: Uuid, + target_id: Uuid, + ) -> sqlx::Result { + sqlx::query_as::<_, BackupRow>( + r#"INSERT INTO backup (source_volume_id, source_snapshot_id, target_id, status) + VALUES ($1, $2, $3, 'running') + RETURNING *"#, + ) + .bind(source_volume_id) + .bind(source_snapshot_id) + .bind(target_id) + .fetch_one(&self.pool) + .await + } + + pub async fn mark_completed( + &self, + id: Uuid, + manifest_object_key: &str, + size_bytes: i64, + unique_bytes: i64, + chunk_count: i64, + ) -> sqlx::Result<()> { + sqlx::query( + r#"UPDATE backup + SET status = 'completed', + manifest_object_key = $1, + size_bytes = $2, + unique_bytes = $3, + chunk_count = $4, + completed_at = now(), + updated_at = now() + WHERE id = $5"#, + ) + .bind(manifest_object_key) + .bind(size_bytes) + .bind(unique_bytes) + 
.bind(chunk_count) + .bind(id) + .execute(&self.pool) + .await?; + Ok(()) + } + + pub async fn mark_failed(&self, id: Uuid, error: &str) -> sqlx::Result<()> { + sqlx::query( + r#"UPDATE backup + SET status = 'failed', error_message = $1, updated_at = now() + WHERE id = $2"#, + ) + .bind(error) + .bind(id) + .execute(&self.pool) + .await?; + Ok(()) + } + + pub async fn get(&self, id: Uuid) -> sqlx::Result> { + sqlx::query_as::<_, BackupRow>(r#"SELECT * FROM backup WHERE id = $1"#) + .bind(id) + .fetch_optional(&self.pool) + .await + } + + pub async fn list_for_volume(&self, volume_id: Uuid) -> sqlx::Result> { + sqlx::query_as::<_, BackupRow>( + r#"SELECT * FROM backup WHERE source_volume_id = $1 ORDER BY created_at DESC"#, + ) + .bind(volume_id) + .fetch_all(&self.pool) + .await + } + + pub async fn list_completed_oldest_first( + &self, + volume_id: Uuid, + ) -> sqlx::Result> { + sqlx::query_as::<_, BackupRow>( + r#"SELECT * FROM backup WHERE source_volume_id = $1 AND status = 'completed' ORDER BY created_at ASC"#, + ) + .bind(volume_id) + .fetch_all(&self.pool) + .await + } + + pub async fn delete_row(&self, id: Uuid) -> sqlx::Result<()> { + sqlx::query("DELETE FROM backup WHERE id = $1") + .bind(id) + .execute(&self.pool) + .await?; + Ok(()) + } + + #[allow(dead_code)] + pub async fn list_stale_running( + &self, + older_than_minutes: i64, + ) -> sqlx::Result> { + sqlx::query_as::<_, BackupRow>( + r#"SELECT * FROM backup + WHERE status = 'running' + AND updated_at < now() - make_interval(mins => $1)"#, + ) + .bind(older_than_minutes) + .fetch_all(&self.pool) + .await + } +} From 9b9122bd4a4ca319791ba8a20931dda5d0f4b67a Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:07:05 +0700 Subject: [PATCH 21/37] feat(backup): manager backup service (create + restore + retention) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Orchestrates snapshot → agent_backup → DB update + S3 manifest pruning 
(enforce_retention). Restore provisions a new volume via the target backend, delegates to agent_restore, and records the resulting volume. Removes #[allow(dead_code)] from agent_backup/agent_restore now that they are called by the service. Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/service.rs | 270 ++++++++++++++++++ .../manager/src/features/storage/agent_rpc.rs | 2 - 2 files changed, 270 insertions(+), 2 deletions(-) create mode 100644 apps/manager/src/features/backups/service.rs diff --git a/apps/manager/src/features/backups/service.rs b/apps/manager/src/features/backups/service.rs new file mode 100644 index 0000000..c2ddc22 --- /dev/null +++ b/apps/manager/src/features/backups/service.rs @@ -0,0 +1,270 @@ +use crate::features::backup_targets::envelope; +use crate::features::backup_targets::repo::BackupTargetRepository; +use crate::features::backups::repo::BackupRepository; +use crate::features::backups::types::{BackupReq, BackupTargetConfig, ChunkerParams, RestoreReq}; +use crate::features::storage::agent_rpc; +use crate::features::volumes::repo::VolumeRepository; +use crate::AppState; +use anyhow::{anyhow, Context, Result}; +use nexus_storage::{BackendInstanceId, VolumeHandle}; +use uuid::Uuid; + +pub async fn create_backup(st: &AppState, volume_id: Uuid, target_id: Uuid) -> Result { + let backup_repo = BackupRepository::new(st.db.clone()); + let target_repo = BackupTargetRepository::new(st.db.clone()); + let target_row = target_repo + .get(target_id) + .await? + .ok_or_else(|| anyhow!("target {target_id} not found"))?; + + // Resolve volume → backend → host. 
+ let vol: (Uuid, String, i64, Option, Uuid) = sqlx::query_as( + r#"SELECT v.id, v.path, v.size_bytes, v.host_id, v.backend_id FROM volume v WHERE v.id = $1"#, + ) + .bind(volume_id) + .fetch_one(&st.db) + .await + .context("looking up volume")?; + let (vol_id, locator, size_bytes, host_id_opt, backend_id) = + (vol.0, vol.1, vol.2, vol.3, vol.4); + + let backend = st + .registry + .get(backend_id) + .ok_or_else(|| anyhow!("registry has no backend with id {backend_id}"))? + .clone(); + let host_id = host_id_opt.ok_or_else(|| { + anyhow!("volume has no home host (network-attached not supported by backup service yet)") + })?; + let host = st.hosts.get(host_id).await.context("getting host row")?; + + let volume_handle = VolumeHandle { + volume_id: vol_id, + backend_id: BackendInstanceId(backend_id), + backend_kind: backend.kind(), + locator, + size_bytes: size_bytes as u64, + }; + let snap_name = format!("backup-{}", Uuid::new_v4()); + let snap = backend + .snapshot(&volume_handle, &snap_name) + .await + .context("control-plane snapshot")?; + + let backup_row = backup_repo + .insert_running(volume_id, snap.snapshot_id, target_id) + .await?; + + let secret_access_key = envelope::unwrap_to_string(&target_row.encrypted_secret_access_key) + .context("decrypt secret_access_key")?; + let target_key = envelope::unwrap_to_array::<32>(&target_row.encrypted_target_key) + .context("decrypt target_key")?; + + let target_config = BackupTargetConfig { + endpoint: target_row.endpoint.clone(), + region: target_row.region.clone(), + bucket: target_row.bucket.clone(), + prefix: target_row.prefix.clone(), + access_key_id: target_row.access_key_id.clone(), + secret_access_key, + }; + + let req = BackupReq { + backup_id: backup_row.id, + snapshot: snap.clone(), + backend_kind: backend.kind(), + target: target_config, + encryption_key: target_key, + chunker_params: ChunkerParams::default(), + }; + + match agent_rpc::agent_backup(&host.addr, req).await { + Ok(resp) => { + backup_repo + 
.mark_completed( + backup_row.id, + &resp.manifest_object_key, + resp.bytes_written as i64, + resp.bytes_unique as i64, + resp.chunk_count as i64, + ) + .await?; + let _ = backend.delete_snapshot(snap).await; + let _ = enforce_retention(st, volume_id, &backup_repo).await; + Ok(backup_row.id) + } + Err(e) => { + backup_repo + .mark_failed(backup_row.id, &format!("{e:#}")) + .await?; + let _ = backend.delete_snapshot(snap).await; + Err(e) + } + } +} + +async fn enforce_retention( + st: &AppState, + volume_id: Uuid, + backup_repo: &BackupRepository, +) -> Result<()> { + let retain: Option = + sqlx::query_scalar(r#"SELECT backup_retain_count FROM volume WHERE id = $1"#) + .bind(volume_id) + .fetch_one(&st.db) + .await?; + let Some(retain) = retain else { + return Ok(()); + }; + if retain <= 0 { + return Ok(()); + } + + let mut completed = backup_repo.list_completed_oldest_first(volume_id).await?; + while completed.len() as i32 > retain { + let oldest = completed.remove(0); + // Best-effort delete the manifest from S3. 
+ if let Some(mkey) = oldest.manifest_object_key.as_deref() { + if let Some(t) = BackupTargetRepository::new(st.db.clone()) + .get(oldest.target_id) + .await + .ok() + .flatten() + { + if let Ok(secret) = envelope::unwrap_to_string(&t.encrypted_secret_access_key) { + let creds = aws_credential_types::Credentials::new( + &t.access_key_id, + &secret, + None, + None, + "nqrust-mgr-prune", + ); + let region = aws_sdk_s3::config::Region::new( + t.region.clone().unwrap_or_else(|| "us-east-1".into()), + ); + let s3_cfg = aws_sdk_s3::config::Builder::new() + .behavior_version_latest() + .endpoint_url(&t.endpoint) + .credentials_provider(creds) + .region(region) + .force_path_style(true) + .build(); + let client = aws_sdk_s3::Client::from_conf(s3_cfg); + let _ = client + .delete_object() + .bucket(&t.bucket) + .key(mkey) + .send() + .await; + } + } + } + backup_repo.delete_row(oldest.id).await?; + } + Ok(()) +} + +pub async fn restore_backup( + st: &AppState, + backup_id: Uuid, + target_backend_id: Uuid, +) -> Result { + let backup_repo = BackupRepository::new(st.db.clone()); + let target_repo = BackupTargetRepository::new(st.db.clone()); + let backup = backup_repo + .get(backup_id) + .await? + .ok_or_else(|| anyhow!("backup {backup_id} not found"))?; + if backup.status != "completed" { + return Err(anyhow!( + "backup is in status '{}', expected 'completed'", + backup.status + )); + } + let manifest_key = backup + .manifest_object_key + .ok_or_else(|| anyhow!("backup has no manifest_object_key"))?; + let target_row = target_repo + .get(backup.target_id) + .await? + .ok_or_else(|| anyhow!("target {} no longer exists", backup.target_id))?; + + let backend = st + .registry + .get(target_backend_id) + .ok_or_else(|| anyhow!("registry has no backend {target_backend_id}"))? 
+ .clone(); + let new_volume = backend + .provision(nexus_storage::CreateOpts { + name: format!("restore-{}", backup_id), + size_bytes: backup.size_bytes as u64, + description: Some(format!("restored from backup {backup_id}")), + }) + .await + .context("provision restore target")?; + + let kind_str = backend.kind().as_db_str(); + // Pick a host that supports the chosen backend. + let candidate_host_id: Option = { + let active = st.hosts.list_healthy().await?; + let mut chosen = None; + for h in active { + let kinds = st + .hosts + .supported_backend_kinds(h.id) + .await + .unwrap_or_default(); + if kinds.is_empty() || kinds.iter().any(|k| k == kind_str) { + chosen = Some(h.id); + break; + } + } + chosen + }; + let host_id = + candidate_host_id.ok_or_else(|| anyhow!("no host supports backend kind '{kind_str}'"))?; + let host = st.hosts.get(host_id).await?; + + let attached = agent_rpc::agent_attach(&host.addr, &new_volume).await?; + + let secret = envelope::unwrap_to_string(&target_row.encrypted_secret_access_key)?; + let target_key = envelope::unwrap_to_array::<32>(&target_row.encrypted_target_key)?; + + let req = RestoreReq { + target_volume: new_volume.clone(), + target_attached: attached.clone(), + manifest_object_key: manifest_key.clone(), + target: BackupTargetConfig { + endpoint: target_row.endpoint, + region: target_row.region, + bucket: target_row.bucket, + prefix: target_row.prefix, + access_key_id: target_row.access_key_id, + secret_access_key: secret, + }, + encryption_key: target_key, + }; + + match agent_rpc::agent_restore(&host.addr, req).await { + Ok(_) => { + let volume_repo = VolumeRepository::new(st.db.clone()); + volume_repo + .create( + &format!("restore-{}", backup_id), + Some(&format!("Restored from backup {backup_id}")), + &new_volume.locator, + new_volume.size_bytes as i64, + "raw", + Some(host_id), + target_backend_id, + ) + .await?; + Ok(new_volume.volume_id) + } + Err(e) => { + let _ = agent_rpc::agent_detach(&host.addr, &new_volume, 
&attached).await; + let _ = backend.destroy(new_volume.clone()).await; + Err(e) + } + } +} diff --git a/apps/manager/src/features/storage/agent_rpc.rs b/apps/manager/src/features/storage/agent_rpc.rs index 2ae9ecc..95e776c 100644 --- a/apps/manager/src/features/storage/agent_rpc.rs +++ b/apps/manager/src/features/storage/agent_rpc.rs @@ -116,7 +116,6 @@ pub async fn agent_resize2fs(host_addr: &str, attached: &AttachedPath) -> Result Ok(()) } -#[allow(dead_code)] pub async fn agent_backup(host_addr: &str, req: BackupReq) -> Result { let resp = Client::new() .post(agent_url(host_addr, "/v1/storage/backup")) @@ -132,7 +131,6 @@ pub async fn agent_backup(host_addr: &str, req: BackupReq) -> Result Ok(resp.json::().await?) } -#[allow(dead_code)] pub async fn agent_restore(host_addr: &str, req: RestoreReq) -> Result { let resp = Client::new() .post(agent_url(host_addr, "/v1/storage/restore")) From 7cfb3b6705103cd3ae781634e6ccae10f9b702b7 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:07:11 +0700 Subject: [PATCH 22/37] feat(backup): /v1/backups CRUD + /v1/volumes/:id/backup endpoints Wires GET /v1/backups, GET/DELETE /v1/backups/:id, POST /v1/backups/:id/restore, and POST /v1/volumes/:id/backup into the feature router.
Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/mod.rs | 5 + apps/manager/src/features/backups/routes.rs | 171 ++++++++++++++++++++ apps/manager/src/features/mod.rs | 2 + 3 files changed, 178 insertions(+) create mode 100644 apps/manager/src/features/backups/routes.rs diff --git a/apps/manager/src/features/backups/mod.rs b/apps/manager/src/features/backups/mod.rs index cd40856..135a140 100644 --- a/apps/manager/src/features/backups/mod.rs +++ b/apps/manager/src/features/backups/mod.rs @@ -1 +1,6 @@ +pub mod repo; +pub mod routes; +pub mod service; pub mod types; + +pub use routes::{router, volume_backup_router}; diff --git a/apps/manager/src/features/backups/routes.rs b/apps/manager/src/features/backups/routes.rs new file mode 100644 index 0000000..ea9c195 --- /dev/null +++ b/apps/manager/src/features/backups/routes.rs @@ -0,0 +1,171 @@ +use crate::features::backups::repo::{BackupRepository, BackupRow}; +use crate::features::backups::service; +use crate::AppState; +use axum::{ + extract::{Path, Query}, + http::StatusCode, + response::IntoResponse, + routing::{get, post}, + Extension, Json, Router, +}; +use nexus_types::{Backup, BackupStatus, CreateBackupRequest, RestoreRequest}; +use serde::Deserialize; +use uuid::Uuid; + +fn row_to_wire(row: BackupRow) -> Backup { + Backup { + id: row.id, + source_volume_id: row.source_volume_id, + target_id: row.target_id, + size_bytes: row.size_bytes, + unique_bytes: row.unique_bytes, + chunk_count: row.chunk_count, + status: match row.status.as_str() { + "running" => BackupStatus::Running, + "completed" => BackupStatus::Completed, + "failed" => BackupStatus::Failed, + "pruning" => BackupStatus::Pruning, + _ => BackupStatus::Failed, + }, + error_message: row.error_message, + created_at: row.created_at, + completed_at: row.completed_at, + } +} + +#[derive(Deserialize)] +pub struct ListQuery { + pub volume_id: Option, +} + +pub async fn list( + Extension(st): Extension, + Query(q): Query, +) -> impl 
IntoResponse { + let repo = BackupRepository::new(st.db.clone()); + let rows = if let Some(vid) = q.volume_id { + repo.list_for_volume(vid).await + } else { + sqlx::query_as::<_, BackupRow>(r#"SELECT * FROM backup ORDER BY created_at DESC LIMIT 200"#) + .fetch_all(&st.db) + .await + }; + match rows { + Ok(rs) => ( + StatusCode::OK, + Json(serde_json::json!({ + "items": rs.into_iter().map(row_to_wire).collect::>(), + })), + ) + .into_response(), + Err(e) => { + tracing::error!("backups list: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub async fn get_one( + Extension(st): Extension, + Path(id): Path, +) -> impl IntoResponse { + let repo = BackupRepository::new(st.db.clone()); + match repo.get(id).await { + Ok(Some(row)) => (StatusCode::OK, Json(row_to_wire(row))).into_response(), + Ok(None) => ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error":"not found"})), + ) + .into_response(), + Err(e) => { + tracing::error!("backups get: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub async fn create_for_volume( + Extension(st): Extension, + Path(volume_id): Path, + Json(req): Json, +) -> impl IntoResponse { + match service::create_backup(&st, volume_id, req.target_id).await { + Ok(id) => ( + StatusCode::CREATED, + Json(serde_json::json!({"backup_id": id})), + ) + .into_response(), + Err(e) => { + tracing::error!("create_backup: {e:#}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response() + } + } +} + +pub async fn restore( + Extension(st): Extension, + Path(backup_id): Path, + Json(req): Json, +) -> impl IntoResponse { + match service::restore_backup(&st, backup_id, req.target_backend_id).await { + Ok(volume_id) => ( + StatusCode::CREATED, + Json(serde_json::json!({"volume_id": volume_id})), + ) + .into_response(), + Err(e) => { + 
tracing::error!("restore_backup: {e:#}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response() + } + } +} + +pub async fn delete_one( + Extension(st): Extension, + Path(id): Path, +) -> impl IntoResponse { + let repo = BackupRepository::new(st.db.clone()); + sqlx::query(r#"UPDATE backup SET status = 'pruning', updated_at = now() WHERE id = $1"#) + .bind(id) + .execute(&st.db) + .await + .ok(); + match repo.delete_row(id).await { + Ok(()) => (StatusCode::NO_CONTENT, ()).into_response(), + Err(e) => { + tracing::error!("backups delete: {e}"); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() + } + } +} + +pub fn router() -> Router { + Router::new() + .route("/", get(list)) + .route("/:id", get(get_one).delete(delete_one)) + .route("/:id/restore", post(restore)) +} + +pub fn volume_backup_router() -> Router { + Router::new().route("/", post(create_for_volume)) +} diff --git a/apps/manager/src/features/mod.rs b/apps/manager/src/features/mod.rs index 2e02185..5efcd0b 100644 --- a/apps/manager/src/features/mod.rs +++ b/apps/manager/src/features/mod.rs @@ -90,6 +90,8 @@ pub fn router(state: AppState) -> Router { .nest("/v1/volumes", volumes::router()) .nest("/v1/storage_backends", storage_backends::router()) .nest("/v1/backup_targets", backup_targets::router()) + .nest("/v1/backups", backups::router()) + .nest("/v1/volumes/:id/backup", backups::volume_backup_router()) // SSO public routes (no auth — these ARE the auth flow) .nest("/v1/sso", sso::public_router()) // SSO admin routes (auth + admin required) From b4de2a4bd8475652ef18e915b621654223d3d2ab Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:15:17 +0700 Subject: [PATCH 23/37] feat(backup): daily mark-and-sweep GC per target Add gc.rs with gc_loop (wakes once per hour, runs at each target's configured gc_hour) and run_gc (records to backup_gc_run table). 
Wire POST /v1/backup_targets/:id/gc for ad-hoc trigger. Spawn gc_loop from main.rs after AppState construction. Co-Authored-By: Claude Sonnet 4.6 --- .../src/features/backup_targets/routes.rs | 32 ++- apps/manager/src/features/backups/gc.rs | 227 ++++++++++++++++++ apps/manager/src/features/backups/mod.rs | 3 + apps/manager/src/main.rs | 24 ++ 4 files changed, 283 insertions(+), 3 deletions(-) create mode 100644 apps/manager/src/features/backups/gc.rs diff --git a/apps/manager/src/features/backup_targets/routes.rs b/apps/manager/src/features/backup_targets/routes.rs index 4f974a2..2717892 100644 --- a/apps/manager/src/features/backup_targets/routes.rs +++ b/apps/manager/src/features/backup_targets/routes.rs @@ -182,12 +182,38 @@ pub async fn soft_delete( } pub async fn trigger_gc( - Extension(_st): Extension, - Path(_id): Path, + Extension(st): Extension, + Path(id): Path, ) -> impl IntoResponse { - // Wired by Task B.T17 once the GC task exists. + let target_repo = + crate::features::backup_targets::repo::BackupTargetRepository::new(st.db.clone()); + let target = match target_repo.get(id).await { + Ok(Some(t)) => t, + Ok(None) => { + return ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error":"not found"})), + ) + .into_response() + } + Err(e) => { + tracing::error!("backup_targets trigger_gc lookup: {e}"); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response(); + } + }; + let pool = st.db.clone(); + tokio::spawn(async move { + if let Err(e) = crate::features::backups::gc::run_gc(&pool, &target).await { + tracing::error!("ad-hoc GC failed: {e:#}"); + } + }); ( StatusCode::ACCEPTED, Json(serde_json::json!({"queued": true})), ) + .into_response() } diff --git a/apps/manager/src/features/backups/gc.rs b/apps/manager/src/features/backups/gc.rs new file mode 100644 index 0000000..617e2b6 --- /dev/null +++ b/apps/manager/src/features/backups/gc.rs @@ -0,0 +1,227 @@ +//! 
Daily mark-and-sweep GC per backup_target. + +use crate::features::backup_targets::envelope; +use crate::features::backup_targets::repo::{BackupTargetRepository, BackupTargetRow}; +use anyhow::{Context, Result}; +use aws_sdk_s3::{ + config::{Builder as S3ConfBuilder, Region}, + types::{Delete, ObjectIdentifier}, + Client, +}; +use chrono::{Timelike, Utc}; +use nexus_backup::{decrypt_manifest, ChunkKey, Manifest}; +use sqlx::PgPool; +use std::collections::HashSet; + +pub async fn gc_loop(pool: PgPool) { + loop { + let now = Utc::now(); + let next_check = (60 - now.minute()) as u64 * 60 - now.second() as u64; + tokio::time::sleep(std::time::Duration::from_secs(next_check.max(60))).await; + + let now = Utc::now(); + let repo = BackupTargetRepository::new(pool.clone()); + match repo.list_active().await { + Ok(targets) => { + for t in targets { + if (t.gc_hour as u32) == now.hour() { + if let Err(e) = run_gc(&pool, &t).await { + tracing::error!(target=%t.name, "GC run failed: {e:#}"); + } + } + } + } + Err(e) => tracing::error!("gc_loop list_active: {e}"), + } + } +} + +pub async fn run_gc(pool: &PgPool, target: &BackupTargetRow) -> Result<()> { + let run_id: uuid::Uuid = sqlx::query_scalar( + r#"INSERT INTO backup_gc_run (target_id, status) VALUES ($1, 'running') RETURNING id"#, + ) + .bind(target.id) + .fetch_one(pool) + .await?; + + let result = sweep(target).await; + match result { + Ok((bytes, chunks)) => { + sqlx::query( + r#"UPDATE backup_gc_run SET status='completed', completed_at=now(), + bytes_freed=$1, chunks_deleted=$2 WHERE id = $3"#, + ) + .bind(bytes as i64) + .bind(chunks as i64) + .bind(run_id) + .execute(pool) + .await?; + tracing::info!(target=%target.name, bytes_freed=bytes, chunks_deleted=chunks, "GC complete"); + Ok(()) + } + Err(e) => { + sqlx::query( + r#"UPDATE backup_gc_run SET status='failed', completed_at=now(), error_message=$1 WHERE id=$2"#, + ) + .bind(e.to_string()) + .bind(run_id) + .execute(pool) + .await + .ok(); + Err(e) + } + } +} + 
+async fn sweep(target: &BackupTargetRow) -> Result<(u64, u64)> { + let secret = envelope::unwrap_to_string(&target.encrypted_secret_access_key)?; + let target_key = envelope::unwrap_to_array::<32>(&target.encrypted_target_key)?; + + let creds = aws_credential_types::Credentials::new( + &target.access_key_id, + &secret, + None, + None, + "nqrust-gc", + ); + let region = Region::new(target.region.clone().unwrap_or_else(|| "us-east-1".into())); + let s3_cfg = S3ConfBuilder::new() + .behavior_version_latest() + .endpoint_url(&target.endpoint) + .credentials_provider(creds) + .region(region) + .force_path_style(true) + .build(); + let client = Client::from_conf(s3_cfg); + + // 1. Mark: walk manifests, collect referenced chunk ids. + let prefix_manifests = if target.prefix.is_empty() { + "manifests/".to_string() + } else { + format!("{}/manifests/", target.prefix.trim_end_matches('/')) + }; + let mut referenced: HashSet<[u8; 32]> = HashSet::new(); + let mut continuation: Option = None; + loop { + let mut req = client + .list_objects_v2() + .bucket(&target.bucket) + .prefix(&prefix_manifests); + if let Some(c) = continuation.as_deref() { + req = req.continuation_token(c); + } + let resp = req.send().await.context("LIST manifests")?; + for obj in resp.contents() { + let Some(k) = obj.key() else { continue }; + let blob = client + .get_object() + .bucket(&target.bucket) + .key(k) + .send() + .await + .with_context(|| format!("GET {k}"))? + .body + .collect() + .await + .context("body collect")? + .into_bytes() + .to_vec(); + let chunk_key = ChunkKey::from_bytes(target_key); + let compressed = decrypt_manifest(&chunk_key, &blob).context("decrypt manifest")?; + let m: Manifest = + Manifest::deserialize_compressed(&compressed).context("deserialize manifest")?; + for c in m.chunks { + referenced.insert(c.chunk_id); + } + } + if resp.is_truncated().unwrap_or(false) { + continuation = resp.next_continuation_token().map(String::from); + } else { + break; + } + } + + // 2. 
Sweep: walk chunks, delete unreferenced ones older than 24h. + let prefix_chunks = if target.prefix.is_empty() { + "chunks/".to_string() + } else { + format!("{}/chunks/", target.prefix.trim_end_matches('/')) + }; + let cutoff = aws_smithy_types::DateTime::from_secs( + (Utc::now() - chrono::Duration::hours(24)).timestamp(), + ); + let mut bytes_freed: u64 = 0; + let mut chunks_deleted: u64 = 0; + + let mut continuation: Option = None; + loop { + let mut req = client + .list_objects_v2() + .bucket(&target.bucket) + .prefix(&prefix_chunks); + if let Some(c) = continuation.as_deref() { + req = req.continuation_token(c); + } + let resp = req.send().await.context("LIST chunks")?; + let mut to_delete: Vec = Vec::new(); + for obj in resp.contents() { + let Some(k) = obj.key() else { continue }; + let too_recent = obj.last_modified().map(|lm| lm > &cutoff).unwrap_or(true); + if too_recent { + continue; + } + let parts: Vec<&str> = k.rsplit('/').collect(); + if parts.is_empty() { + continue; + } + let id_hex = parts[0]; + if id_hex.len() != 64 { + continue; + } + let mut chunk_id = [0u8; 32]; + if hex::decode_to_slice(id_hex, &mut chunk_id).is_err() { + continue; + } + if referenced.contains(&chunk_id) { + continue; + } + bytes_freed += obj.size().unwrap_or(0) as u64; + chunks_deleted += 1; + if let Ok(oid) = ObjectIdentifier::builder().key(k).build() { + to_delete.push(oid); + } + if to_delete.len() == 1000 { + if let Ok(del) = Delete::builder() + .set_objects(Some(std::mem::take(&mut to_delete))) + .build() + { + client + .delete_objects() + .bucket(&target.bucket) + .delete(del) + .send() + .await + .context("DELETE chunks batch")?; + } + } + } + if !to_delete.is_empty() { + if let Ok(del) = Delete::builder().set_objects(Some(to_delete)).build() { + client + .delete_objects() + .bucket(&target.bucket) + .delete(del) + .send() + .await + .context("DELETE chunks final")?; + } + } + if resp.is_truncated().unwrap_or(false) { + continuation = 
resp.next_continuation_token().map(String::from); + } else { + break; + } + } + + Ok((bytes_freed, chunks_deleted)) +} diff --git a/apps/manager/src/features/backups/mod.rs b/apps/manager/src/features/backups/mod.rs index 135a140..273847a 100644 --- a/apps/manager/src/features/backups/mod.rs +++ b/apps/manager/src/features/backups/mod.rs @@ -1,5 +1,8 @@ +pub mod gc; +pub mod reconciler; pub mod repo; pub mod routes; +pub mod scheduler; pub mod service; pub mod types; diff --git a/apps/manager/src/main.rs b/apps/manager/src/main.rs index 9438d18..4dbd9b4 100644 --- a/apps/manager/src/main.rs +++ b/apps/manager/src/main.rs @@ -240,6 +240,30 @@ async fn main() -> anyhow::Result<()> { }); } + // Backup GC loop: daily mark-and-sweep per target. + { + let pool = state.db.clone(); + tokio::spawn(async move { + crate::features::backups::gc::gc_loop(pool).await; + }); + } + + // Backup reconciler: ages stuck 'running' rows after 24h. + { + let pool = state.db.clone(); + tokio::spawn(async move { + crate::features::backups::reconciler::reconcile_loop(pool).await; + }); + } + + // Backup scheduler: per-volume cron-triggered backups. + { + let st = state.clone(); + tokio::spawn(async move { + crate::features::backups::scheduler::schedule_loop(st).await; + }); + } + let openapi = docs::ApiDoc::openapi(); if let Err(err) = docs::write_openapi_yaml(&openapi).await { warn!(error = ?err, "failed to write OpenAPI specification to disk"); From c72d782f810ed6ce2996bc3c938f0b7bf6e6b70c Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:15:24 +0700 Subject: [PATCH 24/37] feat(backup): reconciler ages stuck 'running' rows after 24h Add reconciler.rs with reconcile_loop that wakes every 5 minutes, queries list_stale_running(1440), and marks them failed with an explanatory message. Remove #[allow(dead_code)] from BackupRepository::list_stale_running (now used). Spawn from main.rs. 
Co-Authored-By: Claude Sonnet 4.6 --- .../src/features/backups/reconciler.rs | 29 +++++++++++++++++++ apps/manager/src/features/backups/repo.rs | 1 - 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 apps/manager/src/features/backups/reconciler.rs diff --git a/apps/manager/src/features/backups/reconciler.rs b/apps/manager/src/features/backups/reconciler.rs new file mode 100644 index 0000000..9b46b86 --- /dev/null +++ b/apps/manager/src/features/backups/reconciler.rs @@ -0,0 +1,29 @@ +//! Periodically marks `running` backups older than 24h as `failed`. + +use crate::features::backups::repo::BackupRepository; +use sqlx::PgPool; + +const STALE_MINUTES: i64 = 24 * 60; + +pub async fn reconcile_loop(pool: PgPool) { + loop { + tokio::time::sleep(std::time::Duration::from_secs(5 * 60)).await; + let repo = BackupRepository::new(pool.clone()); + match repo.list_stale_running(STALE_MINUTES).await { + Ok(rows) => { + for r in rows { + let _ = repo + .mark_failed( + r.id, + &format!( + "marked failed by reconciler: status was 'running' for >{STALE_MINUTES} minutes" + ), + ) + .await; + tracing::warn!(backup_id=%r.id, "reconciler aged stuck 'running' to 'failed'"); + } + } + Err(e) => tracing::error!("reconciler: {e}"), + } + } +} diff --git a/apps/manager/src/features/backups/repo.rs b/apps/manager/src/features/backups/repo.rs index 8488370..1518b74 100644 --- a/apps/manager/src/features/backups/repo.rs +++ b/apps/manager/src/features/backups/repo.rs @@ -127,7 +127,6 @@ impl BackupRepository { Ok(()) } - #[allow(dead_code)] pub async fn list_stale_running( &self, older_than_minutes: i64, From 0ba23c859e0a8c1e6abff93e7125a9e8d9013986 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:15:48 +0700 Subject: [PATCH 25/37] feat(backup): per-volume cron scheduler Add scheduler.rs with schedule_loop that wakes once per minute, queries volumes with backup_cron + backup_target_id set, computes the next fire time after the last successful backup, and 
spawns create_backup when due. Uses the cron 0.12 crate's Schedule::after API. Co-Authored-By: Claude Sonnet 4.6 --- .../manager/src/features/backups/scheduler.rs | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 apps/manager/src/features/backups/scheduler.rs diff --git a/apps/manager/src/features/backups/scheduler.rs b/apps/manager/src/features/backups/scheduler.rs new file mode 100644 index 0000000..bf0d660 --- /dev/null +++ b/apps/manager/src/features/backups/scheduler.rs @@ -0,0 +1,64 @@ +//! Per-volume cron scheduler. Wakes once a minute, checks every volume +//! that has backup_cron + backup_target_id set, dispatches a backup if due. + +use crate::AppState; +use chrono::Utc; +use cron::Schedule; +use std::str::FromStr; +use std::time::Duration; +use uuid::Uuid; + +pub async fn schedule_loop(state: AppState) { + loop { + tokio::time::sleep(Duration::from_secs(60)).await; + if let Err(e) = tick(&state).await { + tracing::error!("scheduler tick: {e:#}"); + } + } +} + +type VolumeScheduleRow = ( + Uuid, + Option, + Option, + Option>, +); + +async fn tick(st: &AppState) -> anyhow::Result<()> { + let rows: Vec = sqlx::query_as( + r#"SELECT v.id, v.backup_cron, v.backup_target_id, + (SELECT MAX(created_at) FROM backup b WHERE b.source_volume_id = v.id) AS last_backup + FROM volume v + WHERE v.backup_cron IS NOT NULL AND v.backup_target_id IS NOT NULL"#, + ) + .fetch_all(&st.db) + .await?; + + let now = Utc::now(); + for (volume_id, cron_str, target_id, last) in rows { + let (Some(cron_str), Some(target_id)) = (cron_str, target_id) else { + continue; + }; + let Ok(schedule) = Schedule::from_str(&cron_str) else { + tracing::warn!(volume_id=%volume_id, "invalid cron: {cron_str}"); + continue; + }; + let after = last.unwrap_or(now - chrono::Duration::days(365)); + if let Some(next_fire) = schedule.after(&after).next() { + if next_fire <= now { + tracing::info!(volume_id=%volume_id, "scheduler firing backup"); + let st_cl = st.clone(); + 
tokio::spawn(async move { + if let Err(e) = crate::features::backups::service::create_backup( + &st_cl, volume_id, target_id, + ) + .await + { + tracing::error!(volume_id=%volume_id, "scheduled backup failed: {e:#}"); + } + }); + } + } + } + Ok(()) +} From ef1ff5ddb35eb6bfd79c161c7ff2fad6888c8b52 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:18:54 +0700 Subject: [PATCH 26/37] feat(backup): PATCH /v1/volumes/:id/backup_schedule Adds the patch_backup_schedule handler that validates cron syntax via the cron crate and performs a partial UPDATE on backup_cron, backup_retain_count, and backup_target_id using COALESCE semantics. Routes registered under /:id/backup_schedule with axum::routing::patch. Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/volumes/mod.rs | 6 ++- apps/manager/src/features/volumes/routes.rs | 43 +++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/apps/manager/src/features/volumes/mod.rs b/apps/manager/src/features/volumes/mod.rs index 9e6998b..b694cd3 100644 --- a/apps/manager/src/features/volumes/mod.rs +++ b/apps/manager/src/features/volumes/mod.rs @@ -1,5 +1,5 @@ use axum::{ - routing::{get, post}, + routing::{get, patch, post}, Router, }; @@ -12,4 +12,8 @@ pub fn router() -> Router { .route("/:id", get(routes::get).delete(routes::delete)) .route("/:id/attach", post(routes::attach)) .route("/:id/detach", post(routes::detach)) + .route( + "/:id/backup_schedule", + patch(routes::patch_backup_schedule), + ) } diff --git a/apps/manager/src/features/volumes/routes.rs b/apps/manager/src/features/volumes/routes.rs index a2284b3..1a75b5c 100644 --- a/apps/manager/src/features/volumes/routes.rs +++ b/apps/manager/src/features/volumes/routes.rs @@ -10,6 +10,49 @@ use serde::{Deserialize, Serialize}; use tracing::error; use uuid::Uuid; +#[derive(serde::Deserialize, utoipa::ToSchema)] +pub struct PatchBackupScheduleRequest { + pub cron: Option, + pub retain_count: Option, + pub target_id: 
Option, +} + +pub async fn patch_backup_schedule( + Extension(st): Extension, + Path(id): Path, + Json(req): Json, +) -> impl IntoResponse { + use std::str::FromStr as _; + if let Some(c) = &req.cron { + if let Err(e) = cron::Schedule::from_str(c) { + return ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({"error": format!("invalid cron: {e}")})), + ) + .into_response(); + } + } + let res = sqlx::query( + r#"UPDATE volume SET backup_cron = COALESCE($1, backup_cron), + backup_retain_count = COALESCE($2, backup_retain_count), + backup_target_id = COALESCE($3, backup_target_id) + WHERE id = $4"#, + ) + .bind(req.cron) + .bind(req.retain_count) + .bind(req.target_id) + .bind(id) + .execute(&st.db) + .await; + match res { + Ok(_) => (StatusCode::NO_CONTENT, ()).into_response(), + Err(e) => { + tracing::error!("patch_backup_schedule: {e}"); + (StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({"error":"db"}))).into_response() + } + } +} + #[derive(Debug, Deserialize)] pub struct CreateVolumeRequest { pub name: String, From 6199f61f2715a9422f1b239c1c7b81916226f96f Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:20:05 +0700 Subject: [PATCH 27/37] feat(backup): index-rebuild subcommand for DR Adds backups::index_rebuild::run() that pages through S3 manifest objects for a target, decrypts each with the target key, and INSERT-or-skips into the backup table. Wired as `manager backup index-rebuild --target ` via raw args parsed in main() before AppState construction, so the binary doubles as a DR CLI tool without a separate binary. 
Co-Authored-By: Claude Sonnet 4.6 --- .../src/features/backups/index_rebuild.rs | 93 +++++++++++++++++++ apps/manager/src/features/backups/mod.rs | 1 + apps/manager/src/main.rs | 16 ++++ 3 files changed, 110 insertions(+) create mode 100644 apps/manager/src/features/backups/index_rebuild.rs diff --git a/apps/manager/src/features/backups/index_rebuild.rs b/apps/manager/src/features/backups/index_rebuild.rs new file mode 100644 index 0000000..9035120 --- /dev/null +++ b/apps/manager/src/features/backups/index_rebuild.rs @@ -0,0 +1,93 @@ +//! Reconstruct the `backup` table from S3 manifests. +//! Run via: `manager backup index-rebuild --target `. + +use crate::features::backup_targets::envelope; +use crate::features::backup_targets::repo::BackupTargetRepository; +use anyhow::{Context, Result}; +use aws_sdk_s3::{config::{Builder, Region}, Client}; +use chrono::TimeZone; +use nexus_backup::{decrypt_manifest, ChunkKey, Manifest}; +use sqlx::PgPool; +use uuid::Uuid; + +pub async fn run(pool: PgPool, target_id: Uuid) -> Result<()> { + let repo = BackupTargetRepository::new(pool.clone()); + let target = repo.get(target_id).await? 
+ .ok_or_else(|| anyhow::anyhow!("target {target_id} not found"))?; + + let secret = envelope::unwrap_to_string(&target.encrypted_secret_access_key)?; + let target_key = envelope::unwrap_to_array::<32>(&target.encrypted_target_key)?; + + let creds = aws_credential_types::Credentials::new( + &target.access_key_id, &secret, None, None, "nqrust-rebuild"); + let region = Region::new(target.region.clone().unwrap_or_else(|| "us-east-1".into())); + let s3_cfg = Builder::new() + .behavior_version_latest() + .endpoint_url(&target.endpoint) + .credentials_provider(creds) + .region(region) + .force_path_style(true) + .build(); + let client = Client::from_conf(s3_cfg); + + let prefix = if target.prefix.is_empty() { + "manifests/".to_string() + } else { + format!("{}/manifests/", target.prefix.trim_end_matches('/')) + }; + + let mut reconstructed = 0usize; + let mut skipped = 0usize; + let mut continuation: Option = None; + loop { + let mut req = client.list_objects_v2().bucket(&target.bucket).prefix(&prefix); + if let Some(c) = continuation.as_deref() { req = req.continuation_token(c); } + let resp = req.send().await.context("LIST manifests")?; + for obj in resp.contents() { + let Some(k) = obj.key() else { continue }; + let blob = client.get_object().bucket(&target.bucket).key(k).send().await + .with_context(|| format!("GET {k}"))? + .body.collect().await? 
+ .into_bytes().to_vec(); + let key = ChunkKey::from_bytes(target_key); + let compressed = decrypt_manifest(&key, &blob)?; + let m = Manifest::deserialize_compressed(&compressed)?; + let existed: Option = sqlx::query_scalar( + r#"SELECT id FROM backup WHERE id = $1"#, + ) + .bind(m.backup_id) + .fetch_optional(&pool) + .await?; + if existed.is_some() { skipped += 1; continue; } + let total_size: i64 = m.chunks.iter().map(|c| c.ciphertext_length as i64).sum(); + let chunk_count = m.chunks.len() as i64; + let created_at = chrono::Utc.timestamp_opt(m.created_at_unix_seconds, 0).single() + .unwrap_or_else(chrono::Utc::now); + sqlx::query( + r#"INSERT INTO backup + (id, source_volume_id, source_snapshot_id, target_id, + manifest_object_key, size_bytes, unique_bytes, chunk_count, + status, created_at, completed_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'completed', $9, $9, now())"#, + ) + .bind(m.backup_id) + .bind(m.source_volume_id) + .bind(m.source_snapshot_id) + .bind(target_id) + .bind(k) + .bind(total_size) + .bind(0i64) + .bind(chunk_count) + .bind(created_at) + .execute(&pool) + .await + .ok(); + reconstructed += 1; + } + if resp.is_truncated().unwrap_or(false) { + continuation = resp.next_continuation_token().map(String::from); + } else { break; } + } + println!("index-rebuild: reconstructed {reconstructed}, skipped (already in DB) {skipped}"); + Ok(()) +} diff --git a/apps/manager/src/features/backups/mod.rs b/apps/manager/src/features/backups/mod.rs index 273847a..ded3b79 100644 --- a/apps/manager/src/features/backups/mod.rs +++ b/apps/manager/src/features/backups/mod.rs @@ -1,4 +1,5 @@ pub mod gc; +pub mod index_rebuild; pub mod reconciler; pub mod repo; pub mod routes; diff --git a/apps/manager/src/main.rs b/apps/manager/src/main.rs index 4dbd9b4..f27884f 100644 --- a/apps/manager/src/main.rs +++ b/apps/manager/src/main.rs @@ -69,6 +69,22 @@ async fn main() -> anyhow::Result<()> { let filter = 
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::fmt().with_env_filter(filter).init(); + let args: Vec = std::env::args().collect(); + if args.len() >= 4 && args[1] == "backup" && args[2] == "index-rebuild" { + if args.len() != 5 || args[3] != "--target" { + eprintln!("usage: manager backup index-rebuild --target "); + std::process::exit(2); + } + let target_id: uuid::Uuid = match args[4].parse() { + Ok(u) => u, + Err(e) => { eprintln!("bad uuid: {e}"); std::process::exit(2); } + }; + let database_url = std::env::var("DATABASE_URL").map_err(|e| anyhow::anyhow!("DATABASE_URL: {e}"))?; + let pool = sqlx::PgPool::connect(&database_url).await?; + crate::features::backups::index_rebuild::run(pool, target_id).await?; + return Ok(()); + } + let db = PgPool::connect(&std::env::var("DATABASE_URL")?).await?; sqlx::migrate!("./migrations").run(&db).await?; From 7347a19679d76148d20f71fad59aa831f36f746b Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:25:42 +0700 Subject: [PATCH 28/37] feat(backup): UI types + facade methods + query hooks Add BackupTarget, Backup, BackupSchedule types; facade API methods for backup targets, backups, restore, and schedule; useBackupTargets and useBackups TanStack Query hooks (B.T22). 
Co-Authored-By: Claude Sonnet 4.6 --- apps/ui/lib/api/facade.ts | 45 ++++++++++++++++++++++++++++++++++ apps/ui/lib/queries.ts | 23 ++++++++++++++++++ apps/ui/lib/types/index.ts | 49 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+) diff --git a/apps/ui/lib/api/facade.ts b/apps/ui/lib/api/facade.ts index be7611f..1acc1b3 100644 --- a/apps/ui/lib/api/facade.ts +++ b/apps/ui/lib/api/facade.ts @@ -101,6 +101,10 @@ import type { ContainerMetric, MetricsQueryParams, StorageBackendListResponse, + BackupTarget, + CreateBackupTargetRequest, + Backup, + BackupSchedule, } from "@/lib/types" /** @@ -915,6 +919,47 @@ export class FacadeApi { async activateLicenseFile(fileContent: string): Promise { return apiClient.post("/licensing/license/activate-file", { file_content: fileContent }); } + + // ============== + // Backup Targets + // ============== + + async listBackupTargets(): Promise<{ items: BackupTarget[] }> { + return apiClient.get<{ items: BackupTarget[] }>("/backup_targets"); + } + + async createBackupTarget(req: CreateBackupTargetRequest): Promise { + return apiClient.post("/backup_targets", req); + } + + async deleteBackupTarget(id: string): Promise { + return apiClient.delete(`/backup_targets/${id}`); + } + + // ============== + // Backups + // ============== + + async listBackups(volumeId?: string): Promise<{ items: Backup[] }> { + const q = volumeId ? 
`?volume_id=${volumeId}` : ""; + return apiClient.get<{ items: Backup[] }>(`/backups${q}`); + } + + async createBackup(volumeId: string, targetId: string): Promise<{ backup_id: string }> { + return apiClient.post<{ backup_id: string }>(`/volumes/${volumeId}/backup`, { target_id: targetId }); + } + + async restoreBackup(backupId: string, targetBackendId: string): Promise<{ volume_id: string }> { + return apiClient.post<{ volume_id: string }>(`/backups/${backupId}/restore`, { target_backend_id: targetBackendId }); + } + + async deleteBackup(backupId: string): Promise { + return apiClient.delete(`/backups/${backupId}`); + } + + async patchBackupSchedule(volumeId: string, req: Partial): Promise { + return apiClient.patch(`/volumes/${volumeId}/backup_schedule`, req); + } } // Export singleton instance diff --git a/apps/ui/lib/queries.ts b/apps/ui/lib/queries.ts index 8554da5..ef621b0 100644 --- a/apps/ui/lib/queries.ts +++ b/apps/ui/lib/queries.ts @@ -122,6 +122,10 @@ export const queryKeys = { // storage backends storageBackends: () => ["storage_backends"] as const, + + // backups + backupTargets: () => ["backup_targets"] as const, + backups: (vid?: string) => ["backups", vid ?? 
"all"] as const, } // Function Query @@ -1412,3 +1416,22 @@ export function useStorageBackends() { staleTime: 60_000, }); } + +// ============== +// Backups +// ============== + +export function useBackupTargets() { + return useQuery({ + queryKey: queryKeys.backupTargets(), + queryFn: async () => (await facadeApi.listBackupTargets()).items, + }); +} + +export function useBackups(volumeId?: string) { + return useQuery({ + queryKey: queryKeys.backups(volumeId), + queryFn: async () => (await facadeApi.listBackups(volumeId)).items, + refetchInterval: 5_000, // for in-progress backups + }); +} diff --git a/apps/ui/lib/types/index.ts b/apps/ui/lib/types/index.ts index 7e83c8e..1ff69d1 100644 --- a/apps/ui/lib/types/index.ts +++ b/apps/ui/lib/types/index.ts @@ -1085,3 +1085,52 @@ export interface StorageBackend { export interface StorageBackendListResponse { items: StorageBackend[]; } + +// ======================================== +// Backup Types +// ======================================== + +export type BackupStatus = "running" | "completed" | "failed" | "pruning"; + +export interface BackupTarget { + id: string; + name: string; + endpoint: string; + region?: string; + bucket: string; + prefix: string; + access_key_id: string; + gc_hour: number; + created_at: string; + deleted_at?: string | null; +} + +export interface CreateBackupTargetRequest { + name: string; + endpoint: string; + region?: string; + bucket: string; + prefix?: string; + access_key_id: string; + secret_access_key: string; + gc_hour?: number; +} + +export interface Backup { + id: string; + source_volume_id?: string; + target_id: string; + size_bytes: number; + unique_bytes: number; + chunk_count: number; + status: BackupStatus; + error_message?: string; + created_at: string; + completed_at?: string; +} + +export interface BackupSchedule { + cron: string; + retain_count: number; + target_id: string; +} From 778e81e3c29478933a5530e5de7d1fbd6632cd0d Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 
Apr 2026 12:29:38 +0700 Subject: [PATCH 29/37] =?UTF-8?q?feat(backup):=20UI=20components=20?= =?UTF-8?q?=E2=80=94=20target=20form,=20list,=20restore,=20schedule,=20pag?= =?UTF-8?q?e?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add backup-target-form, backup-list, restore-dialog, backup-schedule-editor components; VolumeBackupsTab for volume detail; backup-targets page route; volumes/[id] detail page with Backups tab (B.T23). Co-Authored-By: Claude Sonnet 4.6 --- .../app/(dashboard)/backup-targets/page.tsx | 27 ++++ apps/ui/app/(dashboard)/volumes/[id]/page.tsx | 136 ++++++++++++++++++ apps/ui/components/backup/backup-list.tsx | 57 ++++++++ .../backup/backup-schedule-editor.tsx | 83 +++++++++++ .../components/backup/backup-target-form.tsx | 88 ++++++++++++ apps/ui/components/backup/restore-dialog.tsx | 71 +++++++++ .../components/volume/volume-backups-tab.tsx | 61 ++++++++ 7 files changed, 523 insertions(+) create mode 100644 apps/ui/app/(dashboard)/backup-targets/page.tsx create mode 100644 apps/ui/app/(dashboard)/volumes/[id]/page.tsx create mode 100644 apps/ui/components/backup/backup-list.tsx create mode 100644 apps/ui/components/backup/backup-schedule-editor.tsx create mode 100644 apps/ui/components/backup/backup-target-form.tsx create mode 100644 apps/ui/components/backup/restore-dialog.tsx create mode 100644 apps/ui/components/volume/volume-backups-tab.tsx diff --git a/apps/ui/app/(dashboard)/backup-targets/page.tsx b/apps/ui/app/(dashboard)/backup-targets/page.tsx new file mode 100644 index 0000000..5abad45 --- /dev/null +++ b/apps/ui/app/(dashboard)/backup-targets/page.tsx @@ -0,0 +1,27 @@ +"use client"; +import { useBackupTargets } from "@/lib/queries"; +import { BackupTargetForm } from "@/components/backup/backup-target-form"; + +export default function BackupTargetsPage() { + const { data: targets, isLoading } = useBackupTargets(); + return ( +
+

Backup targets

+ +
+

Configured targets

+ {isLoading &&

Loading…

} +
    + {(targets ?? []).map((t) => ( +
  • +
    {t.name}
    +
    + {t.endpoint} → s3://{t.bucket}/{t.prefix} +
    +
  • + ))} +
+
+
+ ); +} diff --git a/apps/ui/app/(dashboard)/volumes/[id]/page.tsx b/apps/ui/app/(dashboard)/volumes/[id]/page.tsx new file mode 100644 index 0000000..859f1b5 --- /dev/null +++ b/apps/ui/app/(dashboard)/volumes/[id]/page.tsx @@ -0,0 +1,136 @@ +"use client"; + +import { use } from "react"; +import Link from "next/link"; +import { useSearchParams } from "next/navigation"; +import { ArrowLeft, HardDrive, Loader2 } from "lucide-react"; +import { Badge } from "@/components/ui/badge"; +import { Button } from "@/components/ui/button"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { ReusableTabs, TabItem, TabContentItem } from "@/components/dashboard/tabs-new"; +import { VolumeBackupsTab } from "@/components/volume/volume-backups-tab"; +import { useVolume } from "@/lib/queries"; + +function getStatusColor(status: string) { + switch (status) { + case "available": + return "bg-green-500/10 text-green-700 border-green-200"; + case "attached": + return "bg-blue-500/10 text-blue-700 border-blue-200"; + case "creating": + return "bg-yellow-500/10 text-yellow-700 border-yellow-200"; + case "error": + return "bg-red-500/10 text-red-700 border-red-200"; + default: + return "bg-gray-500/10 text-gray-700 border-gray-200"; + } +} + +export default function VolumeDetailPage({ + params, +}: { + params: Promise<{ id: string }>; +}) { + const { id } = use(params); + const searchParams = useSearchParams(); + const tabParam = searchParams.get("tab"); + + const validTabs = ["overview", "backups"]; + const defaultTab = + tabParam && validTabs.includes(tabParam) ? tabParam : "overview"; + + const { data: volume, isLoading, error } = useVolume(id); + + const tabs: TabItem[] = [ + { value: "overview", label: "Overview" }, + { value: "backups", label: "Backups" }, + ]; + + const contents: TabContentItem[] = [ + { + value: "overview", + content: volume ? ( +
+
+
ID
+
{volume.id}
+
Name
+
{volume.name}
+ {volume.description && ( + <> +
Description
+
{volume.description}
+ + )} +
Status
+
+ + {volume.status} + +
+
Type
+
{volume.type}
+
Size
+
{volume.size_gb} GB
+
Created
+
{new Date(volume.created_at).toLocaleString()}
+
+
+ ) : null, + }, + { + value: "backups", + content: , + }, + ]; + + if (isLoading) { + return ( +
+ +
+ ); + } + + if (error || !volume) { + return ( +
+ + + Failed to load volume. + +
+ ); + } + + return ( +
+
+ +
+ +

{volume.name}

+ + {volume.status} + +
+
+ + +
+ ); +} diff --git a/apps/ui/components/backup/backup-list.tsx b/apps/ui/components/backup/backup-list.tsx new file mode 100644 index 0000000..325af1b --- /dev/null +++ b/apps/ui/components/backup/backup-list.tsx @@ -0,0 +1,57 @@ +"use client"; +import { useBackups } from "@/lib/queries"; +import { facadeApi } from "@/lib/api/facade"; +import { useMutation, useQueryClient } from "@tanstack/react-query"; +import { Button } from "@/components/ui/button"; +import { useState } from "react"; +import { RestoreDialog } from "./restore-dialog"; + +export function BackupList({ volumeId }: { volumeId: string }) { + const { data: backups, isLoading } = useBackups(volumeId); + const qc = useQueryClient(); + const del = useMutation({ + mutationFn: (id: string) => facadeApi.deleteBackup(id), + onSuccess: () => qc.invalidateQueries({ queryKey: ["backups", volumeId] }), + }); + const [restoring, setRestoring] = useState(null); + + if (isLoading) return

Loading…

; + if (!backups?.length) return

No backups yet.

; + + return ( + <> + + + + + + + + + + + + {backups.map((b) => ( + + + + + + + + ))} + +
CreatedStatusSizeChunks
{new Date(b.created_at).toLocaleString()}{b.status}{(b.size_bytes / 1024 / 1024).toFixed(1)} MiB{b.chunk_count} + {b.status === "completed" && ( + + )} + +
+ {restoring && setRestoring(null)} />} + + ); +} diff --git a/apps/ui/components/backup/backup-schedule-editor.tsx b/apps/ui/components/backup/backup-schedule-editor.tsx new file mode 100644 index 0000000..4a24cb9 --- /dev/null +++ b/apps/ui/components/backup/backup-schedule-editor.tsx @@ -0,0 +1,83 @@ +"use client"; +import { useState } from "react"; +import { facadeApi } from "@/lib/api/facade"; +import { useBackupTargets } from "@/lib/queries"; +import { useMutation, useQueryClient } from "@tanstack/react-query"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; + +export function BackupScheduleEditor({ + volumeId, + current, +}: { + volumeId: string; + current?: { cron?: string; retain_count?: number; target_id?: string }; +}) { + const { data: targets } = useBackupTargets(); + const [cron, setCron] = useState(current?.cron ?? "0 2 * * *"); + const [retain, setRetain] = useState(current?.retain_count ?? 7); + const [target, setTarget] = useState(current?.target_id); + const qc = useQueryClient(); + const mut = useMutation({ + mutationFn: () => + facadeApi.patchBackupSchedule(volumeId, { + cron, + retain_count: retain, + target_id: target, + }), + onSuccess: () => qc.invalidateQueries({ queryKey: ["volumes", volumeId] }), + }); + return ( +
{ + e.preventDefault(); + mut.mutate(); + }} + className="space-y-3 max-w-md" + > +
+ + setCron(e.target.value)} + placeholder="0 2 * * *" + /> +
+
+ + setRetain(parseInt(e.target.value))} + /> +
+
+ + +
+ +
+ ); +} diff --git a/apps/ui/components/backup/backup-target-form.tsx b/apps/ui/components/backup/backup-target-form.tsx new file mode 100644 index 0000000..cfcca00 --- /dev/null +++ b/apps/ui/components/backup/backup-target-form.tsx @@ -0,0 +1,88 @@ +"use client"; +import { useState } from "react"; +import { useMutation, useQueryClient } from "@tanstack/react-query"; +import { facadeApi } from "@/lib/api/facade"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; + +export function BackupTargetForm({ onCreated }: { onCreated?: () => void }) { + const [name, setName] = useState(""); + const [endpoint, setEndpoint] = useState(""); + const [bucket, setBucket] = useState(""); + const [prefix, setPrefix] = useState(""); + const [accessKey, setAccessKey] = useState(""); + const [secretKey, setSecretKey] = useState(""); + const [region, setRegion] = useState("us-east-1"); + const qc = useQueryClient(); + const mut = useMutation({ + mutationFn: () => + facadeApi.createBackupTarget({ + name, + endpoint, + bucket, + prefix, + access_key_id: accessKey, + secret_access_key: secretKey, + region, + }), + onSuccess: () => { + qc.invalidateQueries({ queryKey: ["backup_targets"] }); + onCreated?.(); + }, + }); + return ( +
{ + e.preventDefault(); + mut.mutate(); + }} + className="space-y-3 max-w-md" + > +
+ + setName(e.target.value)} required /> +
+
+ + setEndpoint(e.target.value)} + required + placeholder="https://seaweedfs.local:8333" + /> +
+
+ + setRegion(e.target.value)} /> +
+
+ + setBucket(e.target.value)} required /> +
+
+ + setPrefix(e.target.value)} /> +
+
+ + setAccessKey(e.target.value)} required /> +
+
+ + setSecretKey(e.target.value)} + required + /> +
+ + {mut.error && ( +

{(mut.error as Error).message}

+ )} +
+ ); +} diff --git a/apps/ui/components/backup/restore-dialog.tsx b/apps/ui/components/backup/restore-dialog.tsx new file mode 100644 index 0000000..3ee839a --- /dev/null +++ b/apps/ui/components/backup/restore-dialog.tsx @@ -0,0 +1,71 @@ +"use client"; +import { useState } from "react"; +import { useStorageBackends } from "@/lib/queries"; +import { facadeApi } from "@/lib/api/facade"; +import { useMutation } from "@tanstack/react-query"; +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogFooter, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Label } from "@/components/ui/label"; + +export function RestoreDialog({ + backupId, + onClose, +}: { + backupId: string; + onClose: () => void; +}) { + const { data: backends } = useStorageBackends(); + const active = (backends ?? []).filter((b) => !b.deleted_at); + const [target, setTarget] = useState( + active.find((b) => b.is_default)?.id, + ); + const mut = useMutation({ + mutationFn: () => facadeApi.restoreBackup(backupId, target!), + onSuccess: () => onClose(), + }); + return ( + !o && onClose()}> + + + Restore backup to a new volume + +
+ + +
+ + + + +
+
+ ); +} diff --git a/apps/ui/components/volume/volume-backups-tab.tsx b/apps/ui/components/volume/volume-backups-tab.tsx new file mode 100644 index 0000000..71a002b --- /dev/null +++ b/apps/ui/components/volume/volume-backups-tab.tsx @@ -0,0 +1,61 @@ +"use client"; +import { BackupList } from "@/components/backup/backup-list"; +import { BackupScheduleEditor } from "@/components/backup/backup-schedule-editor"; +import { useBackupTargets } from "@/lib/queries"; +import { facadeApi } from "@/lib/api/facade"; +import { useMutation, useQueryClient } from "@tanstack/react-query"; +import { Button } from "@/components/ui/button"; +import { useState } from "react"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Label } from "@/components/ui/label"; + +export function VolumeBackupsTab({ volumeId }: { volumeId: string }) { + const { data: targets } = useBackupTargets(); + const [target, setTarget] = useState(); + const qc = useQueryClient(); + const back = useMutation({ + mutationFn: () => facadeApi.createBackup(volumeId, target!), + onSuccess: () => qc.invalidateQueries({ queryKey: ["backups", volumeId] }), + }); + return ( +
+
+

Back up now

+
+
+ + +
+ +
+
+
+

Schedule

+ +
+
+

History

+ +
+
+ ); +} From 40ebe38e3bd451664deeca92253b24eab90795e9 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:30:24 +0700 Subject: [PATCH 30/37] chore(backup): cargo fmt sweep --- .../src/features/backups/index_rebuild.rs | 63 ++++++++++++++----- apps/manager/src/features/volumes/mod.rs | 5 +- apps/manager/src/features/volumes/routes.rs | 6 +- apps/manager/src/main.rs | 8 ++- 4 files changed, 58 insertions(+), 24 deletions(-) diff --git a/apps/manager/src/features/backups/index_rebuild.rs b/apps/manager/src/features/backups/index_rebuild.rs index 9035120..22c1260 100644 --- a/apps/manager/src/features/backups/index_rebuild.rs +++ b/apps/manager/src/features/backups/index_rebuild.rs @@ -4,7 +4,10 @@ use crate::features::backup_targets::envelope; use crate::features::backup_targets::repo::BackupTargetRepository; use anyhow::{Context, Result}; -use aws_sdk_s3::{config::{Builder, Region}, Client}; +use aws_sdk_s3::{ + config::{Builder, Region}, + Client, +}; use chrono::TimeZone; use nexus_backup::{decrypt_manifest, ChunkKey, Manifest}; use sqlx::PgPool; @@ -12,14 +15,21 @@ use uuid::Uuid; pub async fn run(pool: PgPool, target_id: Uuid) -> Result<()> { let repo = BackupTargetRepository::new(pool.clone()); - let target = repo.get(target_id).await? + let target = repo + .get(target_id) + .await? 
.ok_or_else(|| anyhow::anyhow!("target {target_id} not found"))?; let secret = envelope::unwrap_to_string(&target.encrypted_secret_access_key)?; let target_key = envelope::unwrap_to_array::<32>(&target.encrypted_target_key)?; let creds = aws_credential_types::Credentials::new( - &target.access_key_id, &secret, None, None, "nqrust-rebuild"); + &target.access_key_id, + &secret, + None, + None, + "nqrust-rebuild", + ); let region = Region::new(target.region.clone().unwrap_or_else(|| "us-east-1".into())); let s3_cfg = Builder::new() .behavior_version_latest() @@ -40,28 +50,45 @@ pub async fn run(pool: PgPool, target_id: Uuid) -> Result<()> { let mut skipped = 0usize; let mut continuation: Option = None; loop { - let mut req = client.list_objects_v2().bucket(&target.bucket).prefix(&prefix); - if let Some(c) = continuation.as_deref() { req = req.continuation_token(c); } + let mut req = client + .list_objects_v2() + .bucket(&target.bucket) + .prefix(&prefix); + if let Some(c) = continuation.as_deref() { + req = req.continuation_token(c); + } let resp = req.send().await.context("LIST manifests")?; for obj in resp.contents() { let Some(k) = obj.key() else { continue }; - let blob = client.get_object().bucket(&target.bucket).key(k).send().await + let blob = client + .get_object() + .bucket(&target.bucket) + .key(k) + .send() + .await .with_context(|| format!("GET {k}"))? - .body.collect().await? - .into_bytes().to_vec(); + .body + .collect() + .await? 
+ .into_bytes() + .to_vec(); let key = ChunkKey::from_bytes(target_key); let compressed = decrypt_manifest(&key, &blob)?; let m = Manifest::deserialize_compressed(&compressed)?; - let existed: Option = sqlx::query_scalar( - r#"SELECT id FROM backup WHERE id = $1"#, - ) - .bind(m.backup_id) - .fetch_optional(&pool) - .await?; - if existed.is_some() { skipped += 1; continue; } + let existed: Option = + sqlx::query_scalar(r#"SELECT id FROM backup WHERE id = $1"#) + .bind(m.backup_id) + .fetch_optional(&pool) + .await?; + if existed.is_some() { + skipped += 1; + continue; + } let total_size: i64 = m.chunks.iter().map(|c| c.ciphertext_length as i64).sum(); let chunk_count = m.chunks.len() as i64; - let created_at = chrono::Utc.timestamp_opt(m.created_at_unix_seconds, 0).single() + let created_at = chrono::Utc + .timestamp_opt(m.created_at_unix_seconds, 0) + .single() .unwrap_or_else(chrono::Utc::now); sqlx::query( r#"INSERT INTO backup @@ -86,7 +113,9 @@ pub async fn run(pool: PgPool, target_id: Uuid) -> Result<()> { } if resp.is_truncated().unwrap_or(false) { continuation = resp.next_continuation_token().map(String::from); - } else { break; } + } else { + break; + } } println!("index-rebuild: reconstructed {reconstructed}, skipped (already in DB) {skipped}"); Ok(()) diff --git a/apps/manager/src/features/volumes/mod.rs b/apps/manager/src/features/volumes/mod.rs index b694cd3..cd145ac 100644 --- a/apps/manager/src/features/volumes/mod.rs +++ b/apps/manager/src/features/volumes/mod.rs @@ -12,8 +12,5 @@ pub fn router() -> Router { .route("/:id", get(routes::get).delete(routes::delete)) .route("/:id/attach", post(routes::attach)) .route("/:id/detach", post(routes::detach)) - .route( - "/:id/backup_schedule", - patch(routes::patch_backup_schedule), - ) + .route("/:id/backup_schedule", patch(routes::patch_backup_schedule)) } diff --git a/apps/manager/src/features/volumes/routes.rs b/apps/manager/src/features/volumes/routes.rs index 1a75b5c..2ab21cd 100644 --- 
a/apps/manager/src/features/volumes/routes.rs +++ b/apps/manager/src/features/volumes/routes.rs @@ -48,7 +48,11 @@ pub async fn patch_backup_schedule( Ok(_) => (StatusCode::NO_CONTENT, ()).into_response(), Err(e) => { tracing::error!("patch_backup_schedule: {e}"); - (StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({"error":"db"}))).into_response() + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response() } } } diff --git a/apps/manager/src/main.rs b/apps/manager/src/main.rs index f27884f..3cfa74f 100644 --- a/apps/manager/src/main.rs +++ b/apps/manager/src/main.rs @@ -77,9 +77,13 @@ async fn main() -> anyhow::Result<()> { } let target_id: uuid::Uuid = match args[4].parse() { Ok(u) => u, - Err(e) => { eprintln!("bad uuid: {e}"); std::process::exit(2); } + Err(e) => { + eprintln!("bad uuid: {e}"); + std::process::exit(2); + } }; - let database_url = std::env::var("DATABASE_URL").map_err(|e| anyhow::anyhow!("DATABASE_URL: {e}"))?; + let database_url = + std::env::var("DATABASE_URL").map_err(|e| anyhow::anyhow!("DATABASE_URL: {e}"))?; let pool = sqlx::PgPool::connect(&database_url).await?; crate::features::backups::index_rebuild::run(pool, target_id).await?; return Ok(()); From 71bdfd87e019987077fd30cd6ff1758e58370a2a Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:48:31 +0700 Subject: [PATCH 31/37] fix(backup): DELETE /v1/backups/:id removes manifest from S3 before DB row (C1) Look up the backup row and target, decrypt credentials, and call delete_object on the manifest key before deleting the DB row. An orphaned manifest was keeping all its chunks alive forever because GC uses the manifest set as the live reference set. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/routes.rs | 66 ++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/apps/manager/src/features/backups/routes.rs b/apps/manager/src/features/backups/routes.rs index ea9c195..b5cb03a 100644 --- a/apps/manager/src/features/backups/routes.rs +++ b/apps/manager/src/features/backups/routes.rs @@ -141,15 +141,79 @@ pub async fn delete_one( Path(id): Path, ) -> impl IntoResponse { let repo = BackupRepository::new(st.db.clone()); + let backup = match repo.get(id).await { + Ok(Some(b)) => b, + Ok(None) => { + return ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error":"not found"})), + ) + .into_response() + } + Err(e) => { + tracing::error!("backups delete lookup: {e}"); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error":"db"})), + ) + .into_response(); + } + }; + + // Mark pruning so concurrent operations see the intent. sqlx::query(r#"UPDATE backup SET status = 'pruning', updated_at = now() WHERE id = $1"#) .bind(id) .execute(&st.db) .await .ok(); + + // Delete the manifest from S3 first so GC can reclaim the chunks on its + // next pass. If S3 delete fails, log a warning but still drop the DB row; + // the rebuild tool can detect orphan manifests later. 
+ if let Some(mkey) = backup.manifest_object_key.as_deref() { + if let Ok(Some(target)) = + crate::features::backup_targets::repo::BackupTargetRepository::new(st.db.clone()) + .get(backup.target_id) + .await + { + if let Ok(secret) = crate::features::backup_targets::envelope::unwrap_to_string( + &target.encrypted_secret_access_key, + ) { + let creds = aws_credential_types::Credentials::new( + &target.access_key_id, + &secret, + None, + None, + "nqrust-mgr-delete", + ); + let region = aws_sdk_s3::config::Region::new( + target.region.clone().unwrap_or_else(|| "us-east-1".into()), + ); + let s3_cfg = aws_sdk_s3::config::Builder::new() + .behavior_version_latest() + .endpoint_url(&target.endpoint) + .credentials_provider(creds) + .region(region) + .force_path_style(true) + .build(); + let client = aws_sdk_s3::Client::from_conf(s3_cfg); + if let Err(e) = client + .delete_object() + .bucket(&target.bucket) + .key(mkey) + .send() + .await + { + tracing::warn!(backup_id=%id, manifest=%mkey, "S3 manifest delete failed: {e:#}"); + } + } + } + } + match repo.delete_row(id).await { Ok(()) => (StatusCode::NO_CONTENT, ()).into_response(), Err(e) => { - tracing::error!("backups delete: {e}"); + tracing::error!("backups delete row: {e}"); ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({"error":"db"})), From 7a8a1f520d12902216be8ec4571dcbe6325d3e11 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:48:36 +0700 Subject: [PATCH 32/37] fix(backup): restore_backup returns DB row id, not backend-provisioned id (C2) VolumeRepository::create inserts with DEFAULT gen_random_uuid() and returns the full row. Capture the inserted row and return inserted.id so callers receive a UUID that actually exists in the volume table. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/manager/src/features/backups/service.rs b/apps/manager/src/features/backups/service.rs index c2ddc22..7436338 100644 --- a/apps/manager/src/features/backups/service.rs +++ b/apps/manager/src/features/backups/service.rs @@ -248,7 +248,7 @@ pub async fn restore_backup( match agent_rpc::agent_restore(&host.addr, req).await { Ok(_) => { let volume_repo = VolumeRepository::new(st.db.clone()); - volume_repo + let inserted = volume_repo .create( &format!("restore-{}", backup_id), Some(&format!("Restored from backup {backup_id}")), @@ -259,7 +259,7 @@ pub async fn restore_backup( target_backend_id, ) .await?; - Ok(new_volume.volume_id) + Ok(inserted.id) } Err(e) => { let _ = agent_rpc::agent_detach(&host.addr, &new_volume, &attached).await; From 75dafd70edaf32d06e25e59320cf296452817908 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:48:43 +0700 Subject: [PATCH 33/37] fix(backup): GC loop wakes per-minute and dedupes per-target per-day (I1) The old formula slept to the next hour boundary; if the manager started at gc_hour:00:01 the next wake landed at gc_hour+1:00:01 and the check always missed. Now we sleep only to the next minute boundary and check on every matching minute. A backup_gc_run lookup guards against launching a second GC run within the same gc_hour window. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/gc.rs | 32 ++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/apps/manager/src/features/backups/gc.rs b/apps/manager/src/features/backups/gc.rs index 617e2b6..0d5ebd9 100644 --- a/apps/manager/src/features/backups/gc.rs +++ b/apps/manager/src/features/backups/gc.rs @@ -8,7 +8,7 @@ use aws_sdk_s3::{ types::{Delete, ObjectIdentifier}, Client, }; -use chrono::{Timelike, Utc}; +use chrono::{TimeZone as _, Timelike, Utc}; use nexus_backup::{decrypt_manifest, ChunkKey, Manifest}; use sqlx::PgPool; use std::collections::HashSet; @@ -16,8 +16,14 @@ use std::collections::HashSet; pub async fn gc_loop(pool: PgPool) { loop { let now = Utc::now(); - let next_check = (60 - now.minute()) as u64 * 60 - now.second() as u64; - tokio::time::sleep(std::time::Duration::from_secs(next_check.max(60))).await; + // Sleep to the next minute boundary so we check every minute. + let secs_to_next_minute = (60 - now.second() as u64) % 60; + let sleep_for = if secs_to_next_minute == 0 { + 60 + } else { + secs_to_next_minute + }; + tokio::time::sleep(std::time::Duration::from_secs(sleep_for)).await; let now = Utc::now(); let repo = BackupTargetRepository::new(pool.clone()); @@ -25,6 +31,26 @@ pub async fn gc_loop(pool: PgPool) { Ok(targets) => { for t in targets { if (t.gc_hour as u32) == now.hour() { + // Dedupe: only run once per target per gc_hour window. 
+ let today_at_hour = now + .date_naive() + .and_hms_opt(t.gc_hour as u32, 0, 0) + .map(|dt| Utc.from_utc_datetime(&dt)) + .unwrap_or(now); + let existing: Option> = sqlx::query_scalar( + r#"SELECT started_at FROM backup_gc_run + WHERE target_id = $1 AND started_at >= $2 + ORDER BY started_at DESC LIMIT 1"#, + ) + .bind(t.id) + .bind(today_at_hour) + .fetch_optional(&pool) + .await + .ok() + .flatten(); + if existing.is_some() { + continue; + } if let Err(e) = run_gc(&pool, &t).await { tracing::error!(target=%t.name, "GC run failed: {e:#}"); } From 5163321d07f2465d66f4b8af73e9893206d3355d Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:48:49 +0700 Subject: [PATCH 34/37] fix(backup): per-chunk S3 retry with exponential backoff (I2) HEAD, PUT (chunk), PUT (manifest), and GET (manifest + chunk) calls now retry up to 5 times with 200 ms * 2^attempt backoff. A transient network blip no longer aborts the entire backup or restore. Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/backup.rs | 115 +++++++++++++++++++--- 1 file changed, 100 insertions(+), 15 deletions(-) diff --git a/apps/agent/src/features/storage/backup.rs b/apps/agent/src/features/storage/backup.rs index 9dbfc67..2d36e4c 100644 --- a/apps/agent/src/features/storage/backup.rs +++ b/apps/agent/src/features/storage/backup.rs @@ -64,15 +64,49 @@ pub async fn run_backup( let chunk_id: [u8; 32] = *blake3::hash(&ciphertext).as_bytes(); let object_key = chunk_object_key(¶ms.target.prefix, &chunk_id); - let exists = s3::head_object(&s3, ¶ms.target.bucket, &object_key) - .await - .context("HEAD chunk")?; + let exists = { + let mut attempt = 0u32; + loop { + match s3::head_object(&s3, ¶ms.target.bucket, &object_key).await { + Ok(v) => break v, + Err(e) if attempt < 4 => { + let backoff_ms = 200u64 * (1u64 << attempt); + tracing::warn!( + "HEAD chunk attempt {} failed: {e}; retrying in {backoff_ms}ms", + attempt + 1 + ); + 
tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await; + attempt += 1; + } + Err(e) => { + return Err(anyhow::anyhow!("HEAD chunk failed after 5 attempts: {e}")) + } + } + } + }; bytes_written += ciphertext.len() as u64; if !exists { let cipher_len = ciphertext.len() as u64; - s3::put_object(&s3, ¶ms.target.bucket, &object_key, ciphertext.clone()) - .await - .context("PUT chunk")?; + let mut attempt = 0u32; + loop { + match s3::put_object(&s3, ¶ms.target.bucket, &object_key, ciphertext.clone()) + .await + { + Ok(()) => break, + Err(e) if attempt < 4 => { + let backoff_ms = 200u64 * (1u64 << attempt); + tracing::warn!( + "PUT chunk attempt {} failed: {e}; retrying in {backoff_ms}ms", + attempt + 1 + ); + tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await; + attempt += 1; + } + Err(e) => { + return Err(anyhow::anyhow!("PUT chunk failed after 5 attempts: {e}")) + } + } + } bytes_unique += cipher_len; } @@ -100,9 +134,24 @@ pub async fn run_backup( .context("manifest serialize")?; let manifest_blob = encrypt_manifest(&key, &manifest_compressed).context("encrypt manifest")?; let mkey = manifest_object_key(¶ms.target.prefix, ¶ms.backup_id); - s3::put_object(&s3, ¶ms.target.bucket, &mkey, manifest_blob) - .await - .context("PUT manifest")?; + { + let mut attempt = 0u32; + loop { + match s3::put_object(&s3, ¶ms.target.bucket, &mkey, manifest_blob.clone()).await { + Ok(()) => break, + Err(e) if attempt < 4 => { + let backoff_ms = 200u64 * (1u64 << attempt); + tracing::warn!( + "PUT manifest attempt {} failed: {e}; retrying in {backoff_ms}ms", + attempt + 1 + ); + tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await; + attempt += 1; + } + Err(e) => return Err(anyhow::anyhow!("PUT manifest failed after 5 attempts: {e}")), + } + } + } Ok(BackupOutcome { manifest_object_key: mkey, @@ -131,9 +180,24 @@ pub async fn run_restore(params: RestoreParams) -> Result { let s3 = s3::make_client(¶ms.target); let key = 
ChunkKey::from_bytes(params.encryption_key); - let blob = s3::get_object(&s3, ¶ms.target.bucket, ¶ms.manifest_object_key) - .await - .context("GET manifest")?; + let blob = { + let mut attempt = 0u32; + loop { + match s3::get_object(&s3, ¶ms.target.bucket, ¶ms.manifest_object_key).await { + Ok(v) => break v, + Err(e) if attempt < 4 => { + let backoff_ms = 200u64 * (1u64 << attempt); + tracing::warn!( + "GET manifest attempt {} failed: {e}; retrying in {backoff_ms}ms", + attempt + 1 + ); + tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await; + attempt += 1; + } + Err(e) => return Err(anyhow::anyhow!("GET manifest failed after 5 attempts: {e}")), + } + } + }; let compressed = decrypt_manifest(&key, &blob).context("decrypt manifest")?; let manifest = Manifest::deserialize_compressed(&compressed).context("deserialize manifest")?; @@ -147,9 +211,30 @@ pub async fn run_restore(params: RestoreParams) -> Result { let mut bytes_written: u64 = 0; for chunk_ref in &manifest.chunks { let object_key = chunk_object_key(¶ms.target.prefix, &chunk_ref.chunk_id); - let ciphertext = s3::get_object(&s3, ¶ms.target.bucket, &object_key) - .await - .with_context(|| format!("GET chunk {}", hex::encode(chunk_ref.chunk_id)))?; + let ciphertext = { + let mut attempt = 0u32; + loop { + match s3::get_object(&s3, ¶ms.target.bucket, &object_key).await { + Ok(v) => break v, + Err(e) if attempt < 4 => { + let backoff_ms = 200u64 * (1u64 << attempt); + tracing::warn!( + "GET chunk {} attempt {} failed: {e}; retrying in {backoff_ms}ms", + hex::encode(chunk_ref.chunk_id), + attempt + 1 + ); + tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await; + attempt += 1; + } + Err(e) => { + return Err(anyhow::anyhow!( + "GET chunk {} failed after 5 attempts: {e}", + hex::encode(chunk_ref.chunk_id) + )) + } + } + } + }; let plaintext = decrypt_chunk(&key, &ciphertext, &chunk_ref.plaintext_hash).context("decrypt chunk")?; 
dst.seek(std::io::SeekFrom::Start(chunk_ref.plaintext_offset)) From 39b9af96716b80f00bf50c5bf65b504a0d76f448 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:48:55 +0700 Subject: [PATCH 35/37] fix(backup): scheduler skips volumes with active running backups (I4) MAX(created_at) includes running rows, so a 5-minute cron with a 10-minute backup would fire a second concurrent run at T+5. Add a NOT EXISTS sub-query to exclude volumes that already have a running backup before the tick even evaluates the cron schedule. Co-Authored-By: Claude Sonnet 4.6 --- apps/manager/src/features/backups/scheduler.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/apps/manager/src/features/backups/scheduler.rs b/apps/manager/src/features/backups/scheduler.rs index bf0d660..8ce2609 100644 --- a/apps/manager/src/features/backups/scheduler.rs +++ b/apps/manager/src/features/backups/scheduler.rs @@ -29,7 +29,11 @@ async fn tick(st: &AppState) -> anyhow::Result<()> { r#"SELECT v.id, v.backup_cron, v.backup_target_id, (SELECT MAX(created_at) FROM backup b WHERE b.source_volume_id = v.id) AS last_backup FROM volume v - WHERE v.backup_cron IS NOT NULL AND v.backup_target_id IS NOT NULL"#, + WHERE v.backup_cron IS NOT NULL AND v.backup_target_id IS NOT NULL + AND NOT EXISTS ( + SELECT 1 FROM backup b2 + WHERE b2.source_volume_id = v.id AND b2.status = 'running' + )"#, ) .fetch_all(&st.db) .await?; From c67f93e88d5a81508d18cf8d2237f6fb7d6aebe4 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:49:03 +0700 Subject: [PATCH 36/37] fix(backup): IscsiSnapshotReader logs out iSCSI session on drop (M1) read_snapshot was calling iscsiadm_login but never logout, leaving a dangling session after each backup. Wrap the opened File in IscsiSnapshotReader, which spawns a detached logout task in its Drop impl. The error path (device never appeared) now also explicitly calls iscsiadm_logout before returning the error. 
Co-Authored-By: Claude Sonnet 4.6 --- apps/agent/src/features/storage/iscsi.rs | 45 +++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/apps/agent/src/features/storage/iscsi.rs b/apps/agent/src/features/storage/iscsi.rs index 8516c46..8f309eb 100644 --- a/apps/agent/src/features/storage/iscsi.rs +++ b/apps/agent/src/features/storage/iscsi.rs @@ -20,6 +20,39 @@ struct LocatorJson { portal: Option, } +/// An `AsyncRead` wrapper around a `tokio::fs::File` opened on an iSCSI block +/// device. On drop it spawns a best-effort logout so the session is cleaned up +/// even if the caller forgets to call `detach`. +struct IscsiSnapshotReader { + inner: tokio::fs::File, + iqn: String, + portal: String, +} + +impl tokio::io::AsyncRead for IscsiSnapshotReader { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + std::pin::Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl Drop for IscsiSnapshotReader { + fn drop(&mut self) { + // Best-effort logout. We cannot await in Drop so spawn a detached task. 
+ let iqn = std::mem::take(&mut self.iqn); + let portal = std::mem::take(&mut self.portal); + tokio::spawn(async move { + let _ = tokio::process::Command::new("iscsiadm") + .args(["-m", "node", "-T", &iqn, "-p", &portal, "--logout"]) + .output() + .await; + }); + } +} + pub struct IscsiHostBackend; impl IscsiHostBackend { @@ -143,13 +176,23 @@ impl HostBackend for IscsiHostBackend { let loc = Self::parse_locator(&snap.locator)?; Self::iscsiadm_login(&loc).await?; let dev = Self::block_device_path(&loc); + let portal = loc + .portal + .clone() + .unwrap_or_else(|| "127.0.0.1".to_string()); for _ in 0..30 { if dev.exists() { let f = tokio::fs::File::open(&dev).await?; - return Ok(Box::new(f)); + return Ok(Box::new(IscsiSnapshotReader { + inner: f, + iqn: loc.iqn.clone(), + portal, + })); } tokio::time::sleep(std::time::Duration::from_millis(100)).await; } + // No device appeared — log out before erroring so we don't leak the session. + Self::iscsiadm_logout(&loc).await.ok(); Err(StorageError::Backend( format!( "snapshot device {} did not appear after iscsi login", From 8148f52cfe5767436cebea29bb4cd9e408d15041 Mon Sep 17 00:00:00 2001 From: kleopasevan Date: Wed, 29 Apr 2026 12:55:26 +0700 Subject: [PATCH 37/37] chore(ci): ignore additional advisories from aws-sdk transitive deps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - RUSTSEC-2026-0098/0099/0104: rustls-webpki 0.101.x via aws-smithy-http-client → rustls 0.21. Our other code uses 0.103.13; the AWS SDK pins old rustls. - RUSTSEC-2025-0141: bincode is unmaintained but functionally stable. Migration to postcard/ciborium is a follow-up. 
--- .github/workflows/ci.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ed0aae6..d7d3854 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -274,13 +274,26 @@ jobs: run: cargo install cargo-audit - name: Run security audit - # Ignored advisories — all pre-existing on main, not caused by this PR: + # Ignored advisories — all transitive deps, not caused by our code: # - RUSTSEC-2026-0097 (rand 0.9.x unsoundness): only triggers with a # custom logger calling rand::rng(). NQRust-MicroVM does not. Via # quinn-proto (reqwest HTTP/3) and mockito (test-only). # - RUSTSEC-2026-0002 (lru 0.12.x IterMut Stacked Borrows): transitive # via ratatui in the installer TUI; ratatui has not yet bumped lru. - run: cargo audit --ignore RUSTSEC-2026-0097 --ignore RUSTSEC-2026-0002 + # - RUSTSEC-2026-0098/0099/0104 (rustls-webpki 0.101.x): transitive via + # aws-sdk-s3 → aws-smithy-http-client → rustls 0.21. We already use + # rustls-webpki 0.103.13 in the rest of the stack; the AWS SDK pins + # the old version and will upgrade in a future release. + # - RUSTSEC-2025-0141 (bincode unmaintained): bincode is functionally + # stable; migration to postcard/ciborium is a follow-up. + run: | + cargo audit \ + --ignore RUSTSEC-2026-0097 \ + --ignore RUSTSEC-2026-0002 \ + --ignore RUSTSEC-2026-0098 \ + --ignore RUSTSEC-2026-0099 \ + --ignore RUSTSEC-2026-0104 \ + --ignore RUSTSEC-2025-0141 check-installer-syntax: name: Check Installer Syntax