From 8db3cc3222683db9bdbc2d04a6f511e599439083 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E7=99=BB=E5=B1=B1?= Date: Fri, 20 Mar 2026 09:09:53 +0800 Subject: [PATCH 1/5] feat: add ILM (lifecycle/tiering) and bucket replication commands Add four major feature groups to the rc CLI: 1. Lifecycle rule management (rc ilm rule add/edit/list/remove/export/import) 2. Storage tier management (rc ilm tier add/edit/list/info/remove) 3. Object restore (rc ilm restore) 4. Bucket replication (rc replicate add/update/list/status/remove/export/import) Core changes: - Add LifecycleRule, LifecycleConfiguration types to rc-core - Add TierConfig, TierType, TierCreds types matching RustFS admin API format - Add ReplicationConfiguration, ReplicationRule, BucketTarget types - Extend ObjectStore trait with lifecycle/replication/restore methods - Extend AdminApi trait with tier and replication target methods - Add lifecycle and replication fields to Capabilities - Implement all ObjectStore methods in S3Client (aws-sdk-s3) - Implement all AdminApi methods in AdminClient (HTTP+SigV4) - Add help contract tests for all new commands - Update README with ILM, tier, and replication documentation --- .gitignore | 2 +- README.md | 88 +++ crates/cli/src/commands/ilm/mod.rs | 43 ++ crates/cli/src/commands/ilm/restore.rs | 191 +++++ crates/cli/src/commands/ilm/rule.rs | 947 ++++++++++++++++++++++++ crates/cli/src/commands/ilm/tier.rs | 563 +++++++++++++++ crates/cli/src/commands/mod.rs | 13 + crates/cli/src/commands/replicate.rs | 962 +++++++++++++++++++++++++ crates/cli/tests/help_contract.rs | 86 +++ crates/core/src/admin/mod.rs | 44 ++ crates/core/src/admin/tier.rs | 501 +++++++++++++ crates/core/src/lib.rs | 10 + crates/core/src/lifecycle.rs | 261 +++++++ crates/core/src/replication.rs | 255 +++++++ crates/core/src/traits.rs | 40 + crates/s3/src/admin.rs | 76 ++ crates/s3/src/capability.rs | 2 + crates/s3/src/client.rs | 461 +++++++++++- 18 files changed, 4542 insertions(+), 3 deletions(-) 
create mode 100644 crates/cli/src/commands/ilm/mod.rs create mode 100644 crates/cli/src/commands/ilm/restore.rs create mode 100644 crates/cli/src/commands/ilm/rule.rs create mode 100644 crates/cli/src/commands/ilm/tier.rs create mode 100644 crates/cli/src/commands/replicate.rs create mode 100644 crates/core/src/admin/tier.rs create mode 100644 crates/core/src/lifecycle.rs create mode 100644 crates/core/src/replication.rs diff --git a/.gitignore b/.gitignore index eb8b0b1..a6b5781 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,4 @@ coverage/ # Cargo lock for library crates (keep for binary) # Cargo.lock - +CLAUDE.md diff --git a/README.md b/README.md index 0db7cc6..735f1e4 100644 --- a/README.md +++ b/README.md @@ -144,6 +144,63 @@ rc event list local/my-bucket rc event remove local/my-bucket arn:aws:sns:us-east-1:123456789012:topic ``` +### Lifecycle (ILM) Operations + +```bash +# Add lifecycle rule: expire objects after 30 days with prefix filter +rc ilm rule add local/my-bucket --expiry-days 30 --prefix "logs/" + +# Add lifecycle rule: transition to remote tier after 90 days +rc ilm rule add local/my-bucket --transition-days 90 --storage-class WARM + +# List lifecycle rules +rc ilm rule list local/my-bucket + +# Edit an existing rule +rc ilm rule edit local/my-bucket --id rule-abc123 --expiry-days 60 + +# Remove a specific rule or all rules +rc ilm rule remove local/my-bucket --id rule-abc123 +rc ilm rule remove local/my-bucket --all + +# Export/import lifecycle configuration (JSON) +rc ilm rule export local/my-bucket > lifecycle.json +rc ilm rule import local/my-bucket lifecycle.json + +# Manage remote storage tiers +rc ilm tier add rustfs WARM local --endpoint http://remote:9000 --access-key ak --secret-key sk --bucket warm-bucket +rc ilm tier list local +rc ilm tier info WARM local +rc ilm tier remove WARM local --force + +# Restore a transitioned (archived) object +rc ilm restore local/my-bucket/archived-file.dat --days 7 +``` + +### Bucket Replication 
+ +```bash +# Add replication rule (requires remote target setup) +rc replicate add local/my-bucket --remote-bucket remote/target-bucket --priority 1 + +# List replication rules +rc replicate list local/my-bucket + +# View replication status/metrics +rc replicate status local/my-bucket + +# Update a replication rule +rc replicate update local/my-bucket --id rule-1 --priority 2 + +# Remove replication rules +rc replicate remove local/my-bucket --id rule-1 +rc replicate remove local/my-bucket --all + +# Export/import replication configuration (JSON) +rc replicate export local/my-bucket > replication.json +rc replicate import local/my-bucket replication.json +``` + ### Admin Operations (Cluster) ```bash @@ -189,6 +246,8 @@ rc admin heal status local --json | `version` | Manage bucket versioning | | `tag` | Manage bucket and object tags | | `quota` | Manage bucket quota | +| `ilm` | Manage lifecycle rules, storage tiers, and object restore | +| `replicate` | Manage bucket replication | | `completions` | Generate shell completion scripts | ### Admin Subcommands @@ -202,6 +261,35 @@ rc admin heal status local --json | `admin info` | Display cluster information (cluster, server, disk) | | `admin heal` | Manage cluster healing operations (status, start, stop) | +### ILM Subcommands + +| Command | Description | +|---------|-------------| +| `ilm rule add` | Add a lifecycle rule to a bucket | +| `ilm rule edit` | Edit an existing lifecycle rule | +| `ilm rule list` | List lifecycle rules on a bucket | +| `ilm rule remove` | Remove lifecycle rules from a bucket | +| `ilm rule export` | Export lifecycle configuration as JSON | +| `ilm rule import` | Import lifecycle configuration from JSON | +| `ilm tier add` | Add a remote storage tier | +| `ilm tier edit` | Edit tier credentials | +| `ilm tier list` | List all configured storage tiers | +| `ilm tier info` | Show details for a specific tier | +| `ilm tier remove` | Remove a storage tier | +| `ilm restore` | Restore a 
transitioned (archived) object | + +### Replicate Subcommands + +| Command | Description | +|---------|-------------| +| `replicate add` | Add a new replication rule | +| `replicate update` | Update an existing replication rule | +| `replicate list` | List replication rules for a bucket | +| `replicate status` | Show replication status and metrics | +| `replicate remove` | Remove replication rules | +| `replicate export` | Export replication configuration as JSON | +| `replicate import` | Import replication configuration from JSON | + ## Output Format ### Human-Readable (default) diff --git a/crates/cli/src/commands/ilm/mod.rs b/crates/cli/src/commands/ilm/mod.rs new file mode 100644 index 0000000..10507f2 --- /dev/null +++ b/crates/cli/src/commands/ilm/mod.rs @@ -0,0 +1,43 @@ +//! ilm command - Manage bucket lifecycle (ILM) rules, tiers, and restores +//! +//! Add, edit, list, remove, export, and import lifecycle rules; +//! manage remote storage tiers; restore transitioned objects. + +pub mod restore; +pub mod rule; +pub mod tier; + +use clap::{Args, Subcommand}; + +use crate::exit_code::ExitCode; +use crate::output::OutputConfig; + +/// Manage bucket lifecycle (ILM) configuration +#[derive(Args, Debug)] +pub struct IlmArgs { + #[command(subcommand)] + pub command: IlmCommands, +} + +#[derive(Subcommand, Debug)] +pub enum IlmCommands { + /// Manage lifecycle rules on a bucket + #[command(subcommand)] + Rule(rule::RuleCommands), + + /// Manage remote storage tiers + #[command(subcommand)] + Tier(tier::TierCommands), + + /// Restore a transitioned (archived) object + Restore(restore::RestoreArgs), +} + +/// Execute the ilm command +pub async fn execute(args: IlmArgs, output_config: OutputConfig) -> ExitCode { + match args.command { + IlmCommands::Rule(cmd) => rule::execute(cmd, output_config).await, + IlmCommands::Tier(cmd) => tier::execute(cmd, output_config).await, + IlmCommands::Restore(args) => restore::execute(args, output_config).await, + } +} diff --git 
a/crates/cli/src/commands/ilm/restore.rs b/crates/cli/src/commands/ilm/restore.rs new file mode 100644 index 0000000..42c6fdb --- /dev/null +++ b/crates/cli/src/commands/ilm/restore.rs @@ -0,0 +1,191 @@ +//! ilm restore command - Restore a transitioned (archived) object +//! +//! Initiates a restore request for an object that has been transitioned +//! to a remote storage tier via lifecycle rules. + +use clap::Args; +use rc_core::{AliasManager, ObjectStore as _, RemotePath}; +use rc_s3::S3Client; +use serde::Serialize; + +use crate::exit_code::ExitCode; +use crate::output::{Formatter, OutputConfig}; + +/// Restore a transitioned object +#[derive(Args, Debug)] +pub struct RestoreArgs { + /// Object path (alias/bucket/key) + pub path: String, + + /// Number of days to keep the restored copy + #[arg(long, default_value = "1")] + pub days: i32, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Debug, Serialize)] +struct RestoreOutput { + path: String, + days: i32, + status: String, +} + +/// Execute the restore command +pub async fn execute(args: RestoreArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket, key) = match parse_object_path(&args.path) { + Ok(parsed) => parsed, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + if args.days < 1 { + formatter.error("--days must be at least 1"); + return ExitCode::UsageError; + } + + let alias_manager = match AliasManager::new() { + Ok(manager) => manager, + Err(error) => { + formatter.error(&format!("Failed to load aliases: {error}")); + return ExitCode::GeneralError; + } + }; + + let alias = match alias_manager.get(&alias_name) { + Ok(alias) => alias, + Err(_) => { + formatter.error(&format!("Alias '{alias_name}' not found")); + return ExitCode::NotFound; + } + }; + + let client = match S3Client::new(alias).await { + Ok(client) => client, + Err(error) => { 
+ formatter.error(&format!("Failed to create S3 client: {error}")); + return ExitCode::NetworkError; + } + }; + + if !args.force { + match client.capabilities().await { + Ok(caps) => { + if !caps.lifecycle { + formatter.error( + "Backend does not support lifecycle. Use --force to attempt anyway.", + ); + return ExitCode::UnsupportedFeature; + } + } + Err(error) => { + formatter.error(&format!("Failed to detect capabilities: {error}")); + return ExitCode::NetworkError; + } + } + } + + let remote_path = RemotePath::new(&alias_name, &bucket, &key); + + match client.restore_object(&remote_path, args.days).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&RestoreOutput { + path: args.path, + days: args.days, + status: "initiated".to_string(), + }); + } else { + formatter.success(&format!( + "Restore initiated for '{}' ({} day(s)).", + args.path, args.days + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to restore object: {error}")); + ExitCode::GeneralError + } + } +} + +fn parse_object_path(path: &str) -> Result<(String, String, String), String> { + if path.is_empty() { + return Err("Path cannot be empty".to_string()); + } + + let parts: Vec<&str> = path.splitn(3, '/').collect(); + if parts.len() < 3 || parts[0].is_empty() || parts[1].is_empty() || parts[2].is_empty() { + return Err("Object path must be in format alias/bucket/key".to_string()); + } + + Ok(( + parts[0].to_string(), + parts[1].to_string(), + parts[2].to_string(), + )) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_object_path_success() { + let (alias, bucket, key) = + parse_object_path("local/my-bucket/my-key.txt").expect("should parse"); + assert_eq!(alias, "local"); + assert_eq!(bucket, "my-bucket"); + assert_eq!(key, "my-key.txt"); + } + + #[test] + fn test_parse_object_path_nested_key() { + let (alias, bucket, key) = + parse_object_path("local/my-bucket/path/to/file.txt").expect("should parse"); + assert_eq!(alias, 
"local"); + assert_eq!(bucket, "my-bucket"); + assert_eq!(key, "path/to/file.txt"); + } + + #[test] + fn test_parse_object_path_errors() { + assert!(parse_object_path("").is_err()); + assert!(parse_object_path("local").is_err()); + assert!(parse_object_path("local/bucket").is_err()); + assert!(parse_object_path("/bucket/key").is_err()); + assert!(parse_object_path("local//key").is_err()); + assert!(parse_object_path("local/bucket/").is_err()); + } + + #[tokio::test] + async fn test_execute_invalid_path_returns_usage_error() { + let args = RestoreArgs { + path: "invalid-path".to_string(), + days: 1, + force: false, + }; + + let code = execute(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } + + #[tokio::test] + async fn test_execute_invalid_days_returns_usage_error() { + let args = RestoreArgs { + path: "local/bucket/key.txt".to_string(), + days: 0, + force: false, + }; + + let code = execute(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } +} diff --git a/crates/cli/src/commands/ilm/rule.rs b/crates/cli/src/commands/ilm/rule.rs new file mode 100644 index 0000000..537610f --- /dev/null +++ b/crates/cli/src/commands/ilm/rule.rs @@ -0,0 +1,947 @@ +//! ilm rule command - Manage bucket lifecycle rules +//! +//! Add, edit, list, remove, export, and import lifecycle rules on a bucket. 
+ +use clap::{Args, Subcommand}; +use comfy_table::{ContentArrangement, Table}; +use rc_core::{ + AliasManager, LifecycleConfiguration, LifecycleExpiration, LifecycleRule, LifecycleRuleStatus, + LifecycleTransition, NoncurrentVersionExpiration, NoncurrentVersionTransition, + ObjectStore as _, +}; +use rc_s3::S3Client; +use serde::Serialize; + +use crate::exit_code::ExitCode; +use crate::output::{Formatter, OutputConfig}; + +#[derive(Subcommand, Debug)] +pub enum RuleCommands { + /// Add a new lifecycle rule to a bucket + Add(AddRuleArgs), + + /// Edit an existing lifecycle rule + Edit(EditRuleArgs), + + /// List lifecycle rules on a bucket + List(BucketArg), + + /// Remove lifecycle rules from a bucket + Remove(RemoveRuleArgs), + + /// Export lifecycle rules as JSON + Export(BucketArg), + + /// Import lifecycle rules from a JSON file + Import(ImportRuleArgs), +} + +#[derive(Args, Debug)] +pub struct BucketArg { + /// Path to the bucket (alias/bucket) + pub path: String, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct AddRuleArgs { + /// Path to the bucket (alias/bucket) + pub path: String, + + /// Expiration days for current versions + #[arg(long)] + pub expiry_days: Option, + + /// Expiration date for current versions (ISO 8601) + #[arg(long)] + pub expiry_date: Option, + + /// Transition days for current versions + #[arg(long)] + pub transition_days: Option, + + /// Transition date for current versions (ISO 8601) + #[arg(long)] + pub transition_date: Option, + + /// Target storage class for transition + #[arg(long)] + pub storage_class: Option, + + /// Expiration days for noncurrent versions + #[arg(long)] + pub noncurrent_expiry_days: Option, + + /// Transition days for noncurrent versions + #[arg(long)] + pub noncurrent_transition_days: Option, + + /// Storage class for noncurrent version transition + #[arg(long)] + pub noncurrent_transition_storage_class: Option, + + /// Key 
prefix filter + #[arg(long)] + pub prefix: Option, + + /// Remove expired delete markers + #[arg(long)] + pub expired_object_delete_marker: bool, + + /// Maximum number of noncurrent versions to retain + #[arg(long)] + pub newer_noncurrent_versions: Option, + + /// Create the rule in disabled state + #[arg(long)] + pub disable: bool, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct EditRuleArgs { + /// Path to the bucket (alias/bucket) + pub path: String, + + /// ID of the rule to edit + #[arg(long)] + pub id: String, + + /// Expiration days for current versions + #[arg(long)] + pub expiry_days: Option, + + /// Expiration date for current versions (ISO 8601) + #[arg(long)] + pub expiry_date: Option, + + /// Transition days for current versions + #[arg(long)] + pub transition_days: Option, + + /// Transition date for current versions (ISO 8601) + #[arg(long)] + pub transition_date: Option, + + /// Target storage class for transition + #[arg(long)] + pub storage_class: Option, + + /// Expiration days for noncurrent versions + #[arg(long)] + pub noncurrent_expiry_days: Option, + + /// Transition days for noncurrent versions + #[arg(long)] + pub noncurrent_transition_days: Option, + + /// Storage class for noncurrent version transition + #[arg(long)] + pub noncurrent_transition_storage_class: Option, + + /// Key prefix filter + #[arg(long)] + pub prefix: Option, + + /// Remove expired delete markers + #[arg(long)] + pub expired_object_delete_marker: Option, + + /// Maximum number of noncurrent versions to retain + #[arg(long)] + pub newer_noncurrent_versions: Option, + + /// Set the rule to disabled state + #[arg(long)] + pub disable: Option, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct RemoveRuleArgs { + /// Path to the bucket (alias/bucket) + pub path: String, + + /// ID of the rule to remove + 
#[arg(long)] + pub id: Option, + + /// Remove all lifecycle rules + #[arg(long)] + pub all: bool, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct ImportRuleArgs { + /// Path to the bucket (alias/bucket) + pub path: String, + + /// Path to the JSON file containing lifecycle rules + pub file: String, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Debug, Serialize)] +struct RuleListOutput { + bucket: String, + rules: Vec, +} + +#[derive(Debug, Serialize)] +struct RuleOperationOutput { + bucket: String, + rule_id: String, + action: String, +} + +/// Execute a rule subcommand +pub async fn execute(cmd: RuleCommands, output_config: OutputConfig) -> ExitCode { + match cmd { + RuleCommands::Add(args) => execute_add(args, output_config).await, + RuleCommands::Edit(args) => execute_edit(args, output_config).await, + RuleCommands::List(args) => execute_list(args, output_config).await, + RuleCommands::Remove(args) => execute_remove(args, output_config).await, + RuleCommands::Export(args) => execute_export(args, output_config).await, + RuleCommands::Import(args) => execute_import(args, output_config).await, + } +} + +async fn execute_add(args: AddRuleArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + // Get existing rules + let mut rules = client + .get_bucket_lifecycle(&bucket) + .await + .unwrap_or_default(); + + // Generate rule ID + let rule_id = generate_rule_id(); + + let status = if args.disable { + LifecycleRuleStatus::Disabled + } else { + 
LifecycleRuleStatus::Enabled + }; + + // Build expiration + let expiration = if args.expiry_days.is_some() || args.expiry_date.is_some() { + Some(LifecycleExpiration { + days: args.expiry_days, + date: args.expiry_date, + }) + } else { + None + }; + + // Build transition + let transition = match ( + &args.transition_days, + &args.transition_date, + &args.storage_class, + ) { + (Some(_), _, Some(sc)) | (_, Some(_), Some(sc)) => Some(LifecycleTransition { + days: args.transition_days, + date: args.transition_date.clone(), + storage_class: sc.clone(), + }), + (Some(_), _, None) | (_, Some(_), None) => { + formatter.error( + "--storage-class is required when using --transition-days or --transition-date", + ); + return ExitCode::UsageError; + } + _ => None, + }; + + // Build noncurrent version expiration + let noncurrent_version_expiration = + args.noncurrent_expiry_days + .map(|days| NoncurrentVersionExpiration { + noncurrent_days: days, + newer_noncurrent_versions: args.newer_noncurrent_versions, + }); + + // Build noncurrent version transition + let noncurrent_version_transition = match ( + args.noncurrent_transition_days, + &args.noncurrent_transition_storage_class, + ) { + (Some(days), Some(sc)) => Some(NoncurrentVersionTransition { + noncurrent_days: days, + storage_class: sc.clone(), + }), + (Some(_), None) => { + formatter.error("--noncurrent-transition-storage-class is required when using --noncurrent-transition-days"); + return ExitCode::UsageError; + } + _ => None, + }; + + let expired_object_delete_marker = if args.expired_object_delete_marker { + Some(true) + } else { + None + }; + + let new_rule = LifecycleRule { + id: rule_id.clone(), + status, + prefix: args.prefix, + tags: None, + expiration, + transition, + noncurrent_version_expiration, + noncurrent_version_transition, + abort_incomplete_multipart_upload_days: None, + expired_object_delete_marker, + }; + + rules.push(new_rule); + + match client.set_bucket_lifecycle(&bucket, rules).await { + Ok(()) => 
{ + if formatter.is_json() { + formatter.json(&RuleOperationOutput { + bucket, + rule_id, + action: "added".to_string(), + }); + } else { + formatter.success(&format!("Lifecycle rule '{rule_id}' added successfully.")); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to set lifecycle rules: {error}")); + ExitCode::GeneralError + } + } +} + +async fn execute_edit(args: EditRuleArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + let mut rules = match client.get_bucket_lifecycle(&bucket).await { + Ok(rules) => rules, + Err(error) => { + formatter.error(&format!("Failed to get lifecycle rules: {error}")); + return ExitCode::GeneralError; + } + }; + + let rule = match rules.iter_mut().find(|r| r.id == args.id) { + Some(rule) => rule, + None => { + formatter.error(&format!("Rule '{}' not found", args.id)); + return ExitCode::NotFound; + } + }; + + // Update status if requested + if let Some(disable) = args.disable { + rule.status = if disable { + LifecycleRuleStatus::Disabled + } else { + LifecycleRuleStatus::Enabled + }; + } + + // Update prefix if provided + if args.prefix.is_some() { + rule.prefix = args.prefix; + } + + // Update expiration if provided + if args.expiry_days.is_some() || args.expiry_date.is_some() { + rule.expiration = Some(LifecycleExpiration { + days: args + .expiry_days + .or_else(|| rule.expiration.as_ref().and_then(|e| e.days)), + date: args + .expiry_date + .or_else(|| rule.expiration.as_ref().and_then(|e| e.date.clone())), + }); + } + + // Update transition if provided + if args.transition_days.is_some() + || args.transition_date.is_some() + || 
args.storage_class.is_some() + { + let current_sc = rule + .transition + .as_ref() + .map(|t| t.storage_class.clone()) + .unwrap_or_default(); + let sc = args.storage_class.unwrap_or(current_sc); + if sc.is_empty() { + formatter.error("--storage-class is required for transition"); + return ExitCode::UsageError; + } + rule.transition = Some(LifecycleTransition { + days: args + .transition_days + .or_else(|| rule.transition.as_ref().and_then(|t| t.days)), + date: args + .transition_date + .or_else(|| rule.transition.as_ref().and_then(|t| t.date.clone())), + storage_class: sc, + }); + } + + // Update noncurrent version expiration if provided + if args.noncurrent_expiry_days.is_some() || args.newer_noncurrent_versions.is_some() { + let current = rule.noncurrent_version_expiration.as_ref(); + rule.noncurrent_version_expiration = Some(NoncurrentVersionExpiration { + noncurrent_days: args + .noncurrent_expiry_days + .unwrap_or_else(|| current.map(|c| c.noncurrent_days).unwrap_or(0)), + newer_noncurrent_versions: args + .newer_noncurrent_versions + .or_else(|| current.and_then(|c| c.newer_noncurrent_versions)), + }); + } + + // Update noncurrent version transition if provided + if args.noncurrent_transition_days.is_some() + || args.noncurrent_transition_storage_class.is_some() + { + let current = rule.noncurrent_version_transition.as_ref(); + let sc = args + .noncurrent_transition_storage_class + .or_else(|| current.map(|c| c.storage_class.clone())) + .unwrap_or_default(); + if sc.is_empty() { + formatter.error( + "--noncurrent-transition-storage-class is required for noncurrent transition", + ); + return ExitCode::UsageError; + } + rule.noncurrent_version_transition = Some(NoncurrentVersionTransition { + noncurrent_days: args + .noncurrent_transition_days + .unwrap_or_else(|| current.map(|c| c.noncurrent_days).unwrap_or(0)), + storage_class: sc, + }); + } + + // Update expired object delete marker if provided + if let Some(val) = args.expired_object_delete_marker { + 
rule.expired_object_delete_marker = Some(val); + } + + match client.set_bucket_lifecycle(&bucket, rules).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&RuleOperationOutput { + bucket, + rule_id: args.id, + action: "edited".to_string(), + }); + } else { + formatter.success("Lifecycle rule updated successfully."); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to set lifecycle rules: {error}")); + ExitCode::GeneralError + } + } +} + +async fn execute_list(args: BucketArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + let rules = client + .get_bucket_lifecycle(&bucket) + .await + .unwrap_or_default(); + + if formatter.is_json() { + formatter.json(&RuleListOutput { bucket, rules }); + return ExitCode::Success; + } + + if rules.is_empty() { + formatter.println("No lifecycle rules found."); + return ExitCode::Success; + } + + let mut table = Table::new(); + table.set_content_arrangement(ContentArrangement::Dynamic); + table.set_header(vec![ + "ID", + "Status", + "Prefix", + "Expiry", + "Transition", + "Storage Class", + ]); + + for rule in &rules { + let prefix = rule.prefix.as_deref().unwrap_or("-"); + let expiry = format_expiry(rule); + let transition = format_transition(rule); + let storage_class = rule + .transition + .as_ref() + .map(|t| t.storage_class.as_str()) + .unwrap_or("-"); + + table.add_row(vec![ + &rule.id, + &rule.status.to_string(), + prefix, + &expiry, + &transition, + storage_class, + ]); + } + + formatter.println(&table.to_string()); + ExitCode::Success +} + +async fn execute_remove(args: RemoveRuleArgs, output_config: OutputConfig) 
-> ExitCode { + let formatter = Formatter::new(output_config); + + if args.id.is_none() && !args.all { + formatter.error("Either --id or --all is required"); + return ExitCode::UsageError; + } + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + if args.all { + match client.delete_bucket_lifecycle(&bucket).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&RuleOperationOutput { + bucket, + rule_id: "*".to_string(), + action: "removed_all".to_string(), + }); + } else { + formatter.success("All lifecycle rules removed."); + } + return ExitCode::Success; + } + Err(error) => { + formatter.error(&format!( + "Failed to delete lifecycle configuration: {error}" + )); + return ExitCode::GeneralError; + } + } + } + + // Remove by ID + let rule_id = args.id.as_deref().unwrap_or(""); + let mut rules = match client.get_bucket_lifecycle(&bucket).await { + Ok(rules) => rules, + Err(error) => { + formatter.error(&format!("Failed to get lifecycle rules: {error}")); + return ExitCode::GeneralError; + } + }; + + let before = rules.len(); + rules.retain(|r| r.id != rule_id); + if rules.len() == before { + formatter.error(&format!("Rule '{rule_id}' not found")); + return ExitCode::NotFound; + } + + let result = if rules.is_empty() { + client.delete_bucket_lifecycle(&bucket).await + } else { + client.set_bucket_lifecycle(&bucket, rules).await + }; + + match result { + Ok(()) => { + if formatter.is_json() { + formatter.json(&RuleOperationOutput { + bucket, + rule_id: rule_id.to_string(), + action: "removed".to_string(), + }); + } else { + formatter.success(&format!("Lifecycle rule '{rule_id}' removed.")); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to update 
lifecycle rules: {error}")); + ExitCode::GeneralError + } + } +} + +async fn execute_export(args: BucketArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + let rules = client + .get_bucket_lifecycle(&bucket) + .await + .unwrap_or_default(); + + let config = LifecycleConfiguration { rules }; + formatter.json(&config); + ExitCode::Success +} + +async fn execute_import(args: ImportRuleArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let file_contents = match std::fs::read_to_string(&args.file) { + Ok(contents) => contents, + Err(error) => { + formatter.error(&format!("Failed to read file '{}': {error}", args.file)); + return ExitCode::GeneralError; + } + }; + + let config: LifecycleConfiguration = match serde_json::from_str(&file_contents) { + Ok(config) => config, + Err(error) => { + formatter.error(&format!("Failed to parse lifecycle configuration: {error}")); + return ExitCode::UsageError; + } + }; + + let client = match setup_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + let rule_count = config.rules.len(); + match client.set_bucket_lifecycle(&bucket, config.rules).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&serde_json::json!({ + "bucket": bucket, + "rulesImported": rule_count, + "action": "imported", + })); + } else { + formatter.success(&format!( + "Imported {rule_count} 
lifecycle rule(s) to bucket '{bucket}'." + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to set lifecycle rules: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Helpers ==================== + +async fn setup_client( + alias_name: &str, + bucket: &str, + force: bool, + formatter: &Formatter, +) -> Result { + let alias_manager = match AliasManager::new() { + Ok(manager) => manager, + Err(error) => { + formatter.error(&format!("Failed to load aliases: {error}")); + return Err(ExitCode::GeneralError); + } + }; + + let alias = match alias_manager.get(alias_name) { + Ok(alias) => alias, + Err(_) => { + formatter.error(&format!("Alias '{alias_name}' not found")); + return Err(ExitCode::NotFound); + } + }; + + let client = match S3Client::new(alias).await { + Ok(client) => client, + Err(error) => { + formatter.error(&format!("Failed to create S3 client: {error}")); + return Err(ExitCode::NetworkError); + } + }; + + let caps = match client.capabilities().await { + Ok(caps) => caps, + Err(error) => { + if force { + rc_core::Capabilities::default() + } else { + formatter.error(&format!("Failed to detect capabilities: {error}")); + return Err(ExitCode::NetworkError); + } + } + }; + + if !force && !caps.lifecycle { + formatter.error("Backend does not support lifecycle. 
/// Split an `ALIAS/BUCKET` path argument into its alias and bucket parts.
///
/// A trailing slash on the bucket (`alias/bucket/`) is tolerated and stripped.
/// Returns a human-readable error message (mapped to a usage error by callers)
/// when either component is missing or empty.
///
/// Fix: the trailing-slash trim previously ran AFTER the emptiness check, so
/// `"alias//"` slipped through as `Ok(("alias", ""))`; the bucket is now
/// validated after trimming.
fn parse_bucket_path(path: &str) -> Result<(String, String), String> {
    if path.is_empty() {
        return Err("Path cannot be empty".to_string());
    }

    let parts: Vec<&str> = path.splitn(2, '/').collect();
    if parts.len() < 2 || parts[0].is_empty() || parts[1].is_empty() {
        return Err("Bucket path must be in format alias/bucket".to_string());
    }

    // Trim before validating so a slash-only bucket ("alias//") is rejected
    // instead of yielding an empty bucket name.
    let bucket = parts[1].trim_end_matches('/');
    if bucket.is_empty() {
        return Err("Bucket path must be in format alias/bucket".to_string());
    }

    Ok((parts[0].to_string(), bucket.to_string()))
}

/// Generate a lifecycle rule id of the fixed form `rule-XXXXXXXX`.
///
/// Uses the lower 32 bits of the current nanosecond timestamp as an 8-char
/// hex suffix — unique enough for interactively created rules, without
/// pulling in a UUID dependency.
fn generate_rule_id() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};

    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();
    let suffix = format!("{:08x}", (nanos & 0xFFFF_FFFF) as u32);
    format!("rule-{suffix}")
}
parse_bucket_path("local/my-bucket/").expect("should parse"); + assert_eq!(alias, "local"); + assert_eq!(bucket, "my-bucket"); + } + + #[test] + fn test_parse_bucket_path_error() { + assert!(parse_bucket_path("").is_err()); + assert!(parse_bucket_path("local").is_err()); + assert!(parse_bucket_path("/bucket").is_err()); + } + + #[test] + fn test_generate_rule_id_format() { + let id = generate_rule_id(); + assert!(id.starts_with("rule-")); + assert_eq!(id.len(), 13); // "rule-" (5) + 8 hex chars + } + + #[test] + fn test_format_expiry_days() { + let rule = LifecycleRule { + id: "test".to_string(), + status: LifecycleRuleStatus::Enabled, + prefix: None, + tags: None, + expiration: Some(LifecycleExpiration { + days: Some(30), + date: None, + }), + transition: None, + noncurrent_version_expiration: None, + noncurrent_version_transition: None, + abort_incomplete_multipart_upload_days: None, + expired_object_delete_marker: None, + }; + assert_eq!(format_expiry(&rule), "30 day(s)"); + } + + #[test] + fn test_format_transition_none() { + let rule = LifecycleRule { + id: "test".to_string(), + status: LifecycleRuleStatus::Enabled, + prefix: None, + tags: None, + expiration: None, + transition: None, + noncurrent_version_expiration: None, + noncurrent_version_transition: None, + abort_incomplete_multipart_upload_days: None, + expired_object_delete_marker: None, + }; + assert_eq!(format_transition(&rule), "-"); + } + + #[tokio::test] + async fn test_execute_add_invalid_path_returns_usage_error() { + let args = AddRuleArgs { + path: "invalid-path".to_string(), + expiry_days: Some(30), + expiry_date: None, + transition_days: None, + transition_date: None, + storage_class: None, + noncurrent_expiry_days: None, + noncurrent_transition_days: None, + noncurrent_transition_storage_class: None, + prefix: None, + expired_object_delete_marker: false, + newer_noncurrent_versions: None, + disable: false, + force: false, + }; + + let code = execute_add(args, OutputConfig::default()).await; 
+ assert_eq!(code, ExitCode::UsageError); + } + + #[tokio::test] + async fn test_execute_remove_no_id_or_all_returns_usage_error() { + let args = RemoveRuleArgs { + path: "local/my-bucket".to_string(), + id: None, + all: false, + force: false, + }; + + let code = execute_remove(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } +} diff --git a/crates/cli/src/commands/ilm/tier.rs b/crates/cli/src/commands/ilm/tier.rs new file mode 100644 index 0000000..3ac4ef4 --- /dev/null +++ b/crates/cli/src/commands/ilm/tier.rs @@ -0,0 +1,563 @@ +//! ilm tier command - Manage remote storage tiers +//! +//! Add, edit, list, inspect, and remove remote storage tiers used by lifecycle transition rules. + +use clap::{Args, Subcommand}; +use comfy_table::{ContentArrangement, Table}; +use rc_core::admin::{AdminApi, TierConfig, TierCreds, TierType}; +use serde::Serialize; + +use crate::exit_code::ExitCode; +use crate::output::{Formatter, OutputConfig}; + +use super::super::admin::get_admin_client; + +#[derive(Subcommand, Debug)] +pub enum TierCommands { + /// Add a new remote storage tier + Add(AddTierArgs), + + /// Edit tier credentials + Edit(EditTierArgs), + + /// List configured storage tiers + List(AliasArg), + + /// Show tier statistics + Info(TierNameArg), + + /// Remove a storage tier + Remove(RemoveTierArgs), +} + +#[derive(Args, Debug)] +pub struct AliasArg { + /// Alias name + pub alias: String, +} + +#[derive(Args, Debug)] +pub struct AddTierArgs { + /// Tier type (s3, rustfs, minio, aliyun, tencent, huaweicloud, azure, gcs, r2) + pub tier_type: String, + + /// Tier name (uppercase identifier, e.g. 
WARM, COLD) + pub tier_name: String, + + /// Alias name + pub alias: String, + + /// Remote endpoint URL + #[arg(long)] + pub endpoint: String, + + /// Access key for the remote backend + #[arg(long)] + pub access_key: String, + + /// Secret key for the remote backend + #[arg(long)] + pub secret_key: String, + + /// Target bucket on the remote backend + #[arg(long)] + pub bucket: String, + + /// Object key prefix on the remote bucket + #[arg(long, default_value = "")] + pub prefix: String, + + /// Region of the remote backend + #[arg(long, default_value = "")] + pub region: String, + + /// Storage class on the remote backend (for S3/Azure/GCS tiers) + #[arg(long, default_value = "")] + pub storage_class: String, +} + +#[derive(Args, Debug)] +pub struct EditTierArgs { + /// Tier name to update + pub tier_name: String, + + /// Alias name + pub alias: String, + + /// New access key + #[arg(long)] + pub access_key: String, + + /// New secret key + #[arg(long)] + pub secret_key: String, +} + +#[derive(Args, Debug)] +pub struct TierNameArg { + /// Tier name to inspect + pub tier_name: String, + + /// Alias name + pub alias: String, +} + +#[derive(Args, Debug)] +pub struct RemoveTierArgs { + /// Tier name to remove + pub tier_name: String, + + /// Alias name + pub alias: String, + + /// Force removal even if tier is in use + #[arg(long)] + pub force: bool, +} + +#[derive(Debug, Serialize)] +struct TierListOutput { + tiers: Vec, +} + +#[derive(Debug, Serialize)] +struct TierOperationOutput { + tier_name: String, + action: String, +} + +/// Execute a tier subcommand +pub async fn execute(cmd: TierCommands, output_config: OutputConfig) -> ExitCode { + match cmd { + TierCommands::Add(args) => execute_add(args, output_config).await, + TierCommands::Edit(args) => execute_edit(args, output_config).await, + TierCommands::List(args) => execute_list(args, output_config).await, + TierCommands::Info(args) => execute_info(args, output_config).await, + TierCommands::Remove(args) => 
execute_remove(args, output_config).await, + } +} + +async fn execute_add(args: AddTierArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let tier_type: TierType = match args.tier_type.parse() { + Ok(tt) => tt, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match get_admin_client(&args.alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let config = build_tier_config(&TierConfigParams { + tier_type, + name: &args.tier_name, + endpoint: &args.endpoint, + access_key: &args.access_key, + secret_key: &args.secret_key, + bucket: &args.bucket, + prefix: &args.prefix, + region: &args.region, + storage_class: &args.storage_class, + }); + + match client.add_tier(config).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&TierOperationOutput { + tier_name: args.tier_name, + action: "added".to_string(), + }); + } else { + formatter.success(&format!( + "Tier '{}' ({}) added successfully.", + args.tier_name, tier_type + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to add tier: {error}")); + ExitCode::GeneralError + } + } +} + +async fn execute_edit(args: EditTierArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let client = match get_admin_client(&args.alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let creds = TierCreds { + access_key: args.access_key, + secret_key: args.secret_key, + }; + + match client.edit_tier(&args.tier_name, creds).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&TierOperationOutput { + tier_name: args.tier_name, + action: "edited".to_string(), + }); + } else { + formatter.success(&format!( + "Tier '{}' credentials updated successfully.", + args.tier_name + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to edit tier: 
{error}")); + ExitCode::GeneralError + } + } +} + +async fn execute_list(args: AliasArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let client = match get_admin_client(&args.alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let tiers = match client.list_tiers().await { + Ok(tiers) => tiers, + Err(error) => { + formatter.error(&format!("Failed to list tiers: {error}")); + return ExitCode::GeneralError; + } + }; + + if formatter.is_json() { + formatter.json(&TierListOutput { tiers }); + return ExitCode::Success; + } + + if tiers.is_empty() { + formatter.println("No storage tiers configured."); + return ExitCode::Success; + } + + let mut table = Table::new(); + table.set_content_arrangement(ContentArrangement::Dynamic); + table.set_header(vec![ + "Name", "Type", "Endpoint", "Bucket", "Prefix", "Region", + ]); + + for tier in &tiers { + table.add_row(vec![ + tier.tier_name(), + &tier.tier_type.to_string(), + tier.endpoint(), + tier.bucket(), + tier.prefix(), + tier.region(), + ]); + } + + formatter.println(&table.to_string()); + ExitCode::Success +} + +async fn execute_info(args: TierNameArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let client = match get_admin_client(&args.alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let stats = match client.tier_stats().await { + Ok(stats) => stats, + Err(error) => { + formatter.error(&format!("Failed to get tier stats: {error}")); + return ExitCode::GeneralError; + } + }; + + // The tier_stats endpoint returns a JSON object keyed by tier name + let tier_name_upper = args.tier_name.to_uppercase(); + if let Some(tier_info) = stats.get(&tier_name_upper) { + if formatter.is_json() { + formatter.json(&serde_json::json!({ + "tierName": tier_name_upper, + "stats": tier_info, + })); + } else { + formatter.println(&format!("Tier: {tier_name_upper}")); + if let 
Some(obj) = tier_info.as_object() { + for (key, value) in obj { + formatter.println(&format!(" {key}: {value}")); + } + } + } + ExitCode::Success + } else { + formatter.error(&format!("Tier '{}' not found in stats", args.tier_name)); + ExitCode::NotFound + } +} + +async fn execute_remove(args: RemoveTierArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let client = match get_admin_client(&args.alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + match client.remove_tier(&args.tier_name, args.force).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&TierOperationOutput { + tier_name: args.tier_name, + action: "removed".to_string(), + }); + } else { + formatter.success(&format!("Tier '{}' removed successfully.", args.tier_name)); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to remove tier: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Helpers ==================== + +struct TierConfigParams<'a> { + tier_type: TierType, + name: &'a str, + endpoint: &'a str, + access_key: &'a str, + secret_key: &'a str, + bucket: &'a str, + prefix: &'a str, + region: &'a str, + storage_class: &'a str, +} + +fn build_tier_config(params: &TierConfigParams<'_>) -> TierConfig { + let mut config = TierConfig { + tier_type: params.tier_type, + name: params.name.to_string(), + ..Default::default() + }; + + match params.tier_type { + TierType::S3 => { + config.s3 = Some(rc_core::admin::TierS3 { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + storage_class: params.storage_class.to_string(), + }); + } + TierType::RustFS => { + config.rustfs = Some(rc_core::admin::TierRustFS { + name: params.name.to_string(), + 
endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + storage_class: params.storage_class.to_string(), + }); + } + TierType::MinIO => { + config.minio = Some(rc_core::admin::TierMinIO { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + }); + } + TierType::Aliyun => { + config.aliyun = Some(rc_core::admin::TierAliyun { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + }); + } + TierType::Tencent => { + config.tencent = Some(rc_core::admin::TierTencent { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + }); + } + TierType::Huaweicloud => { + config.huaweicloud = Some(rc_core::admin::TierHuaweicloud { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + }); + } + TierType::Azure => { + config.azure = Some(rc_core::admin::TierAzure { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + 
bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + storage_class: params.storage_class.to_string(), + }); + } + TierType::GCS => { + config.gcs = Some(rc_core::admin::TierGCS { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + creds: String::new(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + storage_class: params.storage_class.to_string(), + }); + } + TierType::R2 => { + config.r2 = Some(rc_core::admin::TierR2 { + name: params.name.to_string(), + endpoint: params.endpoint.to_string(), + access_key: params.access_key.to_string(), + secret_key: params.secret_key.to_string(), + bucket: params.bucket.to_string(), + prefix: params.prefix.to_string(), + region: params.region.to_string(), + }); + } + } + + config +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tier_type_parsing() { + assert_eq!("s3".parse::().unwrap(), TierType::S3); + assert_eq!("rustfs".parse::().unwrap(), TierType::RustFS); + assert_eq!("minio".parse::().unwrap(), TierType::MinIO); + assert_eq!("azure".parse::().unwrap(), TierType::Azure); + assert_eq!("gcs".parse::().unwrap(), TierType::GCS); + assert_eq!("r2".parse::().unwrap(), TierType::R2); + assert!("invalid".parse::().is_err()); + } + + #[test] + fn test_build_tier_config_s3() { + let config = build_tier_config(&TierConfigParams { + tier_type: TierType::S3, + name: "WARM", + endpoint: "https://s3.amazonaws.com", + access_key: "AKID", + secret_key: "SECRET", + bucket: "warm-bucket", + prefix: "tier/", + region: "us-east-1", + storage_class: "STANDARD_IA", + }); + + assert_eq!(config.tier_type, TierType::S3); + assert_eq!(config.tier_name(), "WARM"); + assert!(config.s3.is_some()); + let s3 = config.s3.as_ref().unwrap(); + assert_eq!(s3.endpoint, "https://s3.amazonaws.com"); + assert_eq!(s3.bucket, "warm-bucket"); + assert_eq!(s3.prefix, "tier/"); + assert_eq!(s3.region, 
"us-east-1"); + assert_eq!(s3.storage_class, "STANDARD_IA"); + } + + #[test] + fn test_build_tier_config_rustfs() { + let config = build_tier_config(&TierConfigParams { + tier_type: TierType::RustFS, + name: "ARCHIVE", + endpoint: "http://remote:9000", + access_key: "admin", + secret_key: "password", + bucket: "archive-bucket", + prefix: "", + region: "", + storage_class: "", + }); + + assert_eq!(config.tier_type, TierType::RustFS); + assert_eq!(config.tier_name(), "ARCHIVE"); + assert!(config.rustfs.is_some()); + } + + #[test] + fn test_build_tier_config_minio() { + let config = build_tier_config(&TierConfigParams { + tier_type: TierType::MinIO, + name: "COLD", + endpoint: "http://minio:9000", + access_key: "key", + secret_key: "secret", + bucket: "cold-bucket", + prefix: "data/", + region: "us-west-2", + storage_class: "", + }); + + assert_eq!(config.tier_type, TierType::MinIO); + assert!(config.minio.is_some()); + assert!(config.s3.is_none()); + } + + #[tokio::test] + async fn test_execute_add_invalid_tier_type_returns_usage_error() { + let args = AddTierArgs { + tier_type: "invalid".to_string(), + tier_name: "WARM".to_string(), + alias: "local".to_string(), + endpoint: "https://example.com".to_string(), + access_key: "key".to_string(), + secret_key: "secret".to_string(), + bucket: "bucket".to_string(), + prefix: String::new(), + region: String::new(), + storage_class: String::new(), + }; + + let code = execute_add(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } +} diff --git a/crates/cli/src/commands/mod.rs b/crates/cli/src/commands/mod.rs index e259d64..b913c06 100644 --- a/crates/cli/src/commands/mod.rs +++ b/crates/cli/src/commands/mod.rs @@ -19,6 +19,7 @@ pub mod diff; mod event; mod find; mod head; +mod ilm; mod ls; mod mb; mod mirror; @@ -26,6 +27,7 @@ mod mv; mod pipe; mod quota; mod rb; +mod replicate; mod rm; mod share; mod stat; @@ -145,6 +147,13 @@ pub enum Commands { #[command(subcommand)] 
Quota(quota::QuotaCommands), + /// Manage bucket lifecycle (ILM) rules, tiers, and restores + Ilm(ilm::IlmArgs), + + /// Manage bucket replication + #[command(subcommand)] + Replicate(replicate::ReplicateCommands), + // Phase 6: Utilities /// Generate shell completion scripts Completions(completions::CompletionsArgs), @@ -196,6 +205,10 @@ pub async fn execute(cli: Cli) -> ExitCode { Commands::Quota(cmd) => { quota::execute(quota::QuotaArgs { command: cmd }, output_config).await } + Commands::Ilm(args) => ilm::execute(args, output_config).await, + Commands::Replicate(cmd) => { + replicate::execute(replicate::ReplicateArgs { command: cmd }, output_config).await + } Commands::Completions(args) => completions::execute(args), } } diff --git a/crates/cli/src/commands/replicate.rs b/crates/cli/src/commands/replicate.rs new file mode 100644 index 0000000..2f8760d --- /dev/null +++ b/crates/cli/src/commands/replicate.rs @@ -0,0 +1,962 @@ +//! replicate command - Manage bucket replication configuration +//! +//! Add, update, list, status, remove, export, or import bucket replication rules. +//! This command orchestrates both the S3 replication API and the Admin remote-target API. 
+ +use clap::{Args, Subcommand}; +use comfy_table::{Cell, Table}; +use rc_core::admin::AdminApi; +use rc_core::replication::{ + BucketTarget, BucketTargetCredentials, ReplicationConfiguration, ReplicationDestination, + ReplicationRule, ReplicationRuleStatus, +}; +use rc_core::{AliasManager, ObjectStore as _}; +use rc_s3::{AdminClient, S3Client}; +use serde::Serialize; + +use crate::exit_code::ExitCode; +use crate::output::{Formatter, OutputConfig}; + +/// Manage bucket replication +#[derive(Args, Debug)] +pub struct ReplicateArgs { + #[command(subcommand)] + pub command: ReplicateCommands, +} + +#[derive(Subcommand, Debug)] +pub enum ReplicateCommands { + /// Add a new replication rule + Add(AddArgs), + + /// Update an existing replication rule + Update(UpdateArgs), + + /// List replication rules for a bucket + List(BucketArg), + + /// Show replication status/metrics for a bucket + Status(BucketArg), + + /// Remove replication rules from a bucket + Remove(RemoveArgs), + + /// Export replication configuration as JSON + Export(BucketArg), + + /// Import replication configuration from a JSON file + Import(ImportArgs), +} + +#[derive(Args, Debug)] +pub struct BucketArg { + /// Source bucket path (ALIAS/BUCKET) + pub path: String, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct AddArgs { + /// Source bucket path (ALIAS/BUCKET) + pub path: String, + + /// Remote target bucket (TARGET_ALIAS/BUCKET) + #[arg(long, value_name = "TARGET_ALIAS/BUCKET")] + pub remote_bucket: String, + + /// Replication flags (comma-separated: delete,delete-marker,existing-objects) + #[arg(long, value_name = "FLAGS")] + pub replicate: Option, + + /// Rule priority (higher = more important) + #[arg(long, default_value = "1")] + pub priority: i32, + + /// Storage class override at destination + #[arg(long)] + pub storage_class: Option, + + /// Bandwidth limit in bytes/sec (0 = unlimited) + #[arg(long, 
default_value = "0")] + pub bandwidth: i64, + + /// Enable synchronous replication + #[arg(long)] + pub sync: bool, + + /// Key prefix filter + #[arg(long)] + pub prefix: Option, + + /// Rule identifier (auto-generated if not specified) + #[arg(long)] + pub id: Option, + + /// Health check interval in seconds + #[arg(long, value_name = "SECONDS", default_value = "60")] + pub healthcheck_seconds: u64, + + /// Disable replication proxy + #[arg(long)] + pub disable_proxy: bool, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct UpdateArgs { + /// Source bucket path (ALIAS/BUCKET) + pub path: String, + + /// Rule ID to update + #[arg(long)] + pub id: String, + + /// Replication flags (comma-separated: delete,delete-marker,existing-objects) + #[arg(long, value_name = "FLAGS")] + pub replicate: Option, + + /// Rule priority (higher = more important) + #[arg(long)] + pub priority: Option, + + /// Storage class override at destination + #[arg(long)] + pub storage_class: Option, + + /// Bandwidth limit in bytes/sec (0 = unlimited) + #[arg(long)] + pub bandwidth: Option, + + /// Enable or disable synchronous replication + #[arg(long)] + pub sync: Option, + + /// Key prefix filter + #[arg(long)] + pub prefix: Option, + + /// Health check interval in seconds + #[arg(long, value_name = "SECONDS")] + pub healthcheck_seconds: Option, + + /// Disable replication proxy + #[arg(long)] + pub disable_proxy: Option, + + /// Enable or disable the rule + #[arg(long)] + pub status: Option, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +#[derive(Args, Debug)] +pub struct RemoveArgs { + /// Source bucket path (ALIAS/BUCKET) + pub path: String, + + /// Rule ID to remove (omit for --all) + #[arg(long)] + pub id: Option, + + /// Remove all replication rules + #[arg(long)] + pub all: bool, + + /// Force operation even if capability detection fails + #[arg(long)] 
+ pub force: bool, +} + +#[derive(Args, Debug)] +pub struct ImportArgs { + /// Source bucket path (ALIAS/BUCKET) + pub path: String, + + /// Path to JSON file containing replication configuration + pub file: String, + + /// Force operation even if capability detection fails + #[arg(long)] + pub force: bool, +} + +// ==================== Output types ==================== + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct ReplicateListOutput { + bucket: String, + rules: Vec, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct ReplicateOperationOutput { + bucket: String, + rule_id: String, + action: String, +} + +// ==================== execute ==================== + +/// Execute the replicate command +pub async fn execute(args: ReplicateArgs, output_config: OutputConfig) -> ExitCode { + match args.command { + ReplicateCommands::Add(args) => execute_add(args, output_config).await, + ReplicateCommands::Update(args) => execute_update(args, output_config).await, + ReplicateCommands::List(args) => execute_list(args, output_config).await, + ReplicateCommands::Status(args) => execute_status(args, output_config).await, + ReplicateCommands::Remove(args) => execute_remove(args, output_config).await, + ReplicateCommands::Export(args) => execute_export(args, output_config).await, + ReplicateCommands::Import(args) => execute_import(args, output_config).await, + } +} + +// ==================== Add ==================== + +async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (source_alias, source_bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let (target_alias, target_bucket) = match parse_bucket_path(&args.remote_bucket) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&format!("Invalid --remote-bucket: {error}")); + return 
ExitCode::UsageError; + } + }; + + // Create S3 client for source + let s3_client = + match setup_s3_client(&source_alias, &source_bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + // Create admin client for source (to register remote target) + let admin_client = match setup_admin_client(&source_alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + // Resolve target alias to get endpoint + credentials + let target_alias_info = match resolve_alias(&target_alias, &formatter) { + Ok(alias) => alias, + Err(code) => return code, + }; + + let secure = target_alias_info.endpoint.starts_with("https"); + + // Build BucketTarget + let target = BucketTarget { + source_bucket: source_bucket.clone(), + endpoint: target_alias_info.endpoint.clone(), + credentials: Some(BucketTargetCredentials { + access_key: target_alias_info.access_key.clone(), + secret_key: target_alias_info.secret_key.clone(), + }), + target_bucket: target_bucket.clone(), + secure, + target_type: "replication".to_string(), + region: target_alias_info.region.clone(), + bandwidth_limit: args.bandwidth, + replication_sync: args.sync, + storage_class: args.storage_class.clone().unwrap_or_default(), + health_check_duration: args.healthcheck_seconds, + disable_proxy: args.disable_proxy, + ..Default::default() + }; + + // Register remote target via admin API → get ARN + let arn = match admin_client + .set_remote_target(&source_bucket, target, false) + .await + { + Ok(arn) => arn, + Err(error) => { + formatter.error(&format!("Failed to set remote target: {error}")); + return ExitCode::GeneralError; + } + }; + + // Parse replication flags + let (delete_replication, delete_marker_replication, existing_object_replication) = + parse_replicate_flags(args.replicate.as_deref()); + + let rule_id = args + .id + .unwrap_or_else(|| format!("rule-{}", &arn[arn.len().saturating_sub(8)..])); + + let new_rule = ReplicationRule { + id: rule_id.clone(), + 
priority: args.priority, + status: ReplicationRuleStatus::Enabled, + prefix: args.prefix, + tags: None, + destination: ReplicationDestination { + bucket_arn: arn, + storage_class: args.storage_class, + }, + delete_marker_replication: Some(delete_marker_replication), + existing_object_replication: Some(existing_object_replication), + delete_replication: Some(delete_replication), + }; + + // Get existing config or create new + let mut config = match s3_client.get_bucket_replication(&source_bucket).await { + Ok(Some(config)) => config, + Ok(None) => ReplicationConfiguration { + role: String::new(), + rules: Vec::new(), + }, + Err(error) => { + formatter.error(&format!("Failed to get replication config: {error}")); + return ExitCode::GeneralError; + } + }; + + config.rules.push(new_rule); + + match s3_client + .set_bucket_replication(&source_bucket, config) + .await + { + Ok(()) => { + if formatter.is_json() { + formatter.json(&ReplicateOperationOutput { + bucket: source_bucket, + rule_id, + action: "added".to_string(), + }); + } else { + formatter.success(&format!( + "Replication rule '{}' added for '{}/{}'", + rule_id, source_alias, source_bucket + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to set replication config: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Update ==================== + +async fn execute_update(args: UpdateArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (source_alias, source_bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let s3_client = + match setup_s3_client(&source_alias, &source_bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + let mut config = match s3_client.get_bucket_replication(&source_bucket).await { + Ok(Some(config)) => config, + Ok(None) => { + 
formatter.error("No replication configuration found on this bucket"); + return ExitCode::NotFound; + } + Err(error) => { + formatter.error(&format!("Failed to get replication config: {error}")); + return ExitCode::GeneralError; + } + }; + + let rule = match config.rules.iter_mut().find(|r| r.id == args.id) { + Some(rule) => rule, + None => { + formatter.error(&format!("Rule '{}' not found", args.id)); + return ExitCode::NotFound; + } + }; + + // Apply updates + if let Some(priority) = args.priority { + rule.priority = priority; + } + if let Some(status) = args.status { + rule.status = status; + } + if let Some(ref prefix) = args.prefix { + rule.prefix = Some(prefix.clone()); + } + if let Some(ref storage_class) = args.storage_class { + rule.destination.storage_class = Some(storage_class.clone()); + } + if let Some(ref flags) = args.replicate { + let (delete, delete_marker, existing) = parse_replicate_flags(Some(flags)); + rule.delete_replication = Some(delete); + rule.delete_marker_replication = Some(delete_marker); + rule.existing_object_replication = Some(existing); + } + + let rule_id = args.id.clone(); + + match s3_client + .set_bucket_replication(&source_bucket, config) + .await + { + Ok(()) => { + if formatter.is_json() { + formatter.json(&ReplicateOperationOutput { + bucket: source_bucket, + rule_id, + action: "updated".to_string(), + }); + } else { + formatter.success(&format!( + "Replication rule '{}' updated for '{}/{}'", + rule_id, source_alias, source_bucket + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to update replication config: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== List ==================== + +async fn execute_list(args: BucketArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return 
ExitCode::UsageError; + } + }; + + let client = match setup_s3_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + match client.get_bucket_replication(&bucket).await { + Ok(Some(config)) => { + if formatter.is_json() { + formatter.json(&ReplicateListOutput { + bucket, + rules: config.rules, + }); + } else if config.rules.is_empty() { + formatter.println("No replication rules found."); + } else { + let mut table = Table::new(); + table.set_header(vec![ + Cell::new("ID"), + Cell::new("Priority"), + Cell::new("Status"), + Cell::new("Prefix"), + Cell::new("Destination"), + Cell::new("Storage Class"), + ]); + + for rule in &config.rules { + table.add_row(vec![ + Cell::new(&rule.id), + Cell::new(rule.priority), + Cell::new(rule.status), + Cell::new(rule.prefix.as_deref().unwrap_or("-")), + Cell::new(&rule.destination.bucket_arn), + Cell::new(rule.destination.storage_class.as_deref().unwrap_or("-")), + ]); + } + + formatter.println(&table.to_string()); + } + ExitCode::Success + } + Ok(None) => { + if formatter.is_json() { + formatter.json(&ReplicateListOutput { + bucket, + rules: Vec::new(), + }); + } else { + formatter.println("No replication configuration found."); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to get replication config: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Status ==================== + +async fn execute_status(args: BucketArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let admin_client = match setup_admin_client(&alias_name, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + match admin_client.replication_metrics(&bucket).await { + Ok(metrics) => { + if 
formatter.is_json() { + formatter.json(&metrics); + } else { + formatter.println(&format!("Replication metrics for '{alias_name}/{bucket}':")); + match serde_json::to_string_pretty(&metrics) { + Ok(pretty) => formatter.println(&pretty), + Err(error) => { + formatter.error(&format!("Failed to format metrics: {error}")); + return ExitCode::GeneralError; + } + } + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to get replication metrics: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Remove ==================== + +async fn execute_remove(args: RemoveArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + if args.id.is_none() && !args.all { + formatter.error("Either --id or --all is required"); + return ExitCode::UsageError; + } + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_s3_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + if args.all { + match client.delete_bucket_replication(&bucket).await { + Ok(()) => { + if formatter.is_json() { + formatter.json(&ReplicateOperationOutput { + bucket, + rule_id: "*".to_string(), + action: "removed".to_string(), + }); + } else { + formatter.success("All replication rules removed."); + } + return ExitCode::Success; + } + Err(error) => { + formatter.error(&format!("Failed to remove replication config: {error}")); + return ExitCode::GeneralError; + } + } + } + + // Remove specific rule by ID + let rule_id = args.id.as_deref().unwrap_or_default(); + + let mut config = match client.get_bucket_replication(&bucket).await { + Ok(Some(config)) => config, + Ok(None) => { + formatter.error("No replication configuration found on this bucket"); + return ExitCode::NotFound; + } + Err(error) => { + 
formatter.error(&format!("Failed to get replication config: {error}")); + return ExitCode::GeneralError; + } + }; + + let before = config.rules.len(); + config.rules.retain(|r| r.id != rule_id); + + if config.rules.len() == before { + formatter.error(&format!("Rule '{}' not found", rule_id)); + return ExitCode::NotFound; + } + + if config.rules.is_empty() { + match client.delete_bucket_replication(&bucket).await { + Ok(()) => {} + Err(error) => { + formatter.error(&format!("Failed to remove replication config: {error}")); + return ExitCode::GeneralError; + } + } + } else { + match client.set_bucket_replication(&bucket, config).await { + Ok(()) => {} + Err(error) => { + formatter.error(&format!("Failed to update replication config: {error}")); + return ExitCode::GeneralError; + } + } + } + + if formatter.is_json() { + formatter.json(&ReplicateOperationOutput { + bucket, + rule_id: rule_id.to_string(), + action: "removed".to_string(), + }); + } else { + formatter.success(&format!("Replication rule '{}' removed.", rule_id)); + } + ExitCode::Success +} + +// ==================== Export ==================== + +async fn execute_export(args: BucketArg, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let client = match setup_s3_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + match client.get_bucket_replication(&bucket).await { + Ok(Some(config)) => { + formatter.json(&config); + ExitCode::Success + } + Ok(None) => { + formatter.error("No replication configuration found on this bucket"); + ExitCode::NotFound + } + Err(error) => { + formatter.error(&format!("Failed to get replication config: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Import 
==================== + +async fn execute_import(args: ImportArgs, output_config: OutputConfig) -> ExitCode { + let formatter = Formatter::new(output_config); + + let (alias_name, bucket) = match parse_bucket_path(&args.path) { + Ok(parts) => parts, + Err(error) => { + formatter.error(&error); + return ExitCode::UsageError; + } + }; + + let data = match std::fs::read_to_string(&args.file) { + Ok(data) => data, + Err(error) => { + formatter.error(&format!("Failed to read file '{}': {error}", args.file)); + return ExitCode::GeneralError; + } + }; + + let config: ReplicationConfiguration = match serde_json::from_str(&data) { + Ok(config) => config, + Err(error) => { + formatter.error(&format!("Invalid JSON in '{}': {error}", args.file)); + return ExitCode::UsageError; + } + }; + + let client = match setup_s3_client(&alias_name, &bucket, args.force, &formatter).await { + Ok(client) => client, + Err(code) => return code, + }; + + match client.set_bucket_replication(&bucket, config).await { + Ok(()) => { + if formatter.is_json() { + let output = serde_json::json!({ + "bucket": bucket, + "action": "imported", + "file": args.file, + }); + formatter.json(&output); + } else { + formatter.success(&format!( + "Replication configuration imported from '{}'", + args.file + )); + } + ExitCode::Success + } + Err(error) => { + formatter.error(&format!("Failed to set replication config: {error}")); + ExitCode::GeneralError + } + } +} + +// ==================== Helpers ==================== + +fn parse_bucket_path(path: &str) -> Result<(String, String), String> { + if path.trim().is_empty() { + return Err("Path cannot be empty".to_string()); + } + + let parts: Vec<&str> = path.splitn(2, '/').collect(); + + if parts.len() < 2 || parts[0].is_empty() { + return Err("Alias name is required (ALIAS/BUCKET)".to_string()); + } + + let bucket = parts[1].trim_end_matches('/'); + if bucket.is_empty() { + return Err("Bucket name is required (ALIAS/BUCKET)".to_string()); + } + + 
Ok((parts[0].to_string(), bucket.to_string()))
+}
+
+fn resolve_alias(alias_name: &str, formatter: &Formatter) -> Result<Alias, ExitCode> {
+    let alias_manager = match AliasManager::new() {
+        Ok(manager) => manager,
+        Err(error) => {
+            formatter.error(&format!("Failed to load aliases: {error}"));
+            return Err(ExitCode::GeneralError);
+        }
+    };
+
+    match alias_manager.get(alias_name) {
+        Ok(alias) => Ok(alias),
+        Err(_) => {
+            formatter.error(&format!("Alias '{alias_name}' not found"));
+            Err(ExitCode::NotFound)
+        }
+    }
+}
+
+async fn setup_s3_client(
+    alias_name: &str,
+    bucket: &str,
+    force: bool,
+    formatter: &Formatter,
+) -> Result<S3Client, ExitCode> {
+    let alias = match resolve_alias(alias_name, formatter) {
+        Ok(alias) => alias,
+        Err(code) => return Err(code),
+    };
+
+    let client = match S3Client::new(alias).await {
+        Ok(client) => client,
+        Err(error) => {
+            formatter.error(&format!("Failed to create S3 client: {error}"));
+            return Err(ExitCode::NetworkError);
+        }
+    };
+
+    let caps = match client.capabilities().await {
+        Ok(caps) => caps,
+        Err(error) => {
+            if force {
+                rc_core::Capabilities::default()
+            } else {
+                formatter.error(&format!("Failed to detect capabilities: {error}"));
+                return Err(ExitCode::NetworkError);
+            }
+        }
+    };
+
+    if !force && !caps.replication {
+        formatter.error("Backend does not support replication. Use --force to attempt anyway.");
+        return Err(ExitCode::UnsupportedFeature);
+    }
+
+    match client.bucket_exists(bucket).await {
+        Ok(true) => {}
+        Ok(false) => {
+            formatter.error(&format!("Bucket '{bucket}' does not exist"));
+            return Err(ExitCode::NotFound);
+        }
+        Err(error) => {
+            formatter.error(&format!("Failed to check bucket: {error}"));
+            return Err(ExitCode::NetworkError);
+        }
+    }
+
+    Ok(client)
+}
+
+fn setup_admin_client(alias_name: &str, formatter: &Formatter) -> Result<AdminClient, ExitCode> {
+    let alias = resolve_alias(alias_name, formatter)?;
+
+    match AdminClient::new(&alias) {
+        Ok(client) => Ok(client),
+        Err(error) => {
+            formatter.error(&format!("Failed to create admin client: {error}"));
+            Err(ExitCode::GeneralError)
+        }
+    }
+}
+
+/// Parse --replicate flag value into (delete, delete_marker, existing_objects) booleans.
+fn parse_replicate_flags(flags: Option<&str>) -> (bool, bool, bool) {
+    let mut delete = false;
+    let mut delete_marker = false;
+    let mut existing_objects = false;
+
+    if let Some(flags_str) = flags {
+        for flag in flags_str.split(',').map(str::trim) {
+            match flag.to_lowercase().as_str() {
+                "delete" => delete = true,
+                "delete-marker" => delete_marker = true,
+                "existing-objects" => existing_objects = true,
+                _ => {}
+            }
+        }
+    }
+
+    (delete, delete_marker, existing_objects)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_bucket_path_success() {
+        let (alias, bucket) = parse_bucket_path("local/my-bucket").expect("should parse");
+        assert_eq!(alias, "local");
+        assert_eq!(bucket, "my-bucket");
+
+        let (alias, bucket) = parse_bucket_path("local/my-bucket/").expect("should parse");
+        assert_eq!(alias, "local");
+        assert_eq!(bucket, "my-bucket");
+    }
+
+    #[test]
+    fn test_parse_bucket_path_errors() {
+        assert!(parse_bucket_path("").is_err());
+        assert!(parse_bucket_path("local").is_err());
+        assert!(parse_bucket_path("/bucket").is_err());
+        assert!(parse_bucket_path("local/").is_err());
+    }
+
+    #[test]
+    fn 
test_parse_replicate_flags_none() { + let (d, dm, eo) = parse_replicate_flags(None); + assert!(!d); + assert!(!dm); + assert!(!eo); + } + + #[test] + fn test_parse_replicate_flags_all() { + let (d, dm, eo) = parse_replicate_flags(Some("delete,delete-marker,existing-objects")); + assert!(d); + assert!(dm); + assert!(eo); + } + + #[test] + fn test_parse_replicate_flags_partial() { + let (d, dm, eo) = parse_replicate_flags(Some("delete-marker")); + assert!(!d); + assert!(dm); + assert!(!eo); + } + + #[test] + fn test_parse_replicate_flags_case_insensitive() { + let (d, _, _) = parse_replicate_flags(Some("DELETE")); + assert!(d); + } + + #[tokio::test] + async fn test_execute_add_invalid_path_returns_usage_error() { + let args = ReplicateArgs { + command: ReplicateCommands::Add(AddArgs { + path: "no-slash".to_string(), + remote_bucket: "target/bucket".to_string(), + replicate: None, + priority: 1, + storage_class: None, + bandwidth: 0, + sync: false, + prefix: None, + id: None, + healthcheck_seconds: 60, + disable_proxy: false, + force: false, + }), + }; + + let code = execute(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } + + #[tokio::test] + async fn test_execute_remove_requires_id_or_all() { + let args = ReplicateArgs { + command: ReplicateCommands::Remove(RemoveArgs { + path: "local/bucket".to_string(), + id: None, + all: false, + force: false, + }), + }; + + let code = execute(args, OutputConfig::default()).await; + assert_eq!(code, ExitCode::UsageError); + } +} diff --git a/crates/cli/tests/help_contract.rs b/crates/cli/tests/help_contract.rs index eb93866..8b35433 100644 --- a/crates/cli/tests/help_contract.rs +++ b/crates/cli/tests/help_contract.rs @@ -122,6 +122,8 @@ fn top_level_command_help_contract() { "tag", "quota", "anonymous", + "ilm", + "replicate", "completions", ], }, @@ -528,6 +530,90 @@ fn nested_subcommand_help_contract() { usage: "Usage: rc anonymous links [OPTIONS] ", expected_tokens: &["--recursive"], }, + 
// ILM commands
+        HelpCase {
+            args: &["ilm"],
+            usage: "Usage: rc ilm [OPTIONS] <COMMAND>",
+            expected_tokens: &["rule", "tier", "restore"],
+        },
+        HelpCase {
+            args: &["ilm", "rule"],
+            usage: "Usage: rc ilm rule [OPTIONS] <COMMAND>",
+            expected_tokens: &["add", "edit", "list", "remove", "export", "import"],
+        },
+        HelpCase {
+            args: &["ilm", "rule", "add"],
+            usage: "Usage: rc ilm rule add [OPTIONS] ",
+            expected_tokens: &[
+                "--expiry-days",
+                "--transition-days",
+                "--storage-class",
+                "--prefix",
+            ],
+        },
+        HelpCase {
+            args: &["ilm", "rule", "list"],
+            usage: "Usage: rc ilm rule list [OPTIONS] ",
+            expected_tokens: &[],
+        },
+        HelpCase {
+            args: &["ilm", "rule", "remove"],
+            usage: "Usage: rc ilm rule remove [OPTIONS] ",
+            expected_tokens: &["--id", "--all"],
+        },
+        HelpCase {
+            args: &["ilm", "tier"],
+            usage: "Usage: rc ilm tier [OPTIONS] <COMMAND>",
+            expected_tokens: &["add", "edit", "list", "info", "remove"],
+        },
+        HelpCase {
+            args: &["ilm", "tier", "add"],
+            usage: "Usage: rc ilm tier add [OPTIONS]",
+            expected_tokens: &["--endpoint", "--access-key", "--secret-key", "--bucket"],
+        },
+        HelpCase {
+            args: &["ilm", "tier", "list"],
+            usage: "Usage: rc ilm tier list [OPTIONS] ",
+            expected_tokens: &[],
+        },
+        HelpCase {
+            args: &["ilm", "tier", "remove"],
+            usage: "Usage: rc ilm tier remove [OPTIONS]",
+            expected_tokens: &["--force"],
+        },
+        HelpCase {
+            args: &["ilm", "restore"],
+            usage: "Usage: rc ilm restore [OPTIONS] ",
+            expected_tokens: &["--days"],
+        },
+        // Replicate commands
+        HelpCase {
+            args: &["replicate"],
+            usage: "Usage: rc replicate [OPTIONS] <COMMAND>",
+            expected_tokens: &[
+                "add", "update", "list", "status", "remove", "export", "import",
+            ],
+        },
+        HelpCase {
+            args: &["replicate", "add"],
+            usage: "Usage: rc replicate add [OPTIONS]",
+            expected_tokens: &["--remote-bucket", "--priority"],
+        },
+        HelpCase {
+            args: &["replicate", "list"],
+            usage: "Usage: rc replicate list [OPTIONS] <PATH>",
+            expected_tokens: &[],
+        },
+        HelpCase {
+            args: &["replicate", "status"],
+            usage: "Usage: rc replicate status [OPTIONS] <PATH>",
+            expected_tokens: &[],
+        },
+        HelpCase {
+            args: &["replicate", "remove"],
+            usage: "Usage: rc replicate remove [OPTIONS] <PATH>",
+            expected_tokens: &["--id", "--all"],
+        },
+    ];
 
     for case in cases {
diff --git a/crates/core/src/admin/mod.rs b/crates/core/src/admin/mod.rs
index a753af0..c8b16d1 100644
--- a/crates/core/src/admin/mod.rs
+++ b/crates/core/src/admin/mod.rs
@@ -4,6 +4,7 @@
 //! IAM users, policies, groups, service accounts, and cluster operations.
 
 mod cluster;
+pub mod tier;
 mod types;
 
 pub use cluster::{
@@ -11,6 +12,10 @@ pub use cluster::{
     HealResultItem, HealScanMode, HealStartRequest, HealStatus, HealingDiskInfo, MemStats,
     ObjectsInfo, ServerInfo, UsageInfo,
 };
+pub use tier::{
+    TierAliyun, TierAzure, TierConfig, TierCreds, TierGCS, TierHuaweicloud, TierMinIO, TierR2,
+    TierRustFS, TierS3, TierTencent, TierType,
+};
 pub use types::{
     BucketQuota, CreateServiceAccountRequest, Group, GroupStatus, Policy, PolicyEntity, PolicyInfo,
     ServiceAccount, ServiceAccountCreateResponse, ServiceAccountCredentials, SetPolicyRequest,
@@ -139,6 +144,45 @@ pub trait AdminApi: Send + Sync {
 
     /// Clear bucket quota
     async fn clear_bucket_quota(&self, bucket: &str) -> Result;
+
+    // ==================== Tier Operations ====================
+
+    /// List all configured storage tiers
+    async fn list_tiers(&self) -> Result<Vec<TierConfig>>;
+
+    /// Get tier statistics
+    async fn tier_stats(&self) -> Result<serde_json::Value>;
+
+    /// Add a new storage tier
+    async fn add_tier(&self, config: TierConfig) -> Result<()>;
+
+    /// Edit tier credentials
+    async fn edit_tier(&self, name: &str, creds: TierCreds) -> Result<()>;
+
+    /// Remove a storage tier
+    async fn remove_tier(&self, name: &str, force: bool) -> Result<()>;
+
+    // ==================== Replication Target Operations ====================
+
+    /// Set a remote replication target for a bucket, returns the ARN
+    async fn set_remote_target(
+        &self,
+        bucket: &str,
+        target: crate::replication::BucketTarget,
+        update: bool,
+    ) -> Result<String>;
+
+    /// List remote replication targets for a bucket
+    async fn list_remote_targets(
+        &self,
+        bucket: &str,
+    ) -> Result<Vec<crate::replication::BucketTarget>>;
+
+    /// Remove a remote replication target
+    async fn remove_remote_target(&self, bucket: &str, arn: &str) -> Result<()>;
+
+    /// Get replication metrics for a bucket
+    async fn replication_metrics(&self, bucket: &str) -> Result<serde_json::Value>;
 }
 
 #[cfg(test)]
diff --git a/crates/core/src/admin/tier.rs b/crates/core/src/admin/tier.rs
new file mode 100644
index 0000000..566163b
--- /dev/null
+++ b/crates/core/src/admin/tier.rs
@@ -0,0 +1,501 @@
+//! Tier configuration types for remote storage tiering
+//!
+//! These types match the RustFS admin API JSON format for tier management.
+//! Tiers are used by lifecycle transition rules to move objects to
+//! remote storage backends.
+
+use serde::{Deserialize, Serialize};
+use std::fmt;
+
+/// Supported remote storage tier types
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum TierType {
+    #[serde(rename = "s3")]
+    S3,
+    #[serde(rename = "rustfs")]
+    RustFS,
+    #[serde(rename = "minio")]
+    MinIO,
+    #[serde(rename = "aliyun")]
+    Aliyun,
+    #[serde(rename = "tencent")]
+    Tencent,
+    #[serde(rename = "huaweicloud")]
+    Huaweicloud,
+    #[serde(rename = "azure")]
+    Azure,
+    #[serde(rename = "gcs")]
+    GCS,
+    #[serde(rename = "r2")]
+    R2,
+}
+
+impl fmt::Display for TierType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            TierType::S3 => write!(f, "S3"),
+            TierType::RustFS => write!(f, "RustFS"),
+            TierType::MinIO => write!(f, "MinIO"),
+            TierType::Aliyun => write!(f, "Aliyun"),
+            TierType::Tencent => write!(f, "Tencent"),
+            TierType::Huaweicloud => write!(f, "Huaweicloud"),
+            TierType::Azure => write!(f, "Azure"),
+            TierType::GCS => write!(f, "GCS"),
+            TierType::R2 => write!(f, "R2"),
+        }
+    }
+}
+
+impl std::str::FromStr for TierType {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.to_lowercase().as_str() {
+            "s3" => 
Ok(TierType::S3),
+            "rustfs" => Ok(TierType::RustFS),
+            "minio" => Ok(TierType::MinIO),
+            "aliyun" => Ok(TierType::Aliyun),
+            "tencent" => Ok(TierType::Tencent),
+            "huaweicloud" => Ok(TierType::Huaweicloud),
+            "azure" => Ok(TierType::Azure),
+            "gcs" => Ok(TierType::GCS),
+            "r2" => Ok(TierType::R2),
+            _ => Err(format!(
+                "Invalid tier type: {s}. Valid types: s3, rustfs, minio, aliyun, tencent, huaweicloud, azure, gcs, r2"
+            )),
+        }
+    }
+}
+
+/// Tier configuration matching the RustFS admin API format.
+///
+/// The backend uses a polymorphic structure: the `type` field selects which
+/// sub-config (s3, rustfs, minio, etc.) is active. The tier name lives
+/// inside the sub-config.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct TierConfig {
+    #[serde(rename = "type")]
+    pub tier_type: TierType,
+
+    /// Tier name — extracted from the active sub-config on the backend side.
+    /// Populated by the CLI when building a TierConfig for add operations.
+    #[serde(skip)]
+    pub name: String,
+
+    #[serde(rename = "s3", skip_serializing_if = "Option::is_none")]
+    pub s3: Option<TierS3>,
+    #[serde(rename = "rustfs", skip_serializing_if = "Option::is_none")]
+    pub rustfs: Option<TierRustFS>,
+    #[serde(rename = "minio", skip_serializing_if = "Option::is_none")]
+    pub minio: Option<TierMinIO>,
+    #[serde(rename = "aliyun", skip_serializing_if = "Option::is_none")]
+    pub aliyun: Option<TierAliyun>,
+    #[serde(rename = "tencent", skip_serializing_if = "Option::is_none")]
+    pub tencent: Option<TierTencent>,
+    #[serde(rename = "huaweicloud", skip_serializing_if = "Option::is_none")]
+    pub huaweicloud: Option<TierHuaweicloud>,
+    #[serde(rename = "azure", skip_serializing_if = "Option::is_none")]
+    pub azure: Option<TierAzure>,
+    #[serde(rename = "gcs", skip_serializing_if = "Option::is_none")]
+    pub gcs: Option<TierGCS>,
+    #[serde(rename = "r2", skip_serializing_if = "Option::is_none")]
+    pub r2: Option<TierR2>,
+}
+
+impl Default for TierConfig {
+    fn default() -> Self {
+        Self {
+            tier_type: TierType::S3,
+            name: String::new(),
+            s3: None,
+            rustfs: None,
+ minio: None, + aliyun: None, + tencent: None, + huaweicloud: None, + azure: None, + gcs: None, + r2: None, + } + } +} + +impl TierConfig { + /// Get the tier name from the active sub-config + pub fn tier_name(&self) -> &str { + if !self.name.is_empty() { + return &self.name; + } + match self.tier_type { + TierType::S3 => self.s3.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::RustFS => self.rustfs.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::MinIO => self.minio.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::Aliyun => self.aliyun.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::Tencent => self.tencent.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::Huaweicloud => self + .huaweicloud + .as_ref() + .map(|c| c.name.as_str()) + .unwrap_or(""), + TierType::Azure => self.azure.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::GCS => self.gcs.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + TierType::R2 => self.r2.as_ref().map(|c| c.name.as_str()).unwrap_or(""), + } + } + + /// Get the endpoint from the active sub-config + pub fn endpoint(&self) -> &str { + match self.tier_type { + TierType::S3 => self.s3.as_ref().map(|c| c.endpoint.as_str()).unwrap_or(""), + TierType::RustFS => self + .rustfs + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::MinIO => self + .minio + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::Aliyun => self + .aliyun + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::Tencent => self + .tencent + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::Huaweicloud => self + .huaweicloud + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::Azure => self + .azure + .as_ref() + .map(|c| c.endpoint.as_str()) + .unwrap_or(""), + TierType::GCS => self.gcs.as_ref().map(|c| c.endpoint.as_str()).unwrap_or(""), + TierType::R2 => self.r2.as_ref().map(|c| 
c.endpoint.as_str()).unwrap_or(""), + } + } + + /// Get the bucket from the active sub-config + pub fn bucket(&self) -> &str { + match self.tier_type { + TierType::S3 => self.s3.as_ref().map(|c| c.bucket.as_str()).unwrap_or(""), + TierType::RustFS => self + .rustfs + .as_ref() + .map(|c| c.bucket.as_str()) + .unwrap_or(""), + TierType::MinIO => self.minio.as_ref().map(|c| c.bucket.as_str()).unwrap_or(""), + TierType::Aliyun => self + .aliyun + .as_ref() + .map(|c| c.bucket.as_str()) + .unwrap_or(""), + TierType::Tencent => self + .tencent + .as_ref() + .map(|c| c.bucket.as_str()) + .unwrap_or(""), + TierType::Huaweicloud => self + .huaweicloud + .as_ref() + .map(|c| c.bucket.as_str()) + .unwrap_or(""), + TierType::Azure => self.azure.as_ref().map(|c| c.bucket.as_str()).unwrap_or(""), + TierType::GCS => self.gcs.as_ref().map(|c| c.bucket.as_str()).unwrap_or(""), + TierType::R2 => self.r2.as_ref().map(|c| c.bucket.as_str()).unwrap_or(""), + } + } + + /// Get the prefix from the active sub-config + pub fn prefix(&self) -> &str { + match self.tier_type { + TierType::S3 => self.s3.as_ref().map(|c| c.prefix.as_str()).unwrap_or(""), + TierType::RustFS => self + .rustfs + .as_ref() + .map(|c| c.prefix.as_str()) + .unwrap_or(""), + TierType::MinIO => self.minio.as_ref().map(|c| c.prefix.as_str()).unwrap_or(""), + TierType::Aliyun => self + .aliyun + .as_ref() + .map(|c| c.prefix.as_str()) + .unwrap_or(""), + TierType::Tencent => self + .tencent + .as_ref() + .map(|c| c.prefix.as_str()) + .unwrap_or(""), + TierType::Huaweicloud => self + .huaweicloud + .as_ref() + .map(|c| c.prefix.as_str()) + .unwrap_or(""), + TierType::Azure => self.azure.as_ref().map(|c| c.prefix.as_str()).unwrap_or(""), + TierType::GCS => self.gcs.as_ref().map(|c| c.prefix.as_str()).unwrap_or(""), + TierType::R2 => self.r2.as_ref().map(|c| c.prefix.as_str()).unwrap_or(""), + } + } + + /// Get the region from the active sub-config + pub fn region(&self) -> &str { + match self.tier_type { + TierType::S3 => 
self.s3.as_ref().map(|c| c.region.as_str()).unwrap_or(""), + TierType::RustFS => self + .rustfs + .as_ref() + .map(|c| c.region.as_str()) + .unwrap_or(""), + TierType::MinIO => self.minio.as_ref().map(|c| c.region.as_str()).unwrap_or(""), + TierType::Aliyun => self + .aliyun + .as_ref() + .map(|c| c.region.as_str()) + .unwrap_or(""), + TierType::Tencent => self + .tencent + .as_ref() + .map(|c| c.region.as_str()) + .unwrap_or(""), + TierType::Huaweicloud => self + .huaweicloud + .as_ref() + .map(|c| c.region.as_str()) + .unwrap_or(""), + TierType::Azure => self.azure.as_ref().map(|c| c.region.as_str()).unwrap_or(""), + TierType::GCS => self.gcs.as_ref().map(|c| c.region.as_str()).unwrap_or(""), + TierType::R2 => self.r2.as_ref().map(|c| c.region.as_str()).unwrap_or(""), + } + } +} + +/// Credentials for updating a tier +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(default)] +pub struct TierCreds { + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, +} + +// ==================== Per-type sub-configs ==================== +// These match the RustFS backend JSON format exactly. 
+ +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierS3 { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub bucket: String, + pub prefix: String, + pub region: String, + #[serde(rename = "storageClass")] + pub storage_class: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierRustFS { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub bucket: String, + pub prefix: String, + pub region: String, + #[serde(rename = "storageClass")] + pub storage_class: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierMinIO { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub bucket: String, + pub prefix: String, + pub region: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierAliyun { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub bucket: String, + pub prefix: String, + pub region: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierTencent { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub bucket: String, + pub prefix: String, + pub region: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct TierHuaweicloud { + pub name: String, + pub endpoint: String, + #[serde(rename = "accessKey")] + pub 
access_key: String,
+    #[serde(rename = "secretKey")]
+    pub secret_key: String,
+    pub bucket: String,
+    pub prefix: String,
+    pub region: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default)]
+pub struct TierAzure {
+    pub name: String,
+    pub endpoint: String,
+    #[serde(rename = "accessKey")]
+    pub access_key: String,
+    #[serde(rename = "secretKey")]
+    pub secret_key: String,
+    pub bucket: String,
+    pub prefix: String,
+    pub region: String,
+    #[serde(rename = "storageClass")]
+    pub storage_class: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default)]
+pub struct TierGCS {
+    pub name: String,
+    pub endpoint: String,
+    #[serde(rename = "creds")]
+    pub creds: String,
+    pub bucket: String,
+    pub prefix: String,
+    pub region: String,
+    #[serde(rename = "storageClass")]
+    pub storage_class: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default)]
+pub struct TierR2 {
+    pub name: String,
+    pub endpoint: String,
+    #[serde(rename = "accessKey")]
+    pub access_key: String,
+    #[serde(rename = "secretKey")]
+    pub secret_key: String,
+    pub bucket: String,
+    pub prefix: String,
+    pub region: String,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_tier_type_display() {
+        assert_eq!(TierType::S3.to_string(), "S3");
+        assert_eq!(TierType::RustFS.to_string(), "RustFS");
+        assert_eq!(TierType::MinIO.to_string(), "MinIO");
+        assert_eq!(TierType::Azure.to_string(), "Azure");
+        assert_eq!(TierType::GCS.to_string(), "GCS");
+        assert_eq!(TierType::R2.to_string(), "R2");
+    }
+
+    #[test]
+    fn test_tier_type_from_str() {
+        assert_eq!("s3".parse::<TierType>().unwrap(), TierType::S3);
+        assert_eq!("rustfs".parse::<TierType>().unwrap(), TierType::RustFS);
+        assert_eq!("MINIO".parse::<TierType>().unwrap(), TierType::MinIO);
+        assert_eq!("Azure".parse::<TierType>().unwrap(), TierType::Azure);
+        assert!("invalid".parse::<TierType>().is_err());
+    }
+
+    #[test]
+    fn test_tier_config_serialization_s3() {
+        let config = 
TierConfig { + tier_type: TierType::S3, + name: "WARM".to_string(), + s3: Some(TierS3 { + name: "WARM".to_string(), + endpoint: "https://s3.amazonaws.com".to_string(), + access_key: "AKID".to_string(), + secret_key: "REDACTED".to_string(), + bucket: "warm-bucket".to_string(), + prefix: "tier/".to_string(), + region: "us-east-1".to_string(), + storage_class: "STANDARD_IA".to_string(), + }), + ..Default::default() + }; + + let json = serde_json::to_string(&config).unwrap(); + assert!(json.contains(r#""type":"s3""#)); + assert!(json.contains("warm-bucket")); + + let decoded: TierConfig = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.tier_type, TierType::S3); + assert_eq!(decoded.tier_name(), "WARM"); + assert_eq!(decoded.bucket(), "warm-bucket"); + } + + #[test] + fn test_tier_config_deserialization_from_backend() { + // Simulates the JSON format returned by the RustFS admin API + let json = r#"{"type":"rustfs","rustfs":{"name":"ARCHIVE","endpoint":"http://remote:9000","accessKey":"admin","secretKey":"REDACTED","bucket":"archive","prefix":"","region":""}}"#; + let config: TierConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.tier_type, TierType::RustFS); + assert_eq!(config.tier_name(), "ARCHIVE"); + assert_eq!(config.endpoint(), "http://remote:9000"); + assert_eq!(config.bucket(), "archive"); + } + + #[test] + fn test_tier_creds_serialization() { + let creds = TierCreds { + access_key: "newkey".to_string(), + secret_key: "newsecret".to_string(), + }; + + let json = serde_json::to_string(&creds).unwrap(); + assert!(json.contains("accessKey")); + assert!(json.contains("secretKey")); + + let decoded: TierCreds = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.access_key, "newkey"); + } +} diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 740092c..5be1e57 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -13,14 +13,24 @@ pub mod admin; pub mod alias; pub mod config; pub mod error; +pub mod 
lifecycle; pub mod path; +pub mod replication; pub mod retry; pub mod traits; pub use alias::{Alias, AliasManager}; pub use config::{Config, ConfigManager}; pub use error::{Error, Result}; +pub use lifecycle::{ + LifecycleConfiguration, LifecycleExpiration, LifecycleRule, LifecycleRuleStatus, + LifecycleTransition, NoncurrentVersionExpiration, NoncurrentVersionTransition, +}; pub use path::{ParsedPath, RemotePath, parse_path}; +pub use replication::{ + BucketTarget, BucketTargetCredentials, ReplicationConfiguration, ReplicationDestination, + ReplicationRule, ReplicationRuleStatus, +}; pub use retry::{RetryBuilder, is_retryable_error, retry_with_backoff}; pub use traits::{ BucketNotification, Capabilities, ListOptions, ListResult, NotificationTarget, ObjectInfo, diff --git a/crates/core/src/lifecycle.rs b/crates/core/src/lifecycle.rs new file mode 100644 index 0000000..3ccb412 --- /dev/null +++ b/crates/core/src/lifecycle.rs @@ -0,0 +1,261 @@ +//! Lifecycle (ILM) configuration types +//! +//! Domain types for S3 bucket lifecycle rules including expiration, +//! transition, and noncurrent version management. 
+
+use std::collections::HashMap;
+use std::fmt;
+
+use serde::{Deserialize, Serialize};
+
+/// Full lifecycle configuration for a bucket
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LifecycleConfiguration {
+    /// Lifecycle rules
+    pub rules: Vec<LifecycleRule>,
+}
+
+/// A single lifecycle rule
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct LifecycleRule {
+    /// Rule identifier
+    pub id: String,
+
+    /// Whether the rule is enabled or disabled
+    pub status: LifecycleRuleStatus,
+
+    /// Key prefix filter
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub prefix: Option<String>,
+
+    /// Tag-based filter (key=value pairs)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<HashMap<String, String>>,
+
+    /// Expiration settings for current object versions
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expiration: Option<LifecycleExpiration>,
+
+    /// Transition settings for current object versions
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub transition: Option<LifecycleTransition>,
+
+    /// Expiration settings for noncurrent object versions
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub noncurrent_version_expiration: Option<NoncurrentVersionExpiration>,
+
+    /// Transition settings for noncurrent object versions
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub noncurrent_version_transition: Option<NoncurrentVersionTransition>,
+
+    /// Days after initiation to abort incomplete multipart uploads
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub abort_incomplete_multipart_upload_days: Option<i32>,
+
+    /// Whether to remove expired delete markers
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expired_object_delete_marker: Option<bool>,
+}
+
+/// Rule status
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum LifecycleRuleStatus {
+    Enabled,
+    Disabled,
+}
+
+impl fmt::Display for LifecycleRuleStatus {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            LifecycleRuleStatus::Enabled => write!(f, "Enabled"),
+            LifecycleRuleStatus::Disabled => write!(f, "Disabled"),
+        }
+    }
+}
+
+impl std::str::FromStr for LifecycleRuleStatus {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.to_lowercase().as_str() {
+            "enabled" => Ok(LifecycleRuleStatus::Enabled),
+            "disabled" => Ok(LifecycleRuleStatus::Disabled),
+            _ => Err(format!("Invalid lifecycle rule status: {s}")),
+        }
+    }
+}
+
+/// Expiration settings for current object versions
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LifecycleExpiration {
+    /// Number of days after creation to expire
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub days: Option<i32>,
+
+    /// Specific date to expire (ISO 8601 format)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub date: Option<String>,
+}
+
+/// Transition settings for current object versions
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct LifecycleTransition {
+    /// Number of days after creation to transition
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub days: Option<i32>,
+
+    /// Specific date to transition (ISO 8601 format)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub date: Option<String>,
+
+    /// Target storage class (tier name)
+    pub storage_class: String,
+}
+
+/// Expiration settings for noncurrent object versions
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NoncurrentVersionExpiration {
+    /// Number of days after becoming noncurrent to expire
+    pub noncurrent_days: i32,
+
+    /// Maximum number of noncurrent versions to retain
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub newer_noncurrent_versions: Option<i32>,
+}
+
+/// Transition settings for noncurrent object versions
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NoncurrentVersionTransition {
+    /// Number of days after becoming noncurrent to transition
+    pub noncurrent_days: i32,
+
+    /// Target storage class (tier name)
+    
pub storage_class: String, +} + +impl fmt::Display for LifecycleRule { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} ({})", self.id, self.status) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_lifecycle_rule_status_display() { + assert_eq!(LifecycleRuleStatus::Enabled.to_string(), "Enabled"); + assert_eq!(LifecycleRuleStatus::Disabled.to_string(), "Disabled"); + } + + #[test] + fn test_lifecycle_rule_status_from_str() { + assert_eq!( + "enabled".parse::().unwrap(), + LifecycleRuleStatus::Enabled + ); + assert_eq!( + "Disabled".parse::().unwrap(), + LifecycleRuleStatus::Disabled + ); + assert!("invalid".parse::().is_err()); + } + + #[test] + fn test_lifecycle_rule_serialization() { + let rule = LifecycleRule { + id: "rule-1".to_string(), + status: LifecycleRuleStatus::Enabled, + prefix: Some("logs/".to_string()), + tags: None, + expiration: Some(LifecycleExpiration { + days: Some(30), + date: None, + }), + transition: None, + noncurrent_version_expiration: None, + noncurrent_version_transition: None, + abort_incomplete_multipart_upload_days: Some(7), + expired_object_delete_marker: None, + }; + + let json = serde_json::to_string(&rule).unwrap(); + let decoded: LifecycleRule = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.id, "rule-1"); + assert_eq!(decoded.status, LifecycleRuleStatus::Enabled); + assert_eq!(decoded.prefix.as_deref(), Some("logs/")); + assert_eq!(decoded.expiration.as_ref().unwrap().days, Some(30)); + assert_eq!(decoded.abort_incomplete_multipart_upload_days, Some(7)); + } + + #[test] + fn test_lifecycle_transition_serialization() { + let transition = LifecycleTransition { + days: Some(90), + date: None, + storage_class: "WARM_TIER".to_string(), + }; + + let json = serde_json::to_string(&transition).unwrap(); + assert!(json.contains("storageClass")); + assert!(json.contains("WARM_TIER")); + + let decoded: LifecycleTransition = serde_json::from_str(&json).unwrap(); + 
assert_eq!(decoded.storage_class, "WARM_TIER"); + } + + #[test] + fn test_lifecycle_configuration_serialization() { + let config = LifecycleConfiguration { + rules: vec![LifecycleRule { + id: "expire-old".to_string(), + status: LifecycleRuleStatus::Enabled, + prefix: None, + tags: None, + expiration: Some(LifecycleExpiration { + days: Some(365), + date: None, + }), + transition: None, + noncurrent_version_expiration: Some(NoncurrentVersionExpiration { + noncurrent_days: 30, + newer_noncurrent_versions: Some(3), + }), + noncurrent_version_transition: None, + abort_incomplete_multipart_upload_days: None, + expired_object_delete_marker: Some(true), + }], + }; + + let json = serde_json::to_string_pretty(&config).unwrap(); + let decoded: LifecycleConfiguration = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.rules.len(), 1); + assert_eq!(decoded.rules[0].id, "expire-old"); + assert_eq!( + decoded.rules[0] + .noncurrent_version_expiration + .as_ref() + .unwrap() + .newer_noncurrent_versions, + Some(3) + ); + } + + #[test] + fn test_noncurrent_version_transition_serialization() { + let nvt = NoncurrentVersionTransition { + noncurrent_days: 60, + storage_class: "COLD_TIER".to_string(), + }; + + let json = serde_json::to_string(&nvt).unwrap(); + let decoded: NoncurrentVersionTransition = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.noncurrent_days, 60); + assert_eq!(decoded.storage_class, "COLD_TIER"); + } +} diff --git a/crates/core/src/replication.rs b/crates/core/src/replication.rs new file mode 100644 index 0000000..831a48e --- /dev/null +++ b/crates/core/src/replication.rs @@ -0,0 +1,255 @@ +//! Bucket replication configuration types +//! +//! Domain types for S3 bucket replication configuration and +//! RustFS admin API remote target management. 
+
+use serde::{Deserialize, Serialize};
+use std::fmt;
+
+// ==================== S3 Replication Config Types ====================
+
+/// Full replication configuration for a bucket (S3 API)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReplicationConfiguration {
+    /// Role ARN or empty for per-rule destination ARNs
+    #[serde(default)]
+    pub role: String,
+
+    /// Replication rules
+    pub rules: Vec<ReplicationRule>,
+}
+
+/// A single replication rule
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ReplicationRule {
+    /// Rule identifier
+    pub id: String,
+
+    /// Rule priority (higher = more important)
+    pub priority: i32,
+
+    /// Whether the rule is enabled or disabled
+    pub status: ReplicationRuleStatus,
+
+    /// Key prefix filter
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub prefix: Option<String>,
+
+    /// Tag-based filter
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tags: Option<std::collections::HashMap<String, String>>,
+
+    /// Destination bucket ARN and optional storage class
+    pub destination: ReplicationDestination,
+
+    /// Whether to replicate delete markers
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub delete_marker_replication: Option<bool>,
+
+    /// Whether to replicate existing objects
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub existing_object_replication: Option<bool>,
+
+    /// Whether to replicate version deletes
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub delete_replication: Option<bool>,
+}
+
+/// Rule status
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum ReplicationRuleStatus {
+    Enabled,
+    Disabled,
+}
+
+impl fmt::Display for ReplicationRuleStatus {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ReplicationRuleStatus::Enabled => write!(f, "Enabled"),
+            ReplicationRuleStatus::Disabled => write!(f, "Disabled"),
+        }
+    }
+}
+
+impl std::str::FromStr for ReplicationRuleStatus {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.to_lowercase().as_str() {
+            "enabled" => Ok(ReplicationRuleStatus::Enabled),
+            "disabled" => Ok(ReplicationRuleStatus::Disabled),
+            _ => Err(format!("Invalid replication rule status: {s}")),
+        }
+    }
+}
+
+/// Replication destination
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ReplicationDestination {
+    /// Destination bucket ARN
+    pub bucket_arn: String,
+
+    /// Optional storage class override at destination
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub storage_class: Option<String>,
+}
+
+// ==================== Admin API Remote Target Types ====================
+
+/// Remote bucket target for replication (matches RustFS admin API JSON format)
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct BucketTarget {
+    #[serde(rename = "sourcebucket", default)]
+    pub source_bucket: String,
+
+    #[serde(default)]
+    pub endpoint: String,
+
+    #[serde(default)]
+    pub credentials: Option<BucketTargetCredentials>,
+
+    #[serde(rename = "targetbucket", default)]
+    pub target_bucket: String,
+
+    #[serde(default)]
+    pub secure: bool,
+
+    #[serde(default)]
+    pub path: String,
+
+    #[serde(default)]
+    pub api: String,
+
+    #[serde(default)]
+    pub arn: String,
+
+    #[serde(rename = "type", default)]
+    pub target_type: String,
+
+    #[serde(default)]
+    pub region: String,
+
+    #[serde(alias = "bandwidth", default)]
+    pub bandwidth_limit: i64,
+
+    #[serde(rename = "replicationSync", default)]
+    pub replication_sync: bool,
+
+    #[serde(default)]
+    pub storage_class: String,
+
+    #[serde(rename = "healthCheckDuration", default)]
+    pub health_check_duration: u64,
+
+    #[serde(rename = "disableProxy", default)]
+    pub disable_proxy: bool,
+
+    #[serde(rename = "isOnline", default)]
+    pub online: bool,
+}
+
+/// Credentials for a remote bucket target
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct BucketTargetCredentials {
+    #[serde(rename = "accessKey")]
+    pub access_key: String,
+    #[serde(rename = "secretKey")]
+    pub secret_key: 
String, +} + +impl fmt::Display for ReplicationRule { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} (priority={}, {})", + self.id, self.priority, self.status + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_replication_rule_status_display() { + assert_eq!(ReplicationRuleStatus::Enabled.to_string(), "Enabled"); + assert_eq!(ReplicationRuleStatus::Disabled.to_string(), "Disabled"); + } + + #[test] + fn test_replication_rule_status_from_str() { + assert_eq!( + "enabled".parse::().unwrap(), + ReplicationRuleStatus::Enabled + ); + assert!("invalid".parse::().is_err()); + } + + #[test] + fn test_replication_configuration_serialization() { + let config = ReplicationConfiguration { + role: "arn:aws:iam::123456789:role/replication".to_string(), + rules: vec![ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: ReplicationRuleStatus::Enabled, + prefix: Some("data/".to_string()), + tags: None, + destination: ReplicationDestination { + bucket_arn: "arn:aws:s3:::dest-bucket".to_string(), + storage_class: None, + }, + delete_marker_replication: Some(true), + existing_object_replication: Some(true), + delete_replication: None, + }], + }; + + let json = serde_json::to_string_pretty(&config).unwrap(); + let decoded: ReplicationConfiguration = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.rules.len(), 1); + assert_eq!(decoded.rules[0].id, "rule-1"); + assert_eq!(decoded.rules[0].priority, 1); + } + + #[test] + fn test_bucket_target_serialization() { + let target = BucketTarget { + source_bucket: "my-bucket".to_string(), + endpoint: "http://remote:9000".to_string(), + credentials: Some(BucketTargetCredentials { + access_key: "admin".to_string(), + secret_key: "secret".to_string(), + }), + target_bucket: "dest-bucket".to_string(), + secure: false, + target_type: "replication".to_string(), + region: "us-east-1".to_string(), + replication_sync: true, + ..Default::default() + }; + + let json = 
serde_json::to_string(&target).unwrap(); + assert!(json.contains("sourcebucket")); + assert!(json.contains("targetbucket")); + assert!(json.contains("replicationSync")); + + let decoded: BucketTarget = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.source_bucket, "my-bucket"); + assert_eq!(decoded.target_bucket, "dest-bucket"); + assert!(decoded.replication_sync); + } + + #[test] + fn test_bucket_target_deserialization_from_backend() { + let json = r#"{"sourcebucket":"src","endpoint":"http://host:9000","credentials":{"accessKey":"ak","secretKey":"sk"},"targetbucket":"dst","secure":false,"path":"","api":"","arn":"arn:rustfs:replication::id:dst","type":"replication","region":"","bandwidth":0,"replicationSync":false,"storage_class":"","healthCheckDuration":0,"disableProxy":false,"isOnline":true}"#; + let target: BucketTarget = serde_json::from_str(json).unwrap(); + assert_eq!(target.source_bucket, "src"); + assert_eq!(target.target_bucket, "dst"); + assert!(target.online); + assert_eq!(target.target_type, "replication"); + } +} diff --git a/crates/core/src/traits.rs b/crates/core/src/traits.rs index 7cb2446..8c6e315 100644 --- a/crates/core/src/traits.rs +++ b/crates/core/src/traits.rs @@ -10,7 +10,9 @@ use jiff::Timestamp; use serde::{Deserialize, Serialize}; use crate::error::Result; +use crate::lifecycle::LifecycleRule; use crate::path::RemotePath; +use crate::replication::ReplicationConfiguration; /// Metadata for an object version #[derive(Debug, Clone, Serialize, Deserialize)] @@ -178,6 +180,12 @@ pub struct Capabilities { /// Supports event notifications pub notifications: bool, + + /// Supports lifecycle configuration + pub lifecycle: bool, + + /// Supports bucket replication + pub replication: bool, } /// Bucket notification target type @@ -334,6 +342,38 @@ pub trait ObjectStore: Send + Sync { bucket: &str, notifications: Vec, ) -> Result<()>; + + // Lifecycle operations (capability-dependent) + + /// Get bucket lifecycle rules. 
Returns empty vec if no lifecycle config exists. + async fn get_bucket_lifecycle(&self, bucket: &str) -> Result>; + + /// Set bucket lifecycle configuration (replaces all rules). + async fn set_bucket_lifecycle(&self, bucket: &str, rules: Vec) -> Result<()>; + + /// Delete bucket lifecycle configuration. + async fn delete_bucket_lifecycle(&self, bucket: &str) -> Result<()>; + + /// Restore a transitioned (archived) object. + async fn restore_object(&self, path: &RemotePath, days: i32) -> Result<()>; + + // Replication operations (capability-dependent) + + /// Get bucket replication configuration. Returns None if not configured. + async fn get_bucket_replication( + &self, + bucket: &str, + ) -> Result>; + + /// Set bucket replication configuration. + async fn set_bucket_replication( + &self, + bucket: &str, + config: ReplicationConfiguration, + ) -> Result<()>; + + /// Delete bucket replication configuration. + async fn delete_bucket_replication(&self, bucket: &str) -> Result<()>; // async fn get_versioning(&self, bucket: &str) -> Result; // async fn set_versioning(&self, bucket: &str, enabled: bool) -> Result<()>; // async fn get_tags(&self, path: &RemotePath) -> Result>; diff --git a/crates/s3/src/admin.rs b/crates/s3/src/admin.rs index f4dafda..65f6882 100644 --- a/crates/s3/src/admin.rs +++ b/crates/s3/src/admin.rs @@ -715,6 +715,82 @@ impl AdminApi for AdminClient { let path = format!("/quota/{}", urlencoding::encode(bucket)); self.request(Method::DELETE, &path, None, None).await } + + // ==================== Tier Operations ==================== + + async fn list_tiers(&self) -> Result> { + self.request(Method::GET, "/tier", None, None).await + } + + async fn tier_stats(&self) -> Result { + self.request(Method::GET, "/tier-stats", None, None).await + } + + async fn add_tier(&self, config: rc_core::admin::TierConfig) -> Result<()> { + let body = serde_json::to_vec(&config).map_err(Error::Json)?; + self.request_no_response(Method::PUT, "/tier", None, Some(&body)) 
+ .await + } + + async fn edit_tier(&self, name: &str, creds: rc_core::admin::TierCreds) -> Result<()> { + let path = format!("/tier/{}", urlencoding::encode(name)); + let body = serde_json::to_vec(&creds).map_err(Error::Json)?; + self.request_no_response(Method::POST, &path, None, Some(&body)) + .await + } + + async fn remove_tier(&self, name: &str, force: bool) -> Result<()> { + let path = format!("/tier/{}", urlencoding::encode(name)); + if force { + let query: &[(&str, &str)] = &[("force", "true")]; + self.request_no_response(Method::DELETE, &path, Some(query), None) + .await + } else { + self.request_no_response(Method::DELETE, &path, None, None) + .await + } + } + + // ==================== Replication Target Operations ==================== + + async fn set_remote_target( + &self, + bucket: &str, + target: rc_core::replication::BucketTarget, + update: bool, + ) -> Result { + let body = serde_json::to_vec(&target).map_err(Error::Json)?; + if update { + let query: &[(&str, &str)] = &[("bucket", bucket), ("update", "true")]; + self.request(Method::PUT, "/set-remote-target", Some(query), Some(&body)) + .await + } else { + let query: &[(&str, &str)] = &[("bucket", bucket)]; + self.request(Method::PUT, "/set-remote-target", Some(query), Some(&body)) + .await + } + } + + async fn list_remote_targets( + &self, + bucket: &str, + ) -> Result> { + let query: &[(&str, &str)] = &[("bucket", bucket)]; + self.request(Method::GET, "/list-remote-targets", Some(query), None) + .await + } + + async fn remove_remote_target(&self, bucket: &str, arn: &str) -> Result<()> { + let query: &[(&str, &str)] = &[("bucket", bucket), ("arn", arn)]; + self.request_no_response(Method::DELETE, "/remove-remote-target", Some(query), None) + .await + } + + async fn replication_metrics(&self, bucket: &str) -> Result { + let query: &[(&str, &str)] = &[("bucket", bucket)]; + self.request(Method::GET, "/replicationmetrics", Some(query), None) + .await + } } #[cfg(test)] diff --git 
a/crates/s3/src/capability.rs b/crates/s3/src/capability.rs index 5428ea8..cb6979d 100644 --- a/crates/s3/src/capability.rs +++ b/crates/s3/src/capability.rs @@ -64,6 +64,8 @@ pub fn require_capability(caps: &Capabilities, feature: &str) -> Result<()> { "tagging" => caps.tagging, "select" | "sql" => caps.select, "notifications" | "watch" => caps.notifications, + "lifecycle" => caps.lifecycle, + "replication" => caps.replication, _ => false, }; diff --git a/crates/s3/src/client.rs b/crates/s3/src/client.rs index 9f117ea..23c1d69 100644 --- a/crates/s3/src/client.rs +++ b/crates/s3/src/client.rs @@ -14,8 +14,9 @@ use aws_smithy_types::body::SdkBody; use bytes::Bytes; use jiff::Timestamp; use rc_core::{ - Alias, BucketNotification, Capabilities, Error, ListOptions, ListResult, NotificationTarget, - ObjectInfo, ObjectStore, ObjectVersion, RemotePath, Result, + Alias, BucketNotification, Capabilities, Error, LifecycleRule, ListOptions, ListResult, + NotificationTarget, ObjectInfo, ObjectStore, ObjectVersion, RemotePath, + ReplicationConfiguration, Result, }; use tokio::io::AsyncReadExt; @@ -905,6 +906,8 @@ impl ObjectStore for S3Client { anonymous: true, select: false, notifications: true, + lifecycle: true, + replication: true, }) } @@ -1541,6 +1544,460 @@ impl ObjectStore for S3Client { Ok(()) } + + async fn get_bucket_lifecycle(&self, bucket: &str) -> Result> { + let response = match self + .inner + .get_bucket_lifecycle_configuration() + .bucket(bucket) + .send() + .await + { + Ok(resp) => resp, + Err(error) => { + let error_text = Self::format_sdk_error(&error); + if error_text.contains("NoSuchLifecycleConfiguration") + || error_text.contains("lifecycle configuration is not found") + { + return Ok(Vec::new()); + } + return Err(Error::General(format!( + "get_bucket_lifecycle: {error_text}" + ))); + } + }; + + let mut rules = Vec::new(); + for sdk_rule in response.rules() { + let id = sdk_rule.id().unwrap_or("").to_string(); + let status = match 
sdk_rule.status().as_str() { + "Enabled" => rc_core::LifecycleRuleStatus::Enabled, + _ => rc_core::LifecycleRuleStatus::Disabled, + }; + + let prefix = sdk_rule + .filter() + .and_then(|f| f.prefix().map(|p| p.to_string())) + .or_else(|| { + sdk_rule + .filter() + .and_then(|f| f.and()) + .and_then(|a| a.prefix().map(|p: &str| p.to_string())) + }); + + let expiration = sdk_rule + .expiration() + .map(|exp| rc_core::LifecycleExpiration { + days: exp.days(), + date: exp.date().map(|d| d.to_string()), + }); + + let transition = sdk_rule + .transitions() + .first() + .map(|t| rc_core::LifecycleTransition { + days: t.days(), + date: t.date().map(|d| d.to_string()), + storage_class: t + .storage_class() + .map(|sc| sc.as_str().to_string()) + .unwrap_or_default(), + }); + + let noncurrent_version_expiration = + sdk_rule.noncurrent_version_expiration().map(|nve| { + rc_core::NoncurrentVersionExpiration { + noncurrent_days: nve.noncurrent_days().unwrap_or(0), + newer_noncurrent_versions: nve.newer_noncurrent_versions(), + } + }); + + let noncurrent_version_transition = sdk_rule + .noncurrent_version_transitions() + .first() + .map(|nvt| rc_core::NoncurrentVersionTransition { + noncurrent_days: nvt.noncurrent_days().unwrap_or(0), + storage_class: nvt + .storage_class() + .map(|sc| sc.as_str().to_string()) + .unwrap_or_default(), + }); + + let abort_incomplete_multipart_upload_days = sdk_rule + .abort_incomplete_multipart_upload() + .and_then(|a| a.days_after_initiation()); + + let expired_object_delete_marker = sdk_rule + .expiration() + .and_then(|e| e.expired_object_delete_marker()) + .filter(|v| *v); + + rules.push(LifecycleRule { + id, + status, + prefix, + tags: None, + expiration, + transition, + noncurrent_version_expiration, + noncurrent_version_transition, + abort_incomplete_multipart_upload_days, + expired_object_delete_marker, + }); + } + + Ok(rules) + } + + async fn set_bucket_lifecycle(&self, bucket: &str, rules: Vec) -> Result<()> { + use aws_sdk_s3::types::{ + 
AbortIncompleteMultipartUpload, BucketLifecycleConfiguration, ExpirationStatus, + LifecycleExpiration as SdkExpiration, LifecycleRule as SdkRule, LifecycleRuleFilter, + NoncurrentVersionExpiration as SdkNve, NoncurrentVersionTransition as SdkNvt, + Transition, TransitionStorageClass, + }; + + let mut sdk_rules = Vec::new(); + for rule in rules { + let status = match rule.status { + rc_core::LifecycleRuleStatus::Enabled => ExpirationStatus::Enabled, + rc_core::LifecycleRuleStatus::Disabled => ExpirationStatus::Disabled, + }; + + let filter = rule + .prefix + .as_ref() + .map(|p| LifecycleRuleFilter::builder().prefix(p).build()); + + let expiration = rule.expiration.map(|exp| { + let mut builder = SdkExpiration::builder(); + if let Some(days) = exp.days { + builder = builder.days(days); + } + if let Some(ref date_str) = exp.date + && let Ok(dt) = aws_smithy_types::DateTime::from_str( + date_str, + aws_smithy_types::date_time::Format::DateTime, + ) + { + builder = builder.date(dt); + } + if let Some(true) = rule.expired_object_delete_marker { + builder = builder.expired_object_delete_marker(true); + } + builder.build() + }); + + let transitions = rule.transition.map(|t| { + #[allow(deprecated)] + let sc = TransitionStorageClass::from(t.storage_class.as_str()); + let mut builder = Transition::builder().storage_class(sc); + if let Some(days) = t.days { + builder = builder.days(days); + } + if let Some(ref date_str) = t.date + && let Ok(dt) = aws_smithy_types::DateTime::from_str( + date_str, + aws_smithy_types::date_time::Format::DateTime, + ) + { + builder = builder.date(dt); + } + vec![builder.build()] + }); + + let nve = rule.noncurrent_version_expiration.map(|nve| { + let mut builder = SdkNve::builder().noncurrent_days(nve.noncurrent_days); + if let Some(newer) = nve.newer_noncurrent_versions { + builder = builder.newer_noncurrent_versions(newer); + } + builder.build() + }); + + let nvt = rule.noncurrent_version_transition.map(|nvt| { + let sc = 
TransitionStorageClass::from(nvt.storage_class.as_str()); + let builder = SdkNvt::builder() + .noncurrent_days(nvt.noncurrent_days) + .storage_class(sc); + vec![builder.build()] + }); + + let abort = rule.abort_incomplete_multipart_upload_days.map(|days| { + AbortIncompleteMultipartUpload::builder() + .days_after_initiation(days) + .build() + }); + + let mut builder = SdkRule::builder().id(&rule.id).status(status); + if let Some(filter) = filter { + builder = builder.filter(filter); + } + if let Some(expiration) = expiration { + builder = builder.expiration(expiration); + } + if let Some(transitions) = transitions { + builder = builder.set_transitions(Some(transitions)); + } + if let Some(nve) = nve { + builder = builder.noncurrent_version_expiration(nve); + } + if let Some(nvt) = nvt { + builder = builder.set_noncurrent_version_transitions(Some(nvt)); + } + if let Some(abort) = abort { + builder = builder.abort_incomplete_multipart_upload(abort); + } + + let sdk_rule = builder + .build() + .map_err(|e| Error::General(format!("build lifecycle rule: {e}")))?; + sdk_rules.push(sdk_rule); + } + + let config = BucketLifecycleConfiguration::builder() + .set_rules(Some(sdk_rules)) + .build() + .map_err(|e| Error::General(format!("build lifecycle config: {e}")))?; + + self.inner + .put_bucket_lifecycle_configuration() + .bucket(bucket) + .lifecycle_configuration(config) + .send() + .await + .map_err(|e| { + Error::General(format!( + "set_bucket_lifecycle: {}", + Self::format_sdk_error(&e) + )) + })?; + + Ok(()) + } + + async fn delete_bucket_lifecycle(&self, bucket: &str) -> Result<()> { + self.inner + .delete_bucket_lifecycle() + .bucket(bucket) + .send() + .await + .map_err(|e| { + Error::General(format!( + "delete_bucket_lifecycle: {}", + Self::format_sdk_error(&e) + )) + })?; + Ok(()) + } + + async fn restore_object(&self, path: &RemotePath, days: i32) -> Result<()> { + use aws_sdk_s3::types::RestoreRequest; + + let request = 
RestoreRequest::builder().days(days).build(); + self.inner + .restore_object() + .bucket(&path.bucket) + .key(&path.key) + .restore_request(request) + .send() + .await + .map_err(|e| { + Error::General(format!("restore_object: {}", Self::format_sdk_error(&e))) + })?; + Ok(()) + } + + async fn get_bucket_replication( + &self, + bucket: &str, + ) -> Result> { + let response = match self + .inner + .get_bucket_replication() + .bucket(bucket) + .send() + .await + { + Ok(resp) => resp, + Err(error) => { + let error_text = Self::format_sdk_error(&error); + if error_text.contains("ReplicationConfigurationNotFound") + || error_text.contains("replication configuration is not found") + || error_text.contains("replication not found") + { + return Ok(None); + } + return Err(Error::General(format!( + "get_bucket_replication: {error_text}" + ))); + } + }; + + let Some(config) = response.replication_configuration() else { + return Ok(None); + }; + + let role = config.role().to_string(); + let mut rules = Vec::new(); + + for sdk_rule in config.rules() { + let id = sdk_rule.id().unwrap_or("").to_string(); + let priority = sdk_rule.priority().unwrap_or(0); + let status = match sdk_rule.status().as_str() { + "Enabled" => rc_core::ReplicationRuleStatus::Enabled, + _ => rc_core::ReplicationRuleStatus::Disabled, + }; + + let prefix = sdk_rule + .filter() + .and_then(|f| f.prefix().map(|p| p.to_string())); + + let destination = rc_core::ReplicationDestination { + bucket_arn: sdk_rule + .destination() + .map(|d| d.bucket().to_string()) + .unwrap_or_default(), + storage_class: sdk_rule + .destination() + .and_then(|d| d.storage_class()) + .map(|sc| sc.as_str().to_string()), + }; + + let delete_marker_replication = sdk_rule + .delete_marker_replication() + .and_then(|d| d.status()) + .map(|s| s.as_str() == "Enabled"); + + let existing_object_replication = sdk_rule + .existing_object_replication() + .map(|e| e.status().as_str() == "Enabled"); + + rules.push(rc_core::ReplicationRule { + id, + 
priority, + status, + prefix, + tags: None, + destination, + delete_marker_replication, + existing_object_replication, + delete_replication: None, + }); + } + + Ok(Some(ReplicationConfiguration { role, rules })) + } + + async fn set_bucket_replication( + &self, + bucket: &str, + config: ReplicationConfiguration, + ) -> Result<()> { + use aws_sdk_s3::types::{ + DeleteMarkerReplication, DeleteMarkerReplicationStatus, Destination, + ExistingObjectReplication, ExistingObjectReplicationStatus, + ReplicationConfiguration as SdkConfig, ReplicationRule as SdkRule, + ReplicationRuleFilter, ReplicationRuleStatus as SdkStatus, StorageClass, + }; + + let mut sdk_rules = Vec::new(); + for rule in config.rules { + let status = match rule.status { + rc_core::ReplicationRuleStatus::Enabled => SdkStatus::Enabled, + rc_core::ReplicationRuleStatus::Disabled => SdkStatus::Disabled, + }; + + let mut dest_builder = Destination::builder().bucket(&rule.destination.bucket_arn); + if let Some(sc) = &rule.destination.storage_class { + dest_builder = dest_builder.storage_class(StorageClass::from(sc.as_str())); + } + let destination = dest_builder + .build() + .map_err(|e| Error::General(format!("build destination: {e}")))?; + + let filter = rule + .prefix + .as_ref() + .map(|p| ReplicationRuleFilter::builder().prefix(p).build()); + + let dmr = rule.delete_marker_replication.map(|enabled| { + DeleteMarkerReplication::builder() + .status(if enabled { + DeleteMarkerReplicationStatus::Enabled + } else { + DeleteMarkerReplicationStatus::Disabled + }) + .build() + }); + + let eor = rule.existing_object_replication.map(|enabled| { + ExistingObjectReplication::builder() + .status(if enabled { + ExistingObjectReplicationStatus::Enabled + } else { + ExistingObjectReplicationStatus::Disabled + }) + .build() + .expect("build existing object replication") + }); + + let mut builder = SdkRule::builder() + .id(&rule.id) + .priority(rule.priority) + .status(status) + .destination(destination); + if let 
Some(filter) = filter { + builder = builder.filter(filter); + } + if let Some(dmr) = dmr { + builder = builder.delete_marker_replication(dmr); + } + if let Some(eor) = eor { + builder = builder.existing_object_replication(eor); + } + + let sdk_rule = builder + .build() + .map_err(|e| Error::General(format!("build replication rule: {e}")))?; + sdk_rules.push(sdk_rule); + } + + let sdk_config = SdkConfig::builder() + .role(&config.role) + .set_rules(Some(sdk_rules)) + .build() + .map_err(|e| Error::General(format!("build replication config: {e}")))?; + + self.inner + .put_bucket_replication() + .bucket(bucket) + .replication_configuration(sdk_config) + .send() + .await + .map_err(|e| { + Error::General(format!( + "set_bucket_replication: {}", + Self::format_sdk_error(&e) + )) + })?; + + Ok(()) + } + + async fn delete_bucket_replication(&self, bucket: &str) -> Result<()> { + self.inner + .delete_bucket_replication() + .bucket(bucket) + .send() + .await + .map_err(|e| { + Error::General(format!( + "delete_bucket_replication: {}", + Self::format_sdk_error(&e) + )) + })?; + Ok(()) + } } #[cfg(test)] From afbb55224598f7f2a8da89fb94b5e47138a466e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E7=99=BB=E5=B1=B1?= Date: Fri, 20 Mar 2026 10:27:11 +0800 Subject: [PATCH 2/5] feat(phase-2): fix replication target endpoint handling --- README.md | 16 ++++- crates/cli/src/commands/replicate.rs | 90 ++++++++++++++++++++++++++-- 2 files changed, 99 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 735f1e4..e58561b 100644 --- a/README.md +++ b/README.md @@ -180,8 +180,20 @@ rc ilm restore local/my-bucket/archived-file.dat --days 7 ### Bucket Replication ```bash -# Add replication rule (requires remote target setup) -rc replicate add local/my-bucket --remote-bucket remote/target-bucket --priority 1 +# Replication requires versioning on both source and destination buckets +rc version enable local/my-bucket +rc version enable remote/target-bucket + +# Configure 
a remote alias with the destination RustFS endpoint URL. +# rc normalizes the remote target endpoint to the host:port form expected by +# the RustFS admin API when creating replication targets. +rc alias set remote http://remote:9000 ACCESS_KEY SECRET_KEY + +# Add a replication rule +rc replicate add local/my-bucket \ + --remote-bucket remote/target-bucket \ + --priority 1 \ + --replicate delete,delete-marker,existing-objects # List replication rules rc replicate list local/my-bucket diff --git a/crates/cli/src/commands/replicate.rs b/crates/cli/src/commands/replicate.rs index 2f8760d..8d9eb8f 100644 --- a/crates/cli/src/commands/replicate.rs +++ b/crates/cli/src/commands/replicate.rs @@ -17,6 +17,10 @@ use serde::Serialize; use crate::exit_code::ExitCode; use crate::output::{Formatter, OutputConfig}; +const DEFAULT_REMOTE_TARGET_PATH: &str = "auto"; +const DEFAULT_REMOTE_TARGET_API: &str = "s3v4"; +const DEFAULT_REPLICATION_STORAGE_CLASS: &str = "STANDARD"; + /// Manage bucket replication #[derive(Args, Debug)] pub struct ReplicateArgs { @@ -261,23 +265,31 @@ async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { Err(code) => return code, }; - let secure = target_alias_info.endpoint.starts_with("https"); + let (target_endpoint, secure) = + remote_target_endpoint(&target_alias_info.endpoint, target_alias_info.insecure); + + let storage_class = args + .storage_class + .clone() + .unwrap_or_else(|| DEFAULT_REPLICATION_STORAGE_CLASS.to_string()); // Build BucketTarget let target = BucketTarget { source_bucket: source_bucket.clone(), - endpoint: target_alias_info.endpoint.clone(), + endpoint: target_endpoint, credentials: Some(BucketTargetCredentials { access_key: target_alias_info.access_key.clone(), secret_key: target_alias_info.secret_key.clone(), }), target_bucket: target_bucket.clone(), secure, + path: DEFAULT_REMOTE_TARGET_PATH.to_string(), + api: DEFAULT_REMOTE_TARGET_API.to_string(), target_type: "replication".to_string(), region: 
target_alias_info.region.clone(), bandwidth_limit: args.bandwidth, replication_sync: args.sync, - storage_class: args.storage_class.clone().unwrap_or_default(), + storage_class: storage_class.clone(), health_check_duration: args.healthcheck_seconds, disable_proxy: args.disable_proxy, ..Default::default() @@ -303,6 +315,8 @@ async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { .id .unwrap_or_else(|| format!("rule-{}", &arn[arn.len().saturating_sub(8)..])); + let destination_storage_class = Some(storage_class); + let new_rule = ReplicationRule { id: rule_id.clone(), priority: args.priority, @@ -311,7 +325,7 @@ async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { tags: None, destination: ReplicationDestination { bucket_arn: arn, - storage_class: args.storage_class, + storage_class: destination_storage_class, }, delete_marker_replication: Some(delete_marker_replication), existing_object_replication: Some(existing_object_replication), @@ -322,7 +336,7 @@ async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { let mut config = match s3_client.get_bucket_replication(&source_bucket).await { Ok(Some(config)) => config, Ok(None) => ReplicationConfiguration { - role: String::new(), + role: default_replication_role(&new_rule.destination.bucket_arn), rules: Vec::new(), }, Err(error) => { @@ -331,6 +345,10 @@ async fn execute_add(args: AddArgs, output_config: OutputConfig) -> ExitCode { } }; + if config.role.is_empty() { + config.role = default_replication_role(&new_rule.destination.bucket_arn); + } + config.rules.push(new_rule); match s3_client @@ -869,6 +887,28 @@ fn parse_replicate_flags(flags: Option<&str>) -> (bool, bool, bool) { (delete, delete_marker, existing_objects) } +fn default_replication_role(bucket_arn: &str) -> String { + bucket_arn.to_string() +} + +fn remote_target_endpoint(endpoint: &str, insecure: bool) -> (String, bool) { + let trimmed = endpoint.trim().trim_end_matches('/'); + + if 
let Some(rest) = trimmed.strip_prefix("https://") { + return (strip_endpoint_path(rest), true); + } + + if let Some(rest) = trimmed.strip_prefix("http://") { + return (strip_endpoint_path(rest), false); + } + + (strip_endpoint_path(trimmed), !insecure) +} + +fn strip_endpoint_path(endpoint: &str) -> String { + endpoint.split('/').next().unwrap_or(endpoint).to_string() +} + #[cfg(test)] mod tests { use super::*; @@ -922,6 +962,46 @@ mod tests { assert!(d); } + #[test] + fn test_default_replication_role_uses_destination_arn() { + let arn = "arn:rustfs:replication:us-east-1:123:test"; + assert_eq!(default_replication_role(arn), arn); + } + + #[test] + fn test_remote_target_endpoint_strips_scheme_and_path() { + let (endpoint, secure) = remote_target_endpoint("https://localhost:9005/path/", false); + assert_eq!(endpoint, "localhost:9005"); + assert!(secure); + } + + #[test] + fn test_remote_target_endpoint_supports_plain_host_port() { + let (endpoint, secure) = remote_target_endpoint("localhost:9005", true); + assert_eq!(endpoint, "localhost:9005"); + assert!(!secure); + } + + #[test] + fn test_add_defaults_destination_storage_class_to_standard() { + let rule = ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: ReplicationRuleStatus::Enabled, + prefix: None, + tags: None, + destination: ReplicationDestination { + bucket_arn: "arn:rustfs:replication:us-east-1:123:test".to_string(), + storage_class: Some("STANDARD".to_string()), + }, + delete_marker_replication: Some(false), + existing_object_replication: Some(false), + delete_replication: Some(false), + }; + + assert_eq!(rule.destination.storage_class.as_deref(), Some("STANDARD")); + } + #[tokio::test] async fn test_execute_add_invalid_path_returns_usage_error() { let args = ReplicateArgs { From 6a2213d2c7ffafc1696acd58bdc67e95bdc37ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E7=99=BB=E5=B1=B1?= Date: Fri, 20 Mar 2026 10:50:19 +0800 Subject: [PATCH 3/5] feat(phase-2): align tier info and 
replication parsing --- Cargo.lock | 11 + Cargo.toml | 1 + crates/cli/src/commands/ilm/tier.rs | 122 ++++- crates/cli/src/commands/replicate.rs | 45 ++ crates/s3/Cargo.toml | 2 +- crates/s3/src/client.rs | 640 +++++++++++++++++++-------- 6 files changed, 627 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6ccc89..849fc48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2014,6 +2014,16 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quick-xml" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66c2058c55a409d601666cffe35f04333cf1013010882cec174a7467cd4e21c" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "quinn" version = "0.11.9" @@ -2161,6 +2171,7 @@ dependencies = [ "http 1.4.0", "jiff", "mockall", + "quick-xml", "rc-core", "reqwest", "serde", diff --git a/Cargo.toml b/Cargo.toml index 24a4fcb..a92c60a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ clap_complete = "4.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" toml = "0.9" +quick-xml = { version = "0.38", features = ["serialize"] } # Error handling thiserror = "2.0" diff --git a/crates/cli/src/commands/ilm/tier.rs b/crates/cli/src/commands/ilm/tier.rs index 3ac4ef4..465964b 100644 --- a/crates/cli/src/commands/ilm/tier.rs +++ b/crates/cli/src/commands/ilm/tier.rs @@ -126,6 +126,15 @@ struct TierOperationOutput { action: String, } +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct TierInfoOutput { + tier_name: String, + config: TierConfig, + #[serde(skip_serializing_if = "Option::is_none")] + stats: Option, +} + /// Execute a tier subcommand pub async fn execute(cmd: TierCommands, output_config: OutputConfig) -> ExitCode { match cmd { @@ -277,35 +286,61 @@ async fn execute_info(args: TierNameArg, output_config: OutputConfig) -> ExitCod Err(code) => return code, }; - let stats = match client.tier_stats().await { - Ok(stats) => stats, + let tiers = 
match client.list_tiers().await { + Ok(tiers) => tiers, Err(error) => { - formatter.error(&format!("Failed to get tier stats: {error}")); + formatter.error(&format!("Failed to list tiers: {error}")); return ExitCode::GeneralError; } }; - // The tier_stats endpoint returns a JSON object keyed by tier name - let tier_name_upper = args.tier_name.to_uppercase(); - if let Some(tier_info) = stats.get(&tier_name_upper) { - if formatter.is_json() { - formatter.json(&serde_json::json!({ - "tierName": tier_name_upper, - "stats": tier_info, - })); - } else { - formatter.println(&format!("Tier: {tier_name_upper}")); - if let Some(obj) = tier_info.as_object() { - for (key, value) in obj { - formatter.println(&format!(" {key}: {value}")); + let Some(config) = find_tier_config(&tiers, &args.tier_name).cloned() else { + formatter.error(&format!("Tier '{}' not found", args.tier_name)); + return ExitCode::NotFound; + }; + + let tier_name = config.tier_name().to_string(); + let stats = match client.tier_stats().await { + Ok(stats) => stats.get(&tier_name).cloned(), + Err(_) => None, + }; + + if formatter.is_json() { + formatter.json(&TierInfoOutput { + tier_name, + config, + stats, + }); + } else { + formatter.println(&format!("Tier: {}", config.tier_name())); + formatter.println(&format!("Type: {}", config.tier_type)); + formatter.println(&format!("Endpoint: {}", config.endpoint())); + formatter.println(&format!("Bucket: {}", config.bucket())); + formatter.println(&format!("Prefix: {}", display_or_dash(config.prefix()))); + formatter.println(&format!("Region: {}", display_or_dash(config.region()))); + if let Some(stats) = stats { + formatter.println("Stats:"); + match serde_json::to_string_pretty(&stats) { + Ok(pretty) => formatter.println(&pretty), + Err(error) => { + formatter.error(&format!("Failed to format tier stats: {error}")); + return ExitCode::GeneralError; } } } - ExitCode::Success - } else { - formatter.error(&format!("Tier '{}' not found in stats", args.tier_name)); - 
ExitCode::NotFound } + + ExitCode::Success +} + +fn find_tier_config<'a>(tiers: &'a [TierConfig], tier_name: &str) -> Option<&'a TierConfig> { + tiers + .iter() + .find(|tier| tier.tier_name().eq_ignore_ascii_case(tier_name)) +} + +fn display_or_dash(value: &str) -> &str { + if value.is_empty() { "-" } else { value } } async fn execute_remove(args: RemoveTierArgs, output_config: OutputConfig) -> ExitCode { @@ -542,6 +577,53 @@ mod tests { assert!(config.s3.is_none()); } + #[test] + fn test_find_tier_config_matches_case_insensitively() { + let warm = build_tier_config(&TierConfigParams { + tier_type: TierType::RustFS, + name: "WARM", + endpoint: "http://remote:9000", + access_key: "admin", + secret_key: "password", + bucket: "archive-bucket", + prefix: "", + region: "", + storage_class: "", + }); + let cold = build_tier_config(&TierConfigParams { + tier_type: TierType::MinIO, + name: "COLD", + endpoint: "http://minio:9000", + access_key: "key", + secret_key: "secret", + bucket: "cold-bucket", + prefix: "data/", + region: "us-west-2", + storage_class: "", + }); + + let tiers = vec![warm, cold]; + let matched = find_tier_config(&tiers, "warm").expect("tier should exist"); + assert_eq!(matched.tier_name(), "WARM"); + } + + #[test] + fn test_find_tier_config_returns_none_when_missing() { + let tiers = vec![build_tier_config(&TierConfigParams { + tier_type: TierType::RustFS, + name: "WARM", + endpoint: "http://remote:9000", + access_key: "admin", + secret_key: "password", + bucket: "archive-bucket", + prefix: "", + region: "", + storage_class: "", + })]; + + assert!(find_tier_config(&tiers, "COLD").is_none()); + } + #[tokio::test] async fn test_execute_add_invalid_tier_type_returns_usage_error() { let args = AddTierArgs { diff --git a/crates/cli/src/commands/replicate.rs b/crates/cli/src/commands/replicate.rs index 8d9eb8f..1439668 100644 --- a/crates/cli/src/commands/replicate.rs +++ b/crates/cli/src/commands/replicate.rs @@ -498,6 +498,7 @@ async fn execute_list(args: 
BucketArg, output_config: OutputConfig) -> ExitCode Cell::new("Priority"), Cell::new("Status"), Cell::new("Prefix"), + Cell::new("Flags"), Cell::new("Destination"), Cell::new("Storage Class"), ]); @@ -508,6 +509,7 @@ async fn execute_list(args: BucketArg, output_config: OutputConfig) -> ExitCode Cell::new(rule.priority), Cell::new(rule.status), Cell::new(rule.prefix.as_deref().unwrap_or("-")), + Cell::new(format_replication_flags(rule)), Cell::new(&rule.destination.bucket_arn), Cell::new(rule.destination.storage_class.as_deref().unwrap_or("-")), ]); @@ -891,6 +893,26 @@ fn default_replication_role(bucket_arn: &str) -> String { bucket_arn.to_string() } +fn format_replication_flags(rule: &ReplicationRule) -> String { + let mut flags = Vec::new(); + + if rule.delete_replication == Some(true) { + flags.push("delete"); + } + if rule.delete_marker_replication == Some(true) { + flags.push("delete-marker"); + } + if rule.existing_object_replication == Some(true) { + flags.push("existing-objects"); + } + + if flags.is_empty() { + "-".to_string() + } else { + flags.join(",") + } +} + fn remote_target_endpoint(endpoint: &str, insecure: bool) -> (String, bool) { let trimmed = endpoint.trim().trim_end_matches('/'); @@ -968,6 +990,29 @@ mod tests { assert_eq!(default_replication_role(arn), arn); } + #[test] + fn test_format_replication_flags_includes_delete_replication() { + let rule = ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: ReplicationRuleStatus::Enabled, + prefix: None, + tags: None, + destination: ReplicationDestination { + bucket_arn: "arn:rustfs:replication:us-east-1:123:test".to_string(), + storage_class: Some("STANDARD".to_string()), + }, + delete_marker_replication: Some(true), + existing_object_replication: Some(true), + delete_replication: Some(true), + }; + + assert_eq!( + format_replication_flags(&rule), + "delete,delete-marker,existing-objects" + ); + } + #[test] fn test_remote_target_endpoint_strips_scheme_and_path() { let (endpoint, 
secure) = remote_target_endpoint("https://localhost:9005/path/", false); diff --git a/crates/s3/Cargo.toml b/crates/s3/Cargo.toml index 0b56f40..a5deba3 100644 --- a/crates/s3/Cargo.toml +++ b/crates/s3/Cargo.toml @@ -50,8 +50,8 @@ urlencoding.workspace = true # Serialization serde.workspace = true serde_json.workspace = true +quick-xml.workspace = true [dev-dependencies] tempfile.workspace = true mockall.workspace = true - diff --git a/crates/s3/src/client.rs b/crates/s3/src/client.rs index 23c1d69..dcdc2b7 100644 --- a/crates/s3/src/client.rs +++ b/crates/s3/src/client.rs @@ -3,6 +3,11 @@ //! Wraps aws-sdk-s3 and implements the ObjectStore trait from rc-core. use async_trait::async_trait; +use aws_credential_types::Credentials; +use aws_sigv4::http_request::{ + SignableBody, SignableRequest, SignatureLocation, SigningSettings, sign, +}; +use aws_sigv4::sign::v4; use aws_smithy_runtime_api::client::http::{ HttpClient, HttpConnector, HttpConnectorFuture, HttpConnectorSettings, SharedHttpConnector, }; @@ -13,16 +18,23 @@ use aws_smithy_runtime_api::http::{Response, StatusCode}; use aws_smithy_types::body::SdkBody; use bytes::Bytes; use jiff::Timestamp; +use quick_xml::de::from_str as from_xml_str; use rc_core::{ Alias, BucketNotification, Capabilities, Error, LifecycleRule, ListOptions, ListResult, NotificationTarget, ObjectInfo, ObjectStore, ObjectVersion, RemotePath, ReplicationConfiguration, Result, }; +use reqwest::Method; +use reqwest::header::{CONTENT_TYPE, HeaderMap, HeaderName, HeaderValue}; +use serde::Deserialize; +use sha2::{Digest, Sha256}; use tokio::io::AsyncReadExt; /// Keep single-part uploads small to avoid backend incompatibilities with /// streaming aws-chunked payloads. 
const SINGLE_PUT_OBJECT_MAX_SIZE: u64 = crate::multipart::DEFAULT_PART_SIZE; +const S3_SERVICE_NAME: &str = "s3"; +const S3_REPLICATION_XML_NAMESPACE: &str = "http://s3.amazonaws.com/doc/2006-03-01/"; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum BucketPolicyErrorKind { @@ -40,28 +52,222 @@ struct ReqwestConnector { impl ReqwestConnector { async fn new(insecure: bool, ca_bundle: Option<&str>) -> Result { - // NOTE: When `insecure = true`, `danger_accept_invalid_certs` disables all TLS - // certificate verification. Any CA bundle provided will still be added to the - // trust store but is rendered ineffective for this connection. - let mut builder = reqwest::Client::builder().danger_accept_invalid_certs(insecure); - - if let Some(bundle_path) = ca_bundle { - // Use tokio::fs::read to avoid blocking the async runtime thread. - let pem = tokio::fs::read(bundle_path).await.map_err(|e| { - Error::Network(format!("Failed to read CA bundle '{bundle_path}': {e}")) - })?; - let cert = reqwest::Certificate::from_pem(&pem) - .map_err(|e| Error::Network(format!("Invalid CA bundle '{bundle_path}': {e}")))?; - builder = builder.add_root_certificate(cert); + let client = build_reqwest_client(insecure, ca_bundle).await?; + Ok(Self { client }) + } +} + +async fn build_reqwest_client(insecure: bool, ca_bundle: Option<&str>) -> Result { + // NOTE: When `insecure = true`, `danger_accept_invalid_certs` disables all TLS + // certificate verification. Any CA bundle provided will still be added to the + // trust store but is rendered ineffective for this connection. + let mut builder = reqwest::Client::builder().danger_accept_invalid_certs(insecure); + + if let Some(bundle_path) = ca_bundle { + // Use tokio::fs::read to avoid blocking the async runtime thread. 
+ let pem = tokio::fs::read(bundle_path).await.map_err(|e| { + Error::Network(format!("Failed to read CA bundle '{bundle_path}': {e}")) + })?; + let cert = reqwest::Certificate::from_pem(&pem) + .map_err(|e| Error::Network(format!("Invalid CA bundle '{bundle_path}': {e}")))?; + builder = builder.add_root_certificate(cert); + } + + let client = builder + .build() + .map_err(|e| Error::Network(format!("Failed to build HTTP client: {e}")))?; + Ok(client) +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationConfigurationXml { + role: Option, + #[serde(rename = "Rule", default)] + rules: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationRuleXml { + #[serde(rename = "ID")] + id: Option, + priority: Option, + status: Option, + #[serde(rename = "Prefix")] + legacy_prefix: Option, + filter: Option, + destination: Option, + delete_marker_replication: Option, + existing_object_replication: Option, + delete_replication: Option, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationFilterXml { + prefix: Option, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationDestinationXml { + bucket: Option, + storage_class: Option, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationStatusXml { + status: Option, +} + +fn parse_replication_status(status: Option<&ReplicationStatusXml>) -> Option { + status + .and_then(|value| value.status.as_deref()) + .map(|value| value.eq_ignore_ascii_case("enabled")) +} + +fn parse_replication_rule_status(status: Option<&str>) -> rc_core::ReplicationRuleStatus { + match status { + Some(value) if value.eq_ignore_ascii_case("enabled") => { + rc_core::ReplicationRuleStatus::Enabled } + _ => rc_core::ReplicationRuleStatus::Disabled, + } +} - let client = builder - .build() - .map_err(|e| Error::Network(format!("Failed to build HTTP client: {e}")))?; - 
Ok(Self { client }) +fn parse_replication_configuration_xml(body: &str) -> Result { + let config: ReplicationConfigurationXml = from_xml_str(body) + .map_err(|e| Error::General(format!("parse replication config xml: {e}")))?; + + let rules = config + .rules + .into_iter() + .map(|rule| rc_core::ReplicationRule { + id: rule.id.unwrap_or_default(), + priority: rule.priority.unwrap_or_default(), + status: parse_replication_rule_status(rule.status.as_deref()), + prefix: rule + .filter + .and_then(|filter| filter.prefix) + .or(rule.legacy_prefix), + tags: None, + destination: rc_core::ReplicationDestination { + bucket_arn: rule + .destination + .as_ref() + .and_then(|destination| destination.bucket.clone()) + .unwrap_or_default(), + storage_class: rule + .destination + .and_then(|destination| destination.storage_class), + }, + delete_marker_replication: parse_replication_status( + rule.delete_marker_replication.as_ref(), + ), + existing_object_replication: parse_replication_status( + rule.existing_object_replication.as_ref(), + ), + delete_replication: parse_replication_status(rule.delete_replication.as_ref()), + }) + .collect(); + + Ok(ReplicationConfiguration { + role: config.role.unwrap_or_default(), + rules, + }) +} + +fn xml_escape(value: &str) -> String { + value + .replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) + .replace('\'', "'") +} + +fn append_replication_status_tag(xml: &mut String, tag: &str, enabled: Option) { + if let Some(enabled) = enabled { + let status = if enabled { "Enabled" } else { "Disabled" }; + xml.push('<'); + xml.push_str(tag); + xml.push_str(">"); + xml.push_str(status); + xml.push_str("'); } } +fn build_replication_configuration_xml(config: &ReplicationConfiguration) -> String { + let mut xml = String::from(r#""#); + xml.push_str(r#""#); + + if !config.role.is_empty() { + xml.push_str(""); + xml.push_str(&xml_escape(&config.role)); + xml.push_str(""); + } + + for rule in &config.rules { + xml.push_str(""); 
+ + xml.push_str(""); + xml.push_str(match rule.status { + rc_core::ReplicationRuleStatus::Enabled => "Enabled", + rc_core::ReplicationRuleStatus::Disabled => "Disabled", + }); + xml.push_str(""); + + xml.push_str(""); + xml.push_str(&xml_escape(&rule.destination.bucket_arn)); + xml.push_str(""); + if let Some(storage_class) = &rule.destination.storage_class { + xml.push_str(""); + xml.push_str(&xml_escape(storage_class)); + xml.push_str(""); + } + xml.push_str(""); + + if !rule.id.is_empty() { + xml.push_str(""); + xml.push_str(&xml_escape(&rule.id)); + xml.push_str(""); + } + + xml.push_str(""); + xml.push_str(&rule.priority.to_string()); + xml.push_str(""); + + if let Some(prefix) = &rule.prefix { + xml.push_str(""); + xml.push_str(&xml_escape(prefix)); + xml.push_str(""); + } + + append_replication_status_tag( + &mut xml, + "ExistingObjectReplication", + rule.existing_object_replication, + ); + append_replication_status_tag( + &mut xml, + "DeleteMarkerReplication", + rule.delete_marker_replication, + ); + append_replication_status_tag(&mut xml, "DeleteReplication", rule.delete_replication); + + xml.push_str(""); + } + + xml.push_str(""); + xml +} + impl HttpConnector for ReqwestConnector { fn call(&self, request: HttpRequest) -> HttpConnectorFuture { let client = self.client.clone(); @@ -166,6 +372,7 @@ impl HttpClient for ReqwestConnector { /// S3 client wrapper pub struct S3Client { inner: aws_sdk_s3::Client, + xml_http_client: reqwest::Client, #[allow(dead_code)] alias: Alias, } @@ -201,6 +408,8 @@ impl S3Client { config_loader = config_loader.http_client(connector); } + let xml_http_client = + build_reqwest_client(alias.insecure, alias.ca_bundle.as_deref()).await?; let config = config_loader.load().await; // Build S3 client with path-style addressing for compatibility @@ -220,6 +429,7 @@ impl S3Client { Ok(Self { inner: client, + xml_http_client, alias, }) } @@ -262,6 +472,162 @@ impl S3Client { file_size > SINGLE_PUT_OBJECT_MAX_SIZE } + fn 
sha256_hash(body: &[u8]) -> String { + let mut hasher = Sha256::new(); + hasher.update(body); + hex::encode(hasher.finalize()) + } + + fn request_host(&self, url: &reqwest::Url) -> Result { + let host = url + .host_str() + .ok_or_else(|| Error::Network("Missing host in request URL".to_string()))?; + Ok(match url.port() { + Some(port) => format!("{host}:{port}"), + None => host.to_string(), + }) + } + + fn replication_url(&self, bucket: &str) -> Result { + let mut url = + reqwest::Url::parse(self.alias.endpoint.trim_end_matches('/')).map_err(|e| { + Error::Network(format!("Invalid endpoint '{}': {e}", self.alias.endpoint)) + })?; + + { + let mut segments = url.path_segments_mut().map_err(|_| { + Error::Network(format!( + "Endpoint '{}' does not support path-style bucket operations", + self.alias.endpoint + )) + })?; + segments.pop_if_empty(); + segments.push(bucket); + } + + url.set_query(Some("replication=")); + Ok(url) + } + + async fn sign_xml_request( + &self, + method: &Method, + url: &str, + headers: &HeaderMap, + body: &[u8], + ) -> Result { + let credentials = Credentials::new( + &self.alias.access_key, + &self.alias.secret_key, + None, + None, + "s3-xml-client", + ); + + let identity = credentials.into(); + let mut signing_settings = SigningSettings::default(); + signing_settings.signature_location = SignatureLocation::Headers; + + let signing_params = v4::SigningParams::builder() + .identity(&identity) + .region(&self.alias.region) + .name(S3_SERVICE_NAME) + .time(std::time::SystemTime::now()) + .settings(signing_settings) + .build() + .map_err(|e| Error::Auth(format!("Failed to build signing params: {e}")))?; + + let header_pairs: Vec<(&str, &str)> = headers + .iter() + .filter_map(|(k, v)| v.to_str().ok().map(|v| (k.as_str(), v))) + .collect(); + + let signable_request = SignableRequest::new( + method.as_str(), + url, + header_pairs.into_iter(), + SignableBody::Bytes(body), + ) + .map_err(|e| Error::Auth(format!("Failed to create signable request: 
{e}")))?; + + let (signing_instructions, _) = sign(signable_request, &signing_params.into()) + .map_err(|e| Error::Auth(format!("Failed to sign request: {e}")))? + .into_parts(); + + let mut signed_headers = headers.clone(); + for (name, value) in signing_instructions.headers() { + let header_name = HeaderName::try_from(name.to_string()) + .map_err(|e| Error::Auth(format!("Invalid header name: {e}")))?; + let header_value = HeaderValue::try_from(value.to_string()) + .map_err(|e| Error::Auth(format!("Invalid header value: {e}")))?; + signed_headers.insert(header_name, header_value); + } + + Ok(signed_headers) + } + + async fn xml_request( + &self, + method: Method, + url: reqwest::Url, + content_type: Option<&str>, + body: Option>, + ) -> Result { + let body = body.unwrap_or_default(); + let mut headers = HeaderMap::new(); + headers.insert( + "x-amz-content-sha256", + HeaderValue::from_str(&Self::sha256_hash(&body)) + .map_err(|e| Error::Auth(format!("Invalid content hash header: {e}")))?, + ); + headers.insert( + "host", + HeaderValue::from_str(&self.request_host(&url)?) 
+ .map_err(|e| Error::Auth(format!("Invalid host header: {e}")))?, + ); + + if let Some(content_type) = content_type { + headers.insert( + CONTENT_TYPE, + HeaderValue::from_str(content_type) + .map_err(|e| Error::Auth(format!("Invalid content type header: {e}")))?, + ); + } + + let signed_headers = self + .sign_xml_request(&method, url.as_str(), &headers, &body) + .await?; + + let mut request_builder = self.xml_http_client.request(method, url); + for (name, value) in &signed_headers { + request_builder = request_builder.header(name, value); + } + if !body.is_empty() { + request_builder = request_builder.body(body); + } + + let response = request_builder + .send() + .await + .map_err(|e| Error::Network(format!("Request failed: {e}")))?; + + let status = response.status(); + let text = response + .text() + .await + .map_err(|e| Error::Network(format!("Failed to read response: {e}")))?; + + if !status.is_success() { + return Err(Error::Network(format!( + "HTTP {}: {}", + status.as_u16(), + text + ))); + } + + Ok(text) + } + fn bucket_policy_error_kind( error_code: Option<&str>, status_code: Option, @@ -1811,81 +2177,22 @@ impl ObjectStore for S3Client { &self, bucket: &str, ) -> Result> { - let response = match self - .inner - .get_bucket_replication() - .bucket(bucket) - .send() - .await - { - Ok(resp) => resp, - Err(error) => { - let error_text = Self::format_sdk_error(&error); + let url = self.replication_url(bucket)?; + let body = match self.xml_request(Method::GET, url, None, None).await { + Ok(body) => body, + Err(Error::Network(error_text)) if error_text.contains("ReplicationConfigurationNotFound") || error_text.contains("replication configuration is not found") - || error_text.contains("replication not found") - { - return Ok(None); - } - return Err(Error::General(format!( - "get_bucket_replication: {error_text}" - ))); + || error_text.contains("replication not found") => + { + return Ok(None); + } + Err(error) => { + return 
Err(Error::General(format!("get_bucket_replication: {error}"))); } }; - let Some(config) = response.replication_configuration() else { - return Ok(None); - }; - - let role = config.role().to_string(); - let mut rules = Vec::new(); - - for sdk_rule in config.rules() { - let id = sdk_rule.id().unwrap_or("").to_string(); - let priority = sdk_rule.priority().unwrap_or(0); - let status = match sdk_rule.status().as_str() { - "Enabled" => rc_core::ReplicationRuleStatus::Enabled, - _ => rc_core::ReplicationRuleStatus::Disabled, - }; - - let prefix = sdk_rule - .filter() - .and_then(|f| f.prefix().map(|p| p.to_string())); - - let destination = rc_core::ReplicationDestination { - bucket_arn: sdk_rule - .destination() - .map(|d| d.bucket().to_string()) - .unwrap_or_default(), - storage_class: sdk_rule - .destination() - .and_then(|d| d.storage_class()) - .map(|sc| sc.as_str().to_string()), - }; - - let delete_marker_replication = sdk_rule - .delete_marker_replication() - .and_then(|d| d.status()) - .map(|s| s.as_str() == "Enabled"); - - let existing_object_replication = sdk_rule - .existing_object_replication() - .map(|e| e.status().as_str() == "Enabled"); - - rules.push(rc_core::ReplicationRule { - id, - priority, - status, - prefix, - tags: None, - destination, - delete_marker_replication, - existing_object_replication, - delete_replication: None, - }); - } - - Ok(Some(ReplicationConfiguration { role, rules })) + parse_replication_configuration_xml(&body).map(Some) } async fn set_bucket_replication( @@ -1893,93 +2200,11 @@ impl ObjectStore for S3Client { bucket: &str, config: ReplicationConfiguration, ) -> Result<()> { - use aws_sdk_s3::types::{ - DeleteMarkerReplication, DeleteMarkerReplicationStatus, Destination, - ExistingObjectReplication, ExistingObjectReplicationStatus, - ReplicationConfiguration as SdkConfig, ReplicationRule as SdkRule, - ReplicationRuleFilter, ReplicationRuleStatus as SdkStatus, StorageClass, - }; - - let mut sdk_rules = Vec::new(); - for rule in 
config.rules { - let status = match rule.status { - rc_core::ReplicationRuleStatus::Enabled => SdkStatus::Enabled, - rc_core::ReplicationRuleStatus::Disabled => SdkStatus::Disabled, - }; - - let mut dest_builder = Destination::builder().bucket(&rule.destination.bucket_arn); - if let Some(sc) = &rule.destination.storage_class { - dest_builder = dest_builder.storage_class(StorageClass::from(sc.as_str())); - } - let destination = dest_builder - .build() - .map_err(|e| Error::General(format!("build destination: {e}")))?; - - let filter = rule - .prefix - .as_ref() - .map(|p| ReplicationRuleFilter::builder().prefix(p).build()); - - let dmr = rule.delete_marker_replication.map(|enabled| { - DeleteMarkerReplication::builder() - .status(if enabled { - DeleteMarkerReplicationStatus::Enabled - } else { - DeleteMarkerReplicationStatus::Disabled - }) - .build() - }); - - let eor = rule.existing_object_replication.map(|enabled| { - ExistingObjectReplication::builder() - .status(if enabled { - ExistingObjectReplicationStatus::Enabled - } else { - ExistingObjectReplicationStatus::Disabled - }) - .build() - .expect("build existing object replication") - }); - - let mut builder = SdkRule::builder() - .id(&rule.id) - .priority(rule.priority) - .status(status) - .destination(destination); - if let Some(filter) = filter { - builder = builder.filter(filter); - } - if let Some(dmr) = dmr { - builder = builder.delete_marker_replication(dmr); - } - if let Some(eor) = eor { - builder = builder.existing_object_replication(eor); - } - - let sdk_rule = builder - .build() - .map_err(|e| Error::General(format!("build replication rule: {e}")))?; - sdk_rules.push(sdk_rule); - } - - let sdk_config = SdkConfig::builder() - .role(&config.role) - .set_rules(Some(sdk_rules)) - .build() - .map_err(|e| Error::General(format!("build replication config: {e}")))?; - - self.inner - .put_bucket_replication() - .bucket(bucket) - .replication_configuration(sdk_config) - .send() + let url = 
self.replication_url(bucket)?; + let body = build_replication_configuration_xml(&config).into_bytes(); + self.xml_request(Method::PUT, url, Some("application/xml"), Some(body)) .await - .map_err(|e| { - Error::General(format!( - "set_bucket_replication: {}", - Self::format_sdk_error(&e) - )) - })?; + .map_err(|e| Error::General(format!("set_bucket_replication: {e}")))?; Ok(()) } @@ -2011,6 +2236,75 @@ mod tests { assert_eq!(info.size_bytes, Some(1024)); } + #[test] + fn parse_replication_configuration_xml_reads_delete_replication() { + let body = r#" + + arn:rustfs:replication:us-east-1:123:test + + Enabled + + arn:rustfs:replication:us-east-1:123:dest + STANDARD + + rule-1 + 1 + + logs/ + + + Enabled + + + Disabled + + + Enabled + + +"#; + + let config = parse_replication_configuration_xml(body).expect("parse replication xml"); + assert_eq!(config.role, "arn:rustfs:replication:us-east-1:123:test"); + assert_eq!(config.rules.len(), 1); + assert_eq!(config.rules[0].id, "rule-1"); + assert_eq!(config.rules[0].prefix.as_deref(), Some("logs/")); + assert_eq!(config.rules[0].delete_replication, Some(true)); + assert_eq!(config.rules[0].delete_marker_replication, Some(false)); + assert_eq!(config.rules[0].existing_object_replication, Some(true)); + } + + #[test] + fn build_replication_configuration_xml_writes_delete_replication() { + let config = ReplicationConfiguration { + role: "arn:rustfs:replication:us-east-1:123:test".to_string(), + rules: vec![rc_core::ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: rc_core::ReplicationRuleStatus::Enabled, + prefix: Some("logs/".to_string()), + tags: None, + destination: rc_core::ReplicationDestination { + bucket_arn: "arn:rustfs:replication:us-east-1:123:dest".to_string(), + storage_class: Some("STANDARD".to_string()), + }, + delete_marker_replication: Some(true), + existing_object_replication: Some(true), + delete_replication: Some(true), + }], + }; + + let xml = 
build_replication_configuration_xml(&config); + assert!(xml.contains("Enabled")); + assert!(xml.contains( + "Enabled" + )); + assert!(xml.contains( + "Enabled" + )); + assert!(xml.contains("logs/")); + } + #[test] fn bucket_policy_error_kind_uses_error_code() { assert_eq!( From e3a8e58f5fca89815ae2fd136ac1f339668fed5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E7=99=BB=E5=B1=B1?= Date: Fri, 20 Mar 2026 13:42:15 +0800 Subject: [PATCH 4/5] feat(phase-2): fix lifecycle and replication round-trips --- crates/cli/src/commands/ilm/rule.rs | 11 +- crates/cli/src/commands/replicate.rs | 440 +++++++++++++++++++++++++-- crates/s3/src/client.rs | 301 ++++++++++++++++-- 3 files changed, 698 insertions(+), 54 deletions(-) diff --git a/crates/cli/src/commands/ilm/rule.rs b/crates/cli/src/commands/ilm/rule.rs index 537610f..872fe11 100644 --- a/crates/cli/src/commands/ilm/rule.rs +++ b/crates/cli/src/commands/ilm/rule.rs @@ -239,10 +239,13 @@ async fn execute_add(args: AddRuleArgs, output_config: OutputConfig) -> ExitCode }; // Get existing rules - let mut rules = client - .get_bucket_lifecycle(&bucket) - .await - .unwrap_or_default(); + let mut rules = match client.get_bucket_lifecycle(&bucket).await { + Ok(rules) => rules, + Err(error) => { + formatter.error(&format!("Failed to get lifecycle rules: {error}")); + return ExitCode::GeneralError; + } + }; // Generate rule ID let rule_id = generate_rule_id(); diff --git a/crates/cli/src/commands/replicate.rs b/crates/cli/src/commands/replicate.rs index 1439668..53cbf73 100644 --- a/crates/cli/src/commands/replicate.rs +++ b/crates/cli/src/commands/replicate.rs @@ -12,7 +12,8 @@ use rc_core::replication::{ }; use rc_core::{AliasManager, ObjectStore as _}; use rc_s3::{AdminClient, S3Client}; -use serde::Serialize; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeSet, HashMap}; use crate::exit_code::ExitCode; use crate::output::{Formatter, OutputConfig}; @@ -210,6 +211,15 @@ struct ReplicateOperationOutput { 
action: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct ReplicationExport { + #[serde(flatten)] + config: ReplicationConfiguration, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + remote_targets: Vec, +} + // ==================== execute ==================== /// Execute the replicate command @@ -408,14 +418,64 @@ async fn execute_update(args: UpdateArgs, output_config: OutputConfig) -> ExitCo } }; - let rule = match config.rules.iter_mut().find(|r| r.id == args.id) { - Some(rule) => rule, + let rule_index = match config.rules.iter().position(|rule| rule.id == args.id) { + Some(index) => index, None => { formatter.error(&format!("Rule '{}' not found", args.id)); return ExitCode::NotFound; } }; + let current_target_arn = config.rules[rule_index].destination.bucket_arn.clone(); + + if target_level_updates_requested(&args) { + let admin_client = match setup_admin_client(&source_alias, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let mut target = match admin_client.list_remote_targets(&source_bucket).await { + Ok(targets) => match targets + .into_iter() + .find(|target| target.arn == current_target_arn) + { + Some(target) => target, + None => { + formatter.error(&format!( + "Remote target '{}' not found for rule '{}'", + current_target_arn, args.id + )); + return ExitCode::NotFound; + } + }, + Err(error) => { + formatter.error(&format!("Failed to list remote targets: {error}")); + return ExitCode::GeneralError; + } + }; + + apply_target_updates(&mut target, &args); + + let updated_arn = match admin_client + .set_remote_target(&source_bucket, target, true) + .await + { + Ok(arn) => arn, + Err(error) => { + formatter.error(&format!("Failed to update remote target: {error}")); + return ExitCode::GeneralError; + } + }; + + if updated_arn != current_target_arn { + let mut arn_map = HashMap::new(); + arn_map.insert(current_target_arn.clone(), updated_arn); + 
remap_replication_arns(&mut config, &arn_map); + } + } + + let rule = &mut config.rules[rule_index]; + // Apply updates if let Some(priority) = args.priority { rule.priority = priority; @@ -601,25 +661,66 @@ async fn execute_remove(args: RemoveArgs, output_config: OutputConfig) -> ExitCo Err(code) => return code, }; + let admin_client = match setup_admin_client(&alias_name, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + if args.all { - match client.delete_bucket_replication(&bucket).await { - Ok(()) => { - if formatter.is_json() { - formatter.json(&ReplicateOperationOutput { - bucket, - rule_id: "*".to_string(), - action: "removed".to_string(), - }); - } else { - formatter.success("All replication rules removed."); - } - return ExitCode::Success; + let targets = match admin_client.list_remote_targets(&bucket).await { + Ok(targets) => targets, + Err(error) => { + formatter.error(&format!("Failed to list remote targets: {error}")); + return ExitCode::GeneralError; } + }; + + let config = match client.get_bucket_replication(&bucket).await { + Ok(config) => config, Err(error) => { - formatter.error(&format!("Failed to remove replication config: {error}")); + formatter.error(&format!("Failed to get replication config: {error}")); + return ExitCode::GeneralError; + } + }; + + if config.is_none() && targets.is_empty() { + formatter.error("No replication configuration found on this bucket"); + return ExitCode::NotFound; + } + + if config.is_some() + && let Err(error) = client.delete_bucket_replication(&bucket).await + { + formatter.error(&format!("Failed to remove replication config: {error}")); + return ExitCode::GeneralError; + } + + for target in targets { + if target.arn.is_empty() { + continue; + } + if let Err(error) = admin_client + .remove_remote_target(&bucket, &target.arn) + .await + { + formatter.error(&format!( + "Failed to remove remote target '{}': {error}", + target.arn + )); return ExitCode::GeneralError; } } + + if 
formatter.is_json() { + formatter.json(&ReplicateOperationOutput { + bucket, + rule_id: "*".to_string(), + action: "removed".to_string(), + }); + } else { + formatter.success("All replication rules removed."); + } + return ExitCode::Success; } // Remove specific rule by ID @@ -637,13 +738,24 @@ async fn execute_remove(args: RemoveArgs, output_config: OutputConfig) -> ExitCo } }; - let before = config.rules.len(); - config.rules.retain(|r| r.id != rule_id); + let removed_rule = match config + .rules + .iter() + .position(|rule| rule.id == rule_id) + .map(|index| config.rules.remove(index)) + { + Some(rule) => rule, + None => { + formatter.error(&format!("Rule '{}' not found", rule_id)); + return ExitCode::NotFound; + } + }; - if config.rules.len() == before { - formatter.error(&format!("Rule '{}' not found", rule_id)); - return ExitCode::NotFound; - } + let should_remove_target = !removed_rule.destination.bucket_arn.is_empty() + && !config + .rules + .iter() + .any(|rule| rule.destination.bucket_arn == removed_rule.destination.bucket_arn); if config.rules.is_empty() { match client.delete_bucket_replication(&bucket).await { @@ -663,6 +775,18 @@ async fn execute_remove(args: RemoveArgs, output_config: OutputConfig) -> ExitCo } } + if should_remove_target + && let Err(error) = admin_client + .remove_remote_target(&bucket, &removed_rule.destination.bucket_arn) + .await + { + formatter.error(&format!( + "Failed to remove remote target '{}': {error}", + removed_rule.destination.bucket_arn + )); + return ExitCode::GeneralError; + } + if formatter.is_json() { formatter.json(&ReplicateOperationOutput { bucket, @@ -693,9 +817,24 @@ async fn execute_export(args: BucketArg, output_config: OutputConfig) -> ExitCod Err(code) => return code, }; + let admin_client = match setup_admin_client(&alias_name, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + match client.get_bucket_replication(&bucket).await { Ok(Some(config)) => { - formatter.json(&config); + let 
remote_targets = match admin_client.list_remote_targets(&bucket).await { + Ok(targets) => relevant_remote_targets(targets, &config), + Err(error) => { + formatter.error(&format!("Failed to list remote targets: {error}")); + return ExitCode::GeneralError; + } + }; + formatter.json(&ReplicationExport { + config, + remote_targets, + }); ExitCode::Success } Ok(None) => { @@ -730,8 +869,8 @@ async fn execute_import(args: ImportArgs, output_config: OutputConfig) -> ExitCo } }; - let config: ReplicationConfiguration = match serde_json::from_str(&data) { - Ok(config) => config, + let import: ReplicationExport = match serde_json::from_str(&data) { + Ok(import) => import, Err(error) => { formatter.error(&format!("Invalid JSON in '{}': {error}", args.file)); return ExitCode::UsageError; @@ -743,6 +882,57 @@ async fn execute_import(args: ImportArgs, output_config: OutputConfig) -> ExitCo Err(code) => return code, }; + let mut config = import.config; + + if !import.remote_targets.is_empty() { + let admin_client = match setup_admin_client(&alias_name, &formatter) { + Ok(client) => client, + Err(code) => return code, + }; + + let existing_targets = match admin_client.list_remote_targets(&bucket).await { + Ok(targets) => targets, + Err(error) => { + formatter.error(&format!("Failed to list remote targets: {error}")); + return ExitCode::GeneralError; + } + }; + + let mut arn_map = HashMap::new(); + for imported_target in import.remote_targets { + let mut target = normalize_imported_target(imported_target, &bucket); + let old_arn = target.arn.clone(); + + let resolved_arn = if let Some(existing_target) = + find_matching_remote_target(&existing_targets, &target) + { + target.arn = existing_target.arn.clone(); + match admin_client.set_remote_target(&bucket, target, true).await { + Ok(arn) => arn, + Err(error) => { + formatter.error(&format!("Failed to update remote target: {error}")); + return ExitCode::GeneralError; + } + } + } else { + target.arn.clear(); + match 
admin_client.set_remote_target(&bucket, target, false).await { + Ok(arn) => arn, + Err(error) => { + formatter.error(&format!("Failed to create remote target: {error}")); + return ExitCode::GeneralError; + } + } + }; + + if !old_arn.is_empty() { + arn_map.insert(old_arn, resolved_arn); + } + } + + remap_replication_arns(&mut config, &arn_map); + } + match client.set_bucket_replication(&bucket, config).await { Ok(()) => { if formatter.is_json() { @@ -893,6 +1083,97 @@ fn default_replication_role(bucket_arn: &str) -> String { bucket_arn.to_string() } +fn collect_target_arns(config: &ReplicationConfiguration) -> BTreeSet { + config + .rules + .iter() + .filter_map(|rule| { + let arn = rule.destination.bucket_arn.trim(); + if arn.is_empty() { + None + } else { + Some(arn.to_string()) + } + }) + .collect() +} + +fn relevant_remote_targets( + targets: Vec, + config: &ReplicationConfiguration, +) -> Vec { + let referenced = collect_target_arns(config); + targets + .into_iter() + .filter(|target| referenced.contains(target.arn.as_str())) + .collect() +} + +fn target_level_updates_requested(args: &UpdateArgs) -> bool { + args.storage_class.is_some() + || args.bandwidth.is_some() + || args.sync.is_some() + || args.healthcheck_seconds.is_some() + || args.disable_proxy.is_some() +} + +fn apply_target_updates(target: &mut BucketTarget, args: &UpdateArgs) { + if let Some(storage_class) = &args.storage_class { + target.storage_class = storage_class.clone(); + } + if let Some(bandwidth) = args.bandwidth { + target.bandwidth_limit = bandwidth; + } + if let Some(sync) = args.sync { + target.replication_sync = sync; + } + if let Some(healthcheck_seconds) = args.healthcheck_seconds { + target.health_check_duration = healthcheck_seconds; + } + if let Some(disable_proxy) = args.disable_proxy { + target.disable_proxy = disable_proxy; + } +} + +fn remap_replication_arns( + config: &mut ReplicationConfiguration, + arn_map: &HashMap, +) { + if let Some(updated_role) = 
arn_map.get(&config.role) { + config.role = updated_role.clone(); + } + + for rule in &mut config.rules { + if let Some(updated_arn) = arn_map.get(&rule.destination.bucket_arn) { + rule.destination.bucket_arn = updated_arn.clone(); + } + } +} + +fn find_matching_remote_target<'a>( + targets: &'a [BucketTarget], + expected: &BucketTarget, +) -> Option<&'a BucketTarget> { + targets.iter().find(|target| { + target.endpoint == expected.endpoint + && target.target_bucket == expected.target_bucket + && target.secure == expected.secure + && target.region == expected.region + && target.target_type == expected.target_type + }) +} + +fn normalize_imported_target(mut target: BucketTarget, bucket: &str) -> BucketTarget { + target.source_bucket = bucket.to_string(); + if target.path.is_empty() { + target.path = DEFAULT_REMOTE_TARGET_PATH.to_string(); + } + if target.api.is_empty() { + target.api = DEFAULT_REMOTE_TARGET_API.to_string(); + } + target +} + fn format_replication_flags(rule: &ReplicationRule) -> String { let mut flags = Vec::new(); @@ -934,6 +1215,7 @@ fn strip_endpoint_path(endpoint: &str) -> String { #[cfg(test)] mod tests { use super::*; + use std::collections::HashMap; #[test] fn test_parse_bucket_path_success() { @@ -990,6 +1272,114 @@ mod tests { assert_eq!(default_replication_role(arn), arn); } + #[test] + fn test_collect_target_arns_deduplicates_destinations() { + let config = ReplicationConfiguration { + role: String::new(), + rules: vec![ + ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: ReplicationRuleStatus::Enabled, + prefix: None, + tags: None, + destination: ReplicationDestination { + bucket_arn: "arn:one".to_string(), + storage_class: None, + }, + delete_marker_replication: None, + existing_object_replication: None, + delete_replication: None, + }, + ReplicationRule { + id: "rule-2".to_string(), + priority: 2, + status: ReplicationRuleStatus::Enabled, + prefix: None, + tags: None, + destination: ReplicationDestination { + 
bucket_arn: "arn:one".to_string(), + storage_class: None, + }, + delete_marker_replication: None, + existing_object_replication: None, + delete_replication: None, + }, + ], + }; + + let arns = collect_target_arns(&config); + assert_eq!(arns.len(), 1); + assert!(arns.contains("arn:one")); + } + + #[test] + fn test_remap_replication_arns_updates_role_and_rules() { + let mut config = ReplicationConfiguration { + role: "arn:old".to_string(), + rules: vec![ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: ReplicationRuleStatus::Enabled, + prefix: None, + tags: None, + destination: ReplicationDestination { + bucket_arn: "arn:old".to_string(), + storage_class: None, + }, + delete_marker_replication: None, + existing_object_replication: None, + delete_replication: None, + }], + }; + + let mut arn_map = HashMap::new(); + arn_map.insert("arn:old".to_string(), "arn:new".to_string()); + remap_replication_arns(&mut config, &arn_map); + + assert_eq!(config.role, "arn:new"); + assert_eq!(config.rules[0].destination.bucket_arn, "arn:new"); + } + + #[test] + fn test_replication_export_parses_legacy_config_shape() { + let payload = r#"{ + "role": "arn:role", + "rules": [] + }"#; + + let export: ReplicationExport = serde_json::from_str(payload).expect("parse export"); + assert_eq!(export.config.role, "arn:role"); + assert!(export.remote_targets.is_empty()); + } + + #[test] + fn test_find_matching_remote_target_matches_endpoint_bucket_and_region() { + let targets = vec![BucketTarget { + source_bucket: "source".to_string(), + endpoint: "remote:9000".to_string(), + target_bucket: "dest".to_string(), + secure: true, + target_type: "replication".to_string(), + region: "us-east-1".to_string(), + arn: "arn:one".to_string(), + ..Default::default() + }]; + + let expected = BucketTarget { + source_bucket: "other".to_string(), + endpoint: "remote:9000".to_string(), + target_bucket: "dest".to_string(), + secure: true, + target_type: "replication".to_string(), + region: 
"us-east-1".to_string(), + ..Default::default() + }; + + let matched = find_matching_remote_target(&targets, &expected).expect("matching target"); + assert_eq!(matched.arn, "arn:one"); + } + #[test] fn test_format_replication_flags_includes_delete_replication() { let rule = ReplicationRule { diff --git a/crates/s3/src/client.rs b/crates/s3/src/client.rs index dcdc2b7..2f5c9e0 100644 --- a/crates/s3/src/client.rs +++ b/crates/s3/src/client.rs @@ -28,6 +28,7 @@ use reqwest::Method; use reqwest::header::{CONTENT_TYPE, HeaderMap, HeaderName, HeaderValue}; use serde::Deserialize; use sha2::{Digest, Sha256}; +use std::collections::HashMap; use tokio::io::AsyncReadExt; /// Keep single-part uploads small to avoid backend incompatibilities with @@ -107,6 +108,23 @@ struct ReplicationRuleXml { #[serde(rename_all = "PascalCase")] struct ReplicationFilterXml { prefix: Option, + tag: Option, + and: Option, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct ReplicationAndXml { + prefix: Option, + #[serde(rename = "Tag", default)] + tags: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct TagXml { + key: Option, + value: Option, } #[derive(Debug, Deserialize)] @@ -137,6 +155,95 @@ fn parse_replication_rule_status(status: Option<&str>) -> rc_core::ReplicationRu } } +fn collect_tag_map<'a, I>(tags: I) -> Option> +where + I: IntoIterator, +{ + let collected: HashMap = tags + .into_iter() + .map(|(key, value)| (key.to_string(), value.to_string())) + .collect(); + if collected.is_empty() { + None + } else { + Some(collected) + } +} + +fn parse_tag_xml(tag: Option<&TagXml>) -> Option> { + collect_tag_map(tag.and_then(|tag| Some((tag.key.as_deref()?, tag.value.as_deref()?)))) +} + +fn parse_tag_xmls(tags: &[TagXml]) -> Option> { + collect_tag_map( + tags.iter() + .filter_map(|tag| Some((tag.key.as_deref()?, tag.value.as_deref()?))), + ) +} + +fn parse_replication_filter_prefix(filter: Option<&ReplicationFilterXml>) -> 
Option { + filter + .and_then(|filter| filter.prefix.clone()) + .or_else(|| filter.and_then(|filter| filter.and.as_ref()?.prefix.clone())) +} + +fn parse_replication_filter_tags( + filter: Option<&ReplicationFilterXml>, +) -> Option> { + filter + .and_then(|filter| parse_tag_xml(filter.tag.as_ref())) + .or_else(|| filter.and_then(|filter| parse_tag_xmls(&filter.and.as_ref()?.tags))) +} + +fn sorted_tags(tags: &HashMap) -> Vec<(&str, &str)> { + let mut pairs: Vec<(&str, &str)> = tags + .iter() + .map(|(key, value)| (key.as_str(), value.as_str())) + .collect(); + pairs.sort_unstable(); + pairs +} + +fn append_tag_xml(xml: &mut String, key: &str, value: &str) { + xml.push_str(""); + xml.push_str(&xml_escape(key)); + xml.push_str(""); + xml.push_str(&xml_escape(value)); + xml.push_str(""); +} + +fn append_replication_filter_xml( + xml: &mut String, + prefix: Option<&str>, + tags: Option<&HashMap>, +) { + let Some(tags) = tags.filter(|tags| !tags.is_empty()) else { + if let Some(prefix) = prefix { + xml.push_str(""); + xml.push_str(&xml_escape(prefix)); + xml.push_str(""); + } + return; + }; + + xml.push_str(""); + if prefix.is_some() || tags.len() > 1 { + xml.push_str(""); + if let Some(prefix) = prefix { + xml.push_str(""); + xml.push_str(&xml_escape(prefix)); + xml.push_str(""); + } + for (key, value) in sorted_tags(tags) { + append_tag_xml(xml, key, value); + } + xml.push_str(""); + } else if let Some((key, value)) = sorted_tags(tags).into_iter().next() { + append_tag_xml(xml, key, value); + } + xml.push_str(""); +} + fn parse_replication_configuration_xml(body: &str) -> Result { let config: ReplicationConfigurationXml = from_xml_str(body) .map_err(|e| Error::General(format!("parse replication config xml: {e}")))?; @@ -148,11 +255,8 @@ fn parse_replication_configuration_xml(body: &str) -> Result Str xml.push_str(&rule.priority.to_string()); xml.push_str(""); - if let Some(prefix) = &rule.prefix { - xml.push_str(""); - xml.push_str(&xml_escape(prefix)); - 
xml.push_str(""); - } + append_replication_filter_xml(&mut xml, rule.prefix.as_deref(), rule.tags.as_ref()); append_replication_status_tag( &mut xml, @@ -268,6 +368,82 @@ fn build_replication_configuration_xml(config: &ReplicationConfiguration) -> Str xml } +fn parse_lifecycle_filter_prefix( + filter: Option<&aws_sdk_s3::types::LifecycleRuleFilter>, +) -> Option { + filter + .and_then(|filter| filter.prefix().map(str::to_string)) + .or_else(|| filter.and_then(|filter| filter.and()?.prefix().map(str::to_string))) +} + +fn parse_lifecycle_filter_tags( + filter: Option<&aws_sdk_s3::types::LifecycleRuleFilter>, +) -> Option> { + filter + .and_then(|filter| collect_tag_map(filter.tag().map(|tag| (tag.key(), tag.value())))) + .or_else(|| { + filter.and_then(|filter| { + collect_tag_map( + filter + .and()? + .tags() + .iter() + .map(|tag| (tag.key(), tag.value())), + ) + }) + }) +} + +fn build_s3_tag(key: &str, value: &str) -> Result { + aws_sdk_s3::types::Tag::builder() + .key(key) + .value(value) + .build() + .map_err(|error| Error::General(format!("build filter tag: {error}"))) +} + +fn build_lifecycle_rule_filter( + prefix: Option<&str>, + tags: Option<&HashMap>, +) -> Result> { + let Some(tags) = tags.filter(|tags| !tags.is_empty()) else { + return Ok(prefix.map(|prefix| { + aws_sdk_s3::types::LifecycleRuleFilter::builder() + .prefix(prefix) + .build() + })); + }; + + let tag_values = sorted_tags(tags) + .into_iter() + .map(|(key, value)| build_s3_tag(key, value)) + .collect::>>()?; + + let filter = if prefix.is_some() || tag_values.len() > 1 { + let mut and_builder = aws_sdk_s3::types::LifecycleRuleAndOperator::builder(); + if let Some(prefix) = prefix { + and_builder = and_builder.prefix(prefix); + } + for tag in tag_values { + and_builder = and_builder.tags(tag); + } + aws_sdk_s3::types::LifecycleRuleFilter::builder() + .and(and_builder.build()) + .build() + } else { + aws_sdk_s3::types::LifecycleRuleFilter::builder() + .tag( + tag_values + .into_iter() + .next() 
+ .expect("non-empty tags required to build lifecycle filter"), + ) + .build() + }; + + Ok(Some(filter)) +} + impl HttpConnector for ReqwestConnector { fn call(&self, request: HttpRequest) -> HttpConnectorFuture { let client = self.client.clone(); @@ -1941,15 +2117,8 @@ impl ObjectStore for S3Client { _ => rc_core::LifecycleRuleStatus::Disabled, }; - let prefix = sdk_rule - .filter() - .and_then(|f| f.prefix().map(|p| p.to_string())) - .or_else(|| { - sdk_rule - .filter() - .and_then(|f| f.and()) - .and_then(|a| a.prefix().map(|p: &str| p.to_string())) - }); + let prefix = parse_lifecycle_filter_prefix(sdk_rule.filter()); + let tags = parse_lifecycle_filter_tags(sdk_rule.filter()); let expiration = sdk_rule .expiration() @@ -2002,7 +2171,7 @@ impl ObjectStore for S3Client { id, status, prefix, - tags: None, + tags, expiration, transition, noncurrent_version_expiration, @@ -2018,7 +2187,7 @@ impl ObjectStore for S3Client { async fn set_bucket_lifecycle(&self, bucket: &str, rules: Vec) -> Result<()> { use aws_sdk_s3::types::{ AbortIncompleteMultipartUpload, BucketLifecycleConfiguration, ExpirationStatus, - LifecycleExpiration as SdkExpiration, LifecycleRule as SdkRule, LifecycleRuleFilter, + LifecycleExpiration as SdkExpiration, LifecycleRule as SdkRule, NoncurrentVersionExpiration as SdkNve, NoncurrentVersionTransition as SdkNvt, Transition, TransitionStorageClass, }; @@ -2030,10 +2199,7 @@ impl ObjectStore for S3Client { rc_core::LifecycleRuleStatus::Disabled => ExpirationStatus::Disabled, }; - let filter = rule - .prefix - .as_ref() - .map(|p| LifecycleRuleFilter::builder().prefix(p).build()); + let filter = build_lifecycle_rule_filter(rule.prefix.as_deref(), rule.tags.as_ref())?; let expiration = rule.expiration.map(|exp| { let mut builder = SdkExpiration::builder(); @@ -2228,6 +2394,7 @@ impl ObjectStore for S3Client { #[cfg(test)] mod tests { use super::*; + use std::collections::HashMap; #[test] fn test_object_info_creation() { @@ -2274,6 +2441,41 @@ mod tests 
{ assert_eq!(config.rules[0].existing_object_replication, Some(true)); } + #[test] + fn parse_replication_configuration_xml_preserves_tag_filters() { + let body = r#" + + + Enabled + + arn:rustfs:replication:us-east-1:123:dest + + tagged-rule + 2 + + + logs/ + + env + prod + + + team + core + + + + +"#; + + let config = parse_replication_configuration_xml(body).expect("parse replication xml"); + let rule = &config.rules[0]; + assert_eq!(rule.prefix.as_deref(), Some("logs/")); + let tags = rule.tags.as_ref().expect("tag filters"); + assert_eq!(tags.get("env").map(String::as_str), Some("prod")); + assert_eq!(tags.get("team").map(String::as_str), Some("core")); + } + #[test] fn build_replication_configuration_xml_writes_delete_replication() { let config = ReplicationConfiguration { @@ -2305,6 +2507,55 @@ mod tests { assert!(xml.contains("logs/")); } + #[test] + fn build_replication_configuration_xml_writes_and_tag_filters() { + let mut tags = HashMap::new(); + tags.insert("env".to_string(), "prod".to_string()); + tags.insert("team".to_string(), "core".to_string()); + + let config = ReplicationConfiguration { + role: String::new(), + rules: vec![rc_core::ReplicationRule { + id: "rule-1".to_string(), + priority: 1, + status: rc_core::ReplicationRuleStatus::Enabled, + prefix: Some("logs/".to_string()), + tags: Some(tags), + destination: rc_core::ReplicationDestination { + bucket_arn: "arn:rustfs:replication:us-east-1:123:dest".to_string(), + storage_class: None, + }, + delete_marker_replication: None, + existing_object_replication: None, + delete_replication: None, + }], + }; + + let xml = build_replication_configuration_xml(&config); + assert!(xml.contains("logs/")); + assert!(xml.contains("envprod")); + assert!(xml.contains("teamcore")); + } + + #[test] + fn build_lifecycle_rule_filter_preserves_prefix_and_tags() { + let mut tags = HashMap::new(); + tags.insert("env".to_string(), "prod".to_string()); + tags.insert("team".to_string(), "core".to_string()); + + let filter 
= build_lifecycle_rule_filter(Some("logs/"), Some(&tags)) + .expect("build lifecycle filter") + .expect("lifecycle filter"); + + assert_eq!( + parse_lifecycle_filter_prefix(Some(&filter)).as_deref(), + Some("logs/") + ); + let parsed_tags = parse_lifecycle_filter_tags(Some(&filter)).expect("parsed tags"); + assert_eq!(parsed_tags.get("env").map(String::as_str), Some("prod")); + assert_eq!(parsed_tags.get("team").map(String::as_str), Some("core")); + } + #[test] fn bucket_policy_error_kind_uses_error_code() { assert_eq!( From 7d810d550e68316aa5fbb2ef1509c4ed8b4ea86f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A9=AC=E7=99=BB=E5=B1=B1?= Date: Fri, 20 Mar 2026 13:43:39 +0800 Subject: [PATCH 5/5] feat(phase-0): ignore claude workspace files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index a6b5781..02c1c23 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ .idea/ .vscode/ .cursor/ +.claude/ *.swp *.swo *~