From 6661fde2665667816fb626e12efa4ec98c2866c6 Mon Sep 17 00:00:00 2001 From: David Cook Date: Wed, 1 Apr 2026 17:30:01 -0500 Subject: [PATCH 1/2] Add aggregator API arguments to janus_cli --- aggregator/src/binaries/janus_cli.rs | 277 +++++++++++++++++++-------- aggregator/src/binary_utils.rs | 10 +- 2 files changed, 203 insertions(+), 84 deletions(-) diff --git a/aggregator/src/binaries/janus_cli.rs b/aggregator/src/binaries/janus_cli.rs index 4a5d65875..d4eaa3c0b 100644 --- a/aggregator/src/binaries/janus_cli.rs +++ b/aggregator/src/binaries/janus_cli.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Cow, collections::BTreeMap, fmt::Debug, path::{Path, PathBuf}, @@ -9,7 +10,10 @@ use anyhow::{Context, Result, anyhow}; use aws_lc_rs::aead::AES_128_GCM; use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD}; use chrono::TimeDelta; -use clap::Parser; +use clap::{ + Parser, + builder::{StringValueParser, TypedValueParser}, +}; use itertools::Itertools; use janus_aggregator_api::git_revision; use janus_aggregator_core::{ @@ -47,26 +51,32 @@ use tracing::{debug, info}; use url::Url; use crate::{ - binary_utils::{CommonBinaryOptions, database_pool, datastore, read_config}, + binary_utils::{database_pool, datastore, read_config}, config::{BinaryConfig, CommonConfig}, metrics::{MetricsExporterHandle, install_metrics_exporter}, - trace::{TraceGuards, install_trace_subscriber}, + trace::{TokioConsoleConfiguration, TraceConfiguration, TraceGuards, install_trace_subscriber}, }; pub fn run(command_line_options: CommandLineOptions) -> Result<()> { initialize_rustls(); // Read and parse config. 
-    let config_file: ConfigFile = read_config(&command_line_options.common_options)?; +    let config_file: Option<ConfigFile> = command_line_options + .config_file + .as_ref() + .map(read_config) + .transpose()?; let runtime = runtime::Builder::new_multi_thread().enable_all().build()?; runtime.block_on(async { - let _guards = - install_tracing_and_metrics_handlers(config_file.common_config(), &runtime).await?; + let _guards = install_tracing_and_metrics_handlers( + config_file.as_ref().map(ConfigFile::common_config), + &runtime, + ) + .await?; info!( - common_options = ?&command_line_options.common_options, config = ?config_file, version = env!("CARGO_PKG_VERSION"), git_revision = git_revision(), @@ -80,7 +90,7 @@ pub fn run(command_line_options: CommandLineOptions) -> Result<()> { command_line_options .cmd - .execute(&command_line_options, &config_file) + .execute(&command_line_options, config_file.as_ref()) .await }) } @@ -226,21 +236,34 @@ impl Command { async fn execute( &self, command_line_options: &CommandLineOptions, - config_file: &ConfigFile, + config_file: Option<&ConfigFile>, ) -> Result<()> { // Note: to keep this function reasonably-readable, individual command handlers should // generally create the command's dependencies based on options/config, then call another // function with the main command logic. 
let kube_client = LazyKubeClient::new(); - match self { - Command::GenerateHpkeKey { - kubernetes_secret_options, - id, - kem, - kdf, - aead, - hpke_config_out_file, - } => { + let aggregator_api = command_line_options + .aggregator_api_url + .as_ref() + .and_then(|url| { + Some(( + url, + command_line_options.aggregator_api_auth_token.as_ref()?, + )) + }); + match (self, config_file, aggregator_api) { + ( + Command::GenerateHpkeKey { + kubernetes_secret_options, + id, + kem, + kdf, + aead, + hpke_config_out_file, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -261,11 +284,19 @@ impl Command { .await } - Command::SetHpkeKeyState { - kubernetes_secret_options, - id, - state, - } => { + (Command::GenerateHpkeKey { .. }, None, _) => Err(anyhow!( + "generate-hpke-key requires a configuration file for database access" + )), + + ( + Command::SetHpkeKeyState { + kubernetes_secret_options, + id, + state, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -283,17 +314,25 @@ impl Command { .await } - Command::AddTaskprovPeerAggregator { - kubernetes_secret_options, - peer_endpoint, - peer_role, - aggregation_mode, - verify_key_init, - collector_hpke_config_file, - report_expiry_age_s, - aggregator_auth_token, - collector_auth_token, - } => { + (Command::SetHpkeKeyState { .. 
}, None, _) => Err(anyhow!( + "set-hpke-key-state requires a configuration file for database access" + )), + + ( + Command::AddTaskprovPeerAggregator { + kubernetes_secret_options, + peer_endpoint, + peer_role, + aggregation_mode, + verify_key_init, + collector_hpke_config_file, + report_expiry_age_s, + aggregator_auth_token, + collector_auth_token, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -317,12 +356,20 @@ impl Command { .await } - Command::ProvisionTasks { - kubernetes_secret_options, - tasks_file, - generate_missing_parameters, - echo_tasks, - } => { + (Command::AddTaskprovPeerAggregator { .. }, None, _) => Err(anyhow!( + "add-taskprov-peer-aggregator requires a configuration file for database access" + )), + + ( + Command::ProvisionTasks { + kubernetes_secret_options, + tasks_file, + generate_missing_parameters, + echo_tasks, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -348,9 +395,17 @@ impl Command { Ok(()) } - Command::CreateDatastoreKey { - kubernetes_secret_options, - } => { + (Command::ProvisionTasks { .. 
}, None, _) => Err(anyhow!( + "provision-tasks requires a configuration file for database access" + )), + + ( + Command::CreateDatastoreKey { + kubernetes_secret_options, + }, + _, + _, + ) => { let k8s_namespace = kubernetes_secret_options .secrets_k8s_namespace .as_deref() @@ -365,11 +420,15 @@ impl Command { .await } - Command::ListCollectionJobs { - task, - job, - kubernetes_secret_options, - } => { + ( + Command::ListCollectionJobs { + task, + job, + kubernetes_secret_options, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -394,11 +453,19 @@ impl Command { .context("couldn't list collection jobs") } - Command::ListAggregationJobs { - task, - job, - kubernetes_secret_options, - } => { + (Command::ListCollectionJobs { .. }, None, _) => Err(anyhow!( + "list-collection-jobs requires a configuration file for database access" + )), + + ( + Command::ListAggregationJobs { + task, + job, + kubernetes_secret_options, + }, + Some(config_file), + _, + ) => { let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -422,6 +489,10 @@ impl Command { .await .context("couldn't list aggregation jobs") } + + (Command::ListAggregationJobs { .. }, None, _) => Err(anyhow!( + "list-aggregation-jobs requires a configuration file for database access" + )), } } } @@ -600,16 +671,44 @@ async fn list_aggregation_jobs_generic< } async fn install_tracing_and_metrics_handlers( - config: &CommonConfig, + config: Option<&CommonConfig>, runtime: &Runtime, -) -> Result<(TraceGuards, MetricsExporterHandle)> { +) -> Result<(TraceGuards, Option)> { + // Substitute a default logging configuration if no configuration file is provided. 
+ let (logging_config, metrics_config_opt) = match config.as_ref() { + Some(config) => ( + Cow::Borrowed(&config.logging_config), + Some(&config.metrics_config), + ), + None => ( + Cow::Owned(TraceConfiguration { + use_test_writer: false, + force_json_output: false, + stackdriver_json_output: false, + tokio_console_config: TokioConsoleConfiguration { + enabled: false, + listen_address: None, + }, + open_telemetry_config: None, + chrome: false, + }), + None, + ), + }; + // Discard the trace reload handler, since this program is short-lived. - let (trace_guard, _) = install_trace_subscriber(&config.logging_config) + let (trace_guard, _) = install_trace_subscriber(logging_config.as_ref()) .context("couldn't install tracing subscriber")?; - let metrics_guard = install_metrics_exporter(&config.metrics_config, runtime) - .await - .context("failed to install metrics exporter")?; + let metrics_guard = if let Some(metrics_config) = metrics_config_opt { + Some( + install_metrics_exporter(metrics_config, runtime) + .await + .context("failed to install metrics exporter")?, + ) + } else { + None + }; Ok((trace_guard, metrics_guard)) } @@ -854,10 +953,7 @@ async fn datastore_from_opts( ) -> Result<Datastore<RealClock>> { let pool = database_pool( &config_file.common_config.database, - command_line_options - .common_options - .database_password - .as_deref(), + command_line_options.database_password.as_deref(), ) .await?; @@ -866,7 +962,7 @@ RealClock::default(), &meter("janus_aggregator"), &kubernetes_secret_options - .datastore_keys(&command_line_options.common_options, kube_client) + .datastore_keys(&command_line_options.datastore_keys, kube_client) .await?, config_file.common_config().database.check_schema_version, config_file.common_config().max_transaction_retries, @@ -885,8 +981,37 @@ pub struct CommandLineOptions { #[clap(subcommand)] cmd: Command, - #[clap(flatten)] - common_options: CommonBinaryOptions, + /// Path to configuration YAML file + #[clap(long, env = 
"CONFIG_FILE", num_args = 1, required(false))] + config_file: Option<PathBuf>, + + /// Password for the PostgreSQL database connection + /// + /// If specified, it must not be specified in the connection string. + #[clap(long, env = "PGPASSWORD", hide_env_values = true)] + database_password: Option<String>, + + /// Datastore encryption keys + /// + /// Keys are encoded in unpadded url-safe base64, then comma separated. + #[clap( + long, + env = "DATASTORE_KEYS", + hide_env_values = true, + num_args = 1, + use_value_delimiter = true + )] + datastore_keys: Vec<String>, + + #[clap(long, short = 'u', requires = "aggregator_api_auth_token")] + aggregator_api_url: Option<Url>, + + #[clap( + long, + short = 'a', + value_parser = StringValueParser::new().try_map(AuthenticationToken::new_bearer_token_from_string), + requires = "aggregator_api_url", env, hide_env_values = true)] + aggregator_api_auth_token: Option<AuthenticationToken>, /// Do not make permanent changes /// @@ -928,7 +1053,7 @@ impl KubernetesSecretOptions { /// --datastore-keys. If neither was set, returns an error. 
async fn datastore_keys( &self, - options: &CommonBinaryOptions, + cli_datastore_keys: &[String], kube_client: &LazyKubeClient, ) -> Result<Vec<String>> { if let Some(secrets_namespace) = &self.secrets_k8s_namespace { fetch_datastore_keys( @@ -940,8 +1065,8 @@ impl KubernetesSecretOptions { ) .await .context("failed to fetch datastore key(s) from Kubernetes secret") - } else if !options.datastore_keys.is_empty() { - Ok(options.datastore_keys.clone()) + } else if !cli_datastore_keys.is_empty() { + Ok(cli_datastore_keys.to_vec()) } else { Err(anyhow!( "Either --datastore-keys or --secrets-k8s-namespace must be set" @@ -1044,7 +1169,6 @@ mod tests { CommandLineOptions, ConfigFile, KubernetesSecretOptions, LazyKubeClient, fetch_datastore_keys, }, - binary_utils::CommonBinaryOptions, config::{ CommonConfig, default_max_transaction_retries, test_util::{generate_db_config, generate_metrics_config, generate_trace_config}, @@ -1076,10 +1200,7 @@ Vec::from(["datastore-key-1".to_string(), "datastore-key-2".to_string()]); // Keys provided at command line, not present in k8s - let common_options = CommonBinaryOptions { - datastore_keys: expected_datastore_keys.clone(), - ..Default::default() - }; + let datastore_keys = expected_datastore_keys.clone(); let kubernetes_secret_options = KubernetesSecretOptions { datastore_keys_secret_name: "secret-name".to_string(), @@ -1090,7 +1211,7 @@ assert_eq!( kubernetes_secret_options - .datastore_keys(&common_options, &empty_kube_client) + .datastore_keys(&datastore_keys, &empty_kube_client) .await .unwrap(), expected_datastore_keys @@ -1099,7 +1220,6 @@ assert!(empty_kube_client.lock.lock().await.is_none()); // Keys not provided at command line, present in k8s - let common_options = CommonBinaryOptions::default(); let kubernetes_secret_options = KubernetesSecretOptions { datastore_keys_secret_name: "secret-name".to_string(), datastore_keys_secret_data_key: "secret-data-key".to_string(), @@ -1108,7 +1228,7 @@ assert_eq!( 
kubernetes_secret_options - .datastore_keys(&common_options, &kube_client) + .datastore_keys(&[], &kube_client) .await .unwrap() .len(), @@ -1116,7 +1236,6 @@ mod tests { ); // Neither flag provided - let common_options = CommonBinaryOptions::default(); let kubernetes_secret_options = KubernetesSecretOptions { datastore_keys_secret_name: "secret-name".to_string(), datastore_keys_secret_data_key: "secret-data-key".to_string(), @@ -1124,7 +1243,7 @@ mod tests { }; kubernetes_secret_options - .datastore_keys(&common_options, &kube_client) + .datastore_keys(&[], &kube_client) .await .unwrap_err(); } diff --git a/aggregator/src/binary_utils.rs b/aggregator/src/binary_utils.rs index 58ab5a6db..970eaca04 100644 --- a/aggregator/src/binary_utils.rs +++ b/aggregator/src/binary_utils.rs @@ -49,11 +49,11 @@ use crate::{ /// Reads, parses, and returns the config referenced by the given options, or None if no config file /// path was set. -pub fn read_config(options: &CommonBinaryOptions) -> Result { - let config_content = fs::read_to_string(&options.config_file) - .with_context(|| format!("couldn't read config file {:?}", options.config_file))?; +pub fn read_config(config_file: &PathBuf) -> Result { + let config_content = fs::read_to_string(config_file) + .with_context(|| format!("couldn't read config file {:?}", config_file))?; yaml_serde::from_str(&config_content) - .with_context(|| format!("couldn't parse config file {:?}", options.config_file)) + .with_context(|| format!("couldn't parse config file {:?}", config_file)) } /// Connects to a database, given a config. `db_password` is mutually exclusive with the database @@ -266,7 +266,7 @@ where initialize_rustls(); // Read and parse config. 
-    let config: Config = read_config(options.common_options())?; +    let config: Config = read_config(&options.common_options().config_file)?; let mut runtime_builder = runtime::Builder::new_multi_thread(); runtime_builder.enable_all(); From ec9834f8db60be1c7aab02d207988f1ad87fbf98 Mon Sep 17 00:00:00 2001 From: David Cook Date: Thu, 2 Apr 2026 10:02:27 -0500 Subject: [PATCH 2/2] New mode to add HPKE keypair via aggregator API --- aggregator/src/binaries/janus_cli.rs | 200 ++++++++++++++++++++++++--- aggregator_api/src/lib.rs | 4 +- aggregator_api/src/models.rs | 14 +- 3 files changed, 192 insertions(+), 26 deletions(-) diff --git a/aggregator/src/binaries/janus_cli.rs b/aggregator/src/binaries/janus_cli.rs index d4eaa3c0b..01eec282a 100644 --- a/aggregator/src/binaries/janus_cli.rs +++ b/aggregator/src/binaries/janus_cli.rs @@ -15,7 +15,10 @@ use clap::{ builder::{StringValueParser, TypedValueParser}, }; use itertools::Itertools; -use janus_aggregator_api::git_revision; +use janus_aggregator_api::{ + git_revision, + models::{HpkeConfigResp, PutHpkeConfigReq}, +}; use janus_aggregator_core::{ AsyncAggregator, datastore::{self, Datastore, Transaction, models::HpkeKeyState}, @@ -105,7 +108,7 @@ enum Command { /// Numeric identifier of the HPKE configuration to generate #[arg(long)] - id: u8, + id: Option<u8>, /// HPKE Key Encapsulation Mechanism algorithm #[arg(long)] @@ -264,6 +267,12 @@ impl Command { Some(config_file), _, ) => { + let Some(id) = id else { + return Err(anyhow!( + "HPKE config ID is required when creating keypair via datastore" + )); + }; + let datastore = datastore_from_opts( kubernetes_secret_options, command_line_options, @@ -272,7 +281,7 @@ impl Command { ) .await?; - generate_hpke_key( + generate_hpke_key_datastore( &datastore, command_line_options.dry_run, (*id).into(), @@ -284,8 +293,38 @@ impl Command { .await } - (Command::GenerateHpkeKey { .. 
}, None, _) => Err(anyhow!( - "generate-hpke-key requires a configuration file for database access" + ( + Command::GenerateHpkeKey { + kubernetes_secret_options: _, + id, + kem, + kdf, + aead, + hpke_config_out_file, + }, + None, + Some((aggregator_api_url, aggregator_api_auth_token)), + ) => { + if id.is_some() { + return Err(anyhow!( + "HPKE config ID cannot be specified when creating keypair via aggregator API" + )); + } + + generate_hpke_key_aggregator_api( + aggregator_api_url, + aggregator_api_auth_token, + command_line_options.dry_run, + (*kem).into(), + (*kdf).into(), + (*aead).into(), + hpke_config_out_file.as_deref(), + ) + .await + } + + (Command::GenerateHpkeKey { .. }, None, None) => Err(anyhow!( + "generate-hpke-key requires either a configuration file or aggregator API arguments" )), ( @@ -712,7 +751,7 @@ async fn install_tracing_and_metrics_handlers( Ok((trace_guard, metrics_guard)) } -async fn generate_hpke_key( +async fn generate_hpke_key_datastore( datastore: &Datastore, dry_run: bool, id: HpkeConfigId, @@ -740,6 +779,49 @@ async fn generate_hpke_key( Ok(()) } +async fn generate_hpke_key_aggregator_api( + aggregator_api_url: &Url, + aggregator_api_auth_token: &AuthenticationToken, + dry_run: bool, + kem: HpkeKemId, + kdf: HpkeKdfId, + aead: HpkeAeadId, + hpke_config_out_file: Option<&Path>, +) -> Result<()> { + let client = reqwest::Client::new(); + let (auth_header_name, auth_header_value) = + aggregator_api_auth_token.request_authentication()?; + + if dry_run { + return Ok(()); + } + + let response = client + .put(aggregator_api_url.join("hpke_configs")?) + .header(auth_header_name, auth_header_value) + .header("Content-Type", janus_aggregator_api::CONTENT_TYPE) + .header("Accept", janus_aggregator_api::CONTENT_TYPE) + .body(serde_json::to_vec(&PutHpkeConfigReq { + kem_id: Some(kem), + kdf_id: Some(kdf), + aead_id: Some(aead), + })?) 
+ .send() + .await?; + let status = response.status(); + if !status.is_success() { + let body = response.text().await?; + return Err(anyhow!("got error status code {status}: {body}")); + } + let response_body: HpkeConfigResp = response.json().await?; + + if let Some(hpke_config_out_file) = hpke_config_out_file { + fs::write(hpke_config_out_file, response_body.config.get_encoded()?).await?; + } + + Ok(()) +} + async fn set_hpke_key_state( datastore: &Datastore, dry_run: bool, @@ -1136,23 +1218,26 @@ mod tests { collections::HashMap, io::Write, net::{Ipv4Addr, SocketAddr}, + sync::Arc, }; use aws_lc_rs::aead::{AES_128_GCM, UnboundKey}; use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD}; use chrono::TimeDelta; use clap::CommandFactory; + use janus_aggregator_api::{Config, aggregator_api_handler}; use janus_aggregator_core::{ datastore::{Datastore, models::HpkeKeyState, test_util::ephemeral_datastore}, task::{AggregationMode, AggregatorTask, BatchMode, test_util::TaskBuilder}, taskprov::{PeerAggregator, VerifyKeyInit}, + test_util::noop_meter, }; use janus_core::{ auth_tokens::AuthenticationToken, hpke::HpkeKeypair, initialize_rustls, - test_util::{kubernetes, roundtrip_encoding}, - time::{RealClock, TimeDeltaExt}, + test_util::{install_test_trace_subscriber, kubernetes, roundtrip_encoding}, + time::{MockClock, RealClock, TimeDeltaExt}, vdaf::{VdafInstance, vdaf_dp_strategies}, }; use janus_messages::{ @@ -1253,7 +1338,7 @@ mod tests { } // Returns the HPKE config written to disk. 
- async fn run_generate_hpke_key_testcase( + async fn run_generate_hpke_key_datastore_testcase( ds: &Datastore, dry_run: bool, id: HpkeConfigId, @@ -1264,15 +1349,23 @@ mod tests { let temp_dir = tempdir().unwrap(); let hpke_config_out_file = temp_dir.path().join("hpke_config"); - super::generate_hpke_key(ds, dry_run, id, kem, kdf, aead, Some(&hpke_config_out_file)) - .await - .unwrap(); + super::generate_hpke_key_datastore( + ds, + dry_run, + id, + kem, + kdf, + aead, + Some(&hpke_config_out_file), + ) + .await + .unwrap(); HpkeConfig::get_decoded(&fs::read(hpke_config_out_file).await.unwrap()).unwrap() } #[tokio::test] - async fn generate_hpke_key() { + async fn generate_hpke_key_datastore() { let ephemeral_datastore = ephemeral_datastore().await; let ds = ephemeral_datastore.datastore(RealClock::default()).await; @@ -1281,8 +1374,10 @@ mod tests { let kdf = HpkeKdfId::HkdfSha256; let aead = HpkeAeadId::Aes128Gcm; - let disk_hpke_config = - run_generate_hpke_key_testcase(&ds, /* dry_run */ false, id, kem, kdf, aead).await; + let disk_hpke_config = run_generate_hpke_key_datastore_testcase( + &ds, /* dry_run */ false, id, kem, kdf, aead, + ) + .await; let hpke_keypair = ds .run_unnamed_tx(|tx| { @@ -1313,8 +1408,10 @@ mod tests { let kdf = HpkeKdfId::HkdfSha512; let aead = HpkeAeadId::ChaCha20Poly1305; - let disk_hpke_config = - run_generate_hpke_key_testcase(&ds, /* dry_run */ true, id, kem, kdf, aead).await; + let disk_hpke_config = run_generate_hpke_key_datastore_testcase( + &ds, /* dry_run */ true, id, kem, kdf, aead, + ) + .await; let hpke_keypairs = ds .run_unnamed_tx(|tx| Box::pin(async move { Ok(tx.get_hpke_keypairs().await.unwrap()) })) @@ -1331,6 +1428,75 @@ mod tests { assert_eq!(disk_hpke_config.aead_id(), &aead); } + #[tokio::test] + async fn generate_hpke_key_aggregator_api() { + install_test_trace_subscriber(); + let ephemeral_datastore = ephemeral_datastore().await; + let datastore = 
Arc::new(ephemeral_datastore.datastore(MockClock::default()).await); + let aggregator_api_auth_token: AuthenticationToken = random(); + + let kem = HpkeKemId::P256HkdfSha256; + let kdf = HpkeKdfId::HkdfSha256; + let aead = HpkeAeadId::Aes128Gcm; + + let handler = aggregator_api_handler( + Arc::clone(&datastore), + Config { + auth_tokens: Vec::from([aggregator_api_auth_token.clone()]), + public_dap_url: "https://dap.url".parse().unwrap(), + }, + &noop_meter(), + ); + + let server_handle = trillium_tokio::config() + .without_signals() + .with_host("127.0.0.1") + .with_port(0) + .spawn(handler); + let server_info = server_handle.info().await; + let socket_addr = server_info.tcp_socket_addr().unwrap(); + + let temp_dir = tempdir().unwrap(); + let hpke_config_out_file = temp_dir.path().join("hpke_config"); + + let aggregator_api_url = format!("http://{socket_addr}/").parse().unwrap(); + + super::generate_hpke_key_aggregator_api( + &aggregator_api_url, + &aggregator_api_auth_token, + false, + kem, + kdf, + aead, + Some(&hpke_config_out_file), + ) + .await + .unwrap(); + + server_handle.stop().await; + + let hpke_config = + HpkeConfig::get_decoded(&fs::read(hpke_config_out_file).await.unwrap()).unwrap(); + + let id = *hpke_config.id(); + let hpke_keypair = datastore + .run_unnamed_tx(|tx| { + Box::pin(async move { Ok(tx.get_hpke_keypair(&id).await.unwrap()) }) + }) + .await + .unwrap() + .unwrap(); + + // Verify datastore state matches what was written to disk. + assert_eq!(hpke_keypair.state(), &HpkeKeyState::Pending); + assert_eq!(hpke_keypair.hpke_keypair().config(), &hpke_config); + + // Verify HPKE configuration matches what was expected. 
+ assert_eq!(hpke_config.kem_id(), &kem); + assert_eq!(hpke_config.kdf_id(), &kdf); + assert_eq!(hpke_config.aead_id(), &aead); + } + #[tokio::test] async fn set_hpke_key_state() { let ephemeral_datastore = ephemeral_datastore().await; diff --git a/aggregator_api/src/lib.rs b/aggregator_api/src/lib.rs index db383d0a2..3e6c565b3 100644 --- a/aggregator_api/src/lib.rs +++ b/aggregator_api/src/lib.rs @@ -1,5 +1,5 @@ //! This crate implements the Janus Aggregator API. -mod models; +pub mod models; mod routes; #[cfg(test)] mod tests; @@ -36,7 +36,7 @@ pub struct Config { } /// Content type -const CONTENT_TYPE: &str = "application/vnd.janus.aggregator+json;version=0.1"; +pub const CONTENT_TYPE: &str = "application/vnd.janus.aggregator+json;version=0.1"; struct ReplaceMimeTypes; diff --git a/aggregator_api/src/models.rs b/aggregator_api/src/models.rs index 03ec01341..ad834baf5 100644 --- a/aggregator_api/src/models.rs +++ b/aggregator_api/src/models.rs @@ -177,9 +177,9 @@ pub(crate) struct GetTaskUploadMetricsResp(pub(crate) TaskUploadCounter); pub(crate) struct GetTaskAggregationMetricsResp(pub(crate) TaskAggregationCounter); #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub(crate) struct HpkeConfigResp { - pub(crate) config: HpkeConfig, - pub(crate) state: HpkeKeyState, +pub struct HpkeConfigResp { + pub config: HpkeConfig, + pub state: HpkeKeyState, } impl From<HpkeKeypair> for HpkeConfigResp { @@ -192,10 +192,10 @@ #[derive(Serialize, Deserialize)] -pub(crate) struct PutHpkeConfigReq { - pub(crate) kem_id: Option<HpkeKemId>, - pub(crate) kdf_id: Option<HpkeKdfId>, - pub(crate) aead_id: Option<HpkeAeadId>, +pub struct PutHpkeConfigReq { + pub kem_id: Option<HpkeKemId>, + pub kdf_id: Option<HpkeKdfId>, + pub aead_id: Option<HpkeAeadId>, } #[derive(Serialize, Deserialize)]