Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix usage of Url::join. #380

Merged
Merged 4 commits on Aug 11, 2022
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion janus_server/src/aggregator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2004,7 +2004,7 @@ where
const CORS_PREFLIGHT_CACHE_AGE: u32 = 24 * 60 * 60;

/// Constructs a Warp filter with endpoints common to all aggregators.
fn aggregator_filter<C: Clock>(
pub fn aggregator_filter<C: Clock>(
datastore: Arc<Datastore<C>>,
clock: C,
) -> Result<BoxedFilter<(impl Reply,)>, Error> {
Expand Down
5 changes: 1 addition & 4 deletions janus_server/src/aggregator/aggregate_share.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,10 +179,7 @@ impl CollectJobDriver {

let response = self
.http_client
.post(
task.aggregator_url(Role::Helper)?
.join("/aggregate_share")?,
)
.post(task.aggregator_url(Role::Helper)?.join("aggregate_share")?)
.header(CONTENT_TYPE, AggregateShareReq::MEDIA_TYPE)
.header(
DAP_AUTH_HEADER,
Expand Down
4 changes: 2 additions & 2 deletions janus_server/src/aggregator/aggregation_job_driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,7 @@ impl AggregationJobDriver {

let response = self
.http_client
.post(task.aggregator_url(Role::Helper)?.join("/aggregate")?)
.post(task.aggregator_url(Role::Helper)?.join("aggregate")?)
.header(CONTENT_TYPE, AggregateInitializeReq::MEDIA_TYPE)
.header(
DAP_AUTH_HEADER,
Expand Down Expand Up @@ -501,7 +501,7 @@ impl AggregationJobDriver {

let response = self
.http_client
.post(task.aggregator_url(Role::Helper)?.join("/aggregate")?)
.post(task.aggregator_url(Role::Helper)?.join("aggregate")?)
.header(CONTENT_TYPE, AggregateContinueReq::MEDIA_TYPE)
.header(
DAP_AUTH_HEADER,
Expand Down
82 changes: 62 additions & 20 deletions janus_server/src/task.rs
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ impl Task {
/// Create a new [`Task`] from the provided values
pub fn new<I: IntoIterator<Item = (HpkeConfig, HpkePrivateKey)>>(
task_id: TaskId,
aggregator_endpoints: Vec<Url>,
mut aggregator_endpoints: Vec<Url>,
vdaf: VdafInstance,
role: Role,
vdaf_verify_keys: Vec<Vec<u8>>,
Expand Down Expand Up @@ -239,6 +239,15 @@ impl Task {
return Err(Error::InvalidParameter("vdaf_verify_keys"));
}

// Ensure provided aggregator endpoints end with a slash, as we will be joining additional
// path segments into these endpoints & the Url::join implementation is persnickety about
// the slash at the end of the path.
for url in &mut aggregator_endpoints {
if !url.as_str().ends_with('/') {
url.set_path(&format!("{}/", url.path()));
}
}

// Compute hpke_configs mapping cfg.id -> (cfg, key).
let hpke_configs: HashMap<HpkeConfigId, (HpkeConfig, HpkePrivateKey)> = hpke_keys
.into_iter()
Expand Down Expand Up @@ -424,32 +433,28 @@ impl<'de> Deserialize<'de> for Task {
.collect::<Result<_, _>>()?;

// hpke_keys
let hpke_keys: HashMap<_, _> = serialized_task
let hpke_keys: Vec<(_, _)> = serialized_task
.hpke_keys
.into_iter()
.map(|keypair| {
Ok((
keypair.config.id,
keypair.try_into().map_err(D::Error::custom)?,
))
})
.map(|keypair| keypair.try_into().map_err(D::Error::custom))
.collect::<Result<_, _>>()?;

Ok(Task {
id: task_id,
aggregator_endpoints: serialized_task.aggregator_endpoints,
vdaf: serialized_task.vdaf,
role: serialized_task.role,
Task::new(
task_id,
serialized_task.aggregator_endpoints,
serialized_task.vdaf,
serialized_task.role,
vdaf_verify_keys,
max_batch_lifetime: serialized_task.max_batch_lifetime,
min_batch_size: serialized_task.min_batch_size,
min_batch_duration: serialized_task.min_batch_duration,
tolerable_clock_skew: serialized_task.tolerable_clock_skew,
serialized_task.max_batch_lifetime,
serialized_task.min_batch_size,
serialized_task.min_batch_duration,
serialized_task.tolerable_clock_skew,
collector_hpke_config,
aggregator_auth_tokens,
collector_auth_tokens,
hpke_keys,
})
)
.map_err(D::Error::custom)
}
}

Expand Down Expand Up @@ -602,9 +607,15 @@ pub mod test_util {

#[cfg(test)]
mod tests {
use super::test_util::new_dummy_task;
use super::{
test_util::{generate_auth_token, new_dummy_task},
Task, PRIO3_AES128_VERIFY_KEY_LENGTH,
};
use crate::{config::test_util::roundtrip_encoding, task::VdafInstance};
use janus_core::message::{Duration, Interval, Role, TaskId, Time};
use janus_core::{
hpke::test_util::generate_hpke_config_and_private_key,
message::{Duration, Interval, Role, TaskId, Time},
};
use serde_test::{assert_tokens, Token};

#[test]
Expand Down Expand Up @@ -765,4 +776,35 @@ mod tests {
Role::Leader,
));
}

#[test]
fn aggregator_endpoints_end_in_slash() {
    // `Task::new` must normalize aggregator endpoints so each URL path ends
    // with '/'. Without the trailing slash, a later `Url::join("segment")`
    // would *replace* the final path component instead of appending to it.
    let unnormalized_endpoints = Vec::from([
        // Path present but missing the trailing slash.
        "http://leader_endpoint/foo/bar".parse().unwrap(),
        // Bare authority with no explicit path at all.
        "http://helper_endpoint".parse().unwrap(),
    ]);

    let constructed_task = Task::new(
        TaskId::random(),
        unnormalized_endpoints,
        VdafInstance::Real(janus_core::task::VdafInstance::Prio3Aes128Count),
        Role::Leader,
        Vec::from([[0; PRIO3_AES128_VERIFY_KEY_LENGTH].into()]),
        0,
        0,
        Duration::from_hours(8).unwrap(),
        Duration::from_minutes(10).unwrap(),
        generate_hpke_config_and_private_key().0,
        Vec::from([generate_auth_token()]),
        Vec::from([generate_auth_token()]),
        Vec::from([generate_hpke_config_and_private_key()]),
    )
    .unwrap();

    // Both endpoints come back with a '/'-terminated path.
    let expected: Vec<_> = Vec::from([
        "http://leader_endpoint/foo/bar/".parse().unwrap(),
        "http://helper_endpoint/".parse().unwrap(),
    ]);
    assert_eq!(constructed_task.aggregator_endpoints, expected);
}
}
2 changes: 2 additions & 0 deletions monolithic_integration_test/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ janus = [
[dependencies]
backoff = "0.4"
daphne = { git = "https://github.com/cloudflare/daphne", rev = "6301e712df216a0301c42cb3177110dd8217fa84", optional = true }
futures = "0.3"
hex = "0.4"
hpke-dispatch = "0.3"
http = "0.2"
Expand All @@ -38,6 +39,7 @@ subprocess = { version = "0.2", optional = true }
tempfile = { version = "3", optional = true }
tokio = { version = "1", features = ["full", "tracing"] }
toml = "0.5"
warp = { version = "0.3", features = ["tls"] }

[dev-dependencies]
chrono = "0.4.21"
Expand Down
33 changes: 22 additions & 11 deletions monolithic_integration_test/src/janus.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
//! Functionality for tests interacting with Janus (<https://github.com/divviup/janus>).

use http::HeaderMap;
use futures::FutureExt;
use janus_core::{message::Duration, time::RealClock, TokioRuntime};
use janus_server::{
aggregator::{
aggregate_share::CollectJobDriver, aggregation_job_creator::AggregationJobCreator,
aggregation_job_driver::AggregationJobDriver, aggregator_server,
aggregation_job_driver::AggregationJobDriver, aggregator_filter,
},
binary_utils::job_driver::JobDriver,
datastore::test_util::{ephemeral_datastore, DbHandle},
Expand All @@ -18,6 +18,7 @@ use std::{
time,
};
use tokio::{select, sync::oneshot, task, try_join};
use warp::Filter;

/// Represents a running Janus test instance.
pub struct Janus {
Expand Down Expand Up @@ -48,15 +49,25 @@ impl Janus {

// Start aggregator server.
let (server_shutdown_sender, server_shutdown_receiver) = oneshot::channel();
let (_, leader_server) = aggregator_server(
Arc::clone(&datastore),
RealClock::default(),
SocketAddr::from((Ipv4Addr::LOCALHOST, port)),
HeaderMap::new(),
async move { server_shutdown_receiver.await.unwrap() },
)
.unwrap();
let server_task_handle = task::spawn(leader_server);
let aggregator_filter = task
.aggregator_url(task.role)
.unwrap()
.path_segments()
.unwrap()
.filter_map(|s| (!s.is_empty()).then(|| warp::path(s.to_owned()).boxed()))
.reduce(|x, y| x.and(y).boxed())
.unwrap_or_else(|| warp::any().boxed())
.and(aggregator_filter(datastore, RealClock::default()).unwrap());
let server = warp::serve(aggregator_filter);
let server_task_handle = task::spawn(async move {
server
.bind_with_graceful_shutdown(
SocketAddr::from((Ipv4Addr::LOCALHOST, port)),
server_shutdown_receiver.map(Result::unwrap),
)
.1
.await
});

// Start aggregation job creator.
let (
Expand Down
2 changes: 1 addition & 1 deletion monolithic_integration_test/tests/common/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ pub async fn submit_measurements_and_verify_aggregate(
let collect_url = leader_task
.aggregator_url(Role::Leader)
.unwrap()
.join("/collect")
.join("collect")
.unwrap();
let batch_interval = Interval::new(
before_timestamp
Expand Down
9 changes: 8 additions & 1 deletion monolithic_integration_test/tests/janus.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,16 @@ async fn janus_janus() {
// Start servers.
let (janus_leader_port, janus_helper_port) = pick_two_unused_ports();
let (collector_hpke_config, collector_private_key) = generate_hpke_config_and_private_key();
let (janus_leader_task, janus_helper_task) =
let (mut janus_leader_task, mut janus_helper_task) =
create_test_tasks(janus_leader_port, janus_helper_port, &collector_hpke_config);

// Update tasks to serve out of /dap/ prefix.
for task in [&mut janus_leader_task, &mut janus_helper_task] {
for url in &mut task.aggregator_endpoints {
url.set_path("/dap/");
}
}

let _janus_leader = Janus::new(janus_leader_port, &janus_leader_task).await;
let _janus_helper = Janus::new(janus_helper_port, &janus_helper_task).await;

Expand Down