
Use containers for interop API tests. (#434)
With this commit, the `interop_binaries` E2E tests exercise the interop
test containers themselves, rather than the binaries that are added to the
containers. This matters because the container build definitions include
important wiring: before this change, a container build could be broken
(e.g. Janus unable to talk to the datastore) while the E2E tests still
passed.
branlwyd authored Aug 25, 2022
1 parent e842b61 commit e14fd45
Showing 13 changed files with 520 additions and 401 deletions.
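For illustration only (not part of this commit): a minimal sketch of the kind of end-to-end test this change enables, exercising the interop client container rather than the binary inside it. The helpers `load_zstd_compressed_docker_image` and `await_http_server` come from `interop_binaries/src/lib.rs` below; the tokio test runtime, the `include_bytes!`-from-`OUT_DIR` embedding, the test name, and the 18080:8080 port mapping are assumptions rather than details of the actual `testcontainer.rs` consumer.

// Hypothetical sketch; ports, names, and the tokio dependency are assumptions.
use std::process::Command;

use interop_binaries::test_util::{await_http_server, load_zstd_compressed_docker_image};

// build.rs writes the zstd-compressed image into OUT_DIR; embed it at compile time.
const INTEROP_CLIENT_IMAGE: &[u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/interop_client.tar.zst"));

#[tokio::test]
async fn client_container_serves_http() {
    // Load the embedded image into the local Docker daemon, then run it,
    // publishing the container's (assumed) HTTP port 8080 on host port 18080.
    let image_id = load_zstd_compressed_docker_image(INTEROP_CLIENT_IMAGE);
    let status = Command::new("docker")
        .args(["run", "--detach", "--publish", "18080:8080"])
        .arg(&image_id)
        .status()
        .expect("Failed to execute `docker run`");
    assert!(status.success());

    // The test passes only if the *container* comes up and answers HTTP,
    // which is exactly the wiring that binary-level tests could not catch.
    await_http_server(18080).await;
}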
5 changes: 4 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Dockerfile.interop_aggregator
@@ -17,7 +17,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,targe

FROM postgres:14-alpine
RUN mkdir /logs
RUN apk add --update supervisor && rm -rf /tmp/* /var/cache/apk/*
COPY interop_binaries/supervisord.conf /supervisord.conf
COPY --from=builder /src/db/schema.sql /db/schema.sql
COPY --from=builder /janus_interop_aggregator /janus_interop_aggregator
8 changes: 7 additions & 1 deletion interop_binaries/Cargo.toml
@@ -8,6 +8,9 @@ publish = false

[features]
test-util = [
"dep:backoff",
"dep:hex",
"dep:futures",
"dep:lazy_static",
"dep:regex",
"dep:zstd",
@@ -20,8 +23,11 @@ testcontainer = [

[dependencies]
anyhow = "1"
backoff = { version = "0.4", features = ["tokio"], optional = true }
base64 = "0.13.0"
clap = "3.2.17"
futures = { version = "0.3.23", optional = true }
hex = { version = "0.4", optional = true }
lazy_static = { version = "1", optional = true }
janus_client = { path = "../janus_client" }
janus_core = { path = "../janus_core" }
@@ -44,8 +50,8 @@ warp = "^0.3"
zstd = { version = "0.11", optional = true }

[dev-dependencies]
interop_binaries = { path = ".", features = ["testcontainer"] }
lazy_static = "1"
portpicker = "0.1"
reqwest = { version = "0.11.4", default-features = false, features = ["json"] }
testcontainers = "0.14.0"

151 changes: 111 additions & 40 deletions interop_binaries/build.rs
@@ -16,9 +16,11 @@ fn main() {
"build" => {
// The "build" strategy causes us to build a container image based on the current
// repository, and embed it in the test library.
println!("cargo:rerun-if-changed=../Dockerfile.interop");
println!("cargo:rerun-if-changed=../Dockerfile.interop_aggregator");

// These directives should match the dependencies copied into Dockerfile.interop_aggregator.
// These directives should match the dependencies copied into the build stage in
// Dockerfile.interop & Dockerfile.interop_aggregator.
println!("cargo:rerun-if-changed=../Cargo.lock");
println!("cargo:rerun-if-changed=../Cargo.toml");
println!("cargo:rerun-if-changed=../db/schema.sql");
@@ -28,63 +30,132 @@ println!("cargo:rerun-if-changed=../janus_core");
println!("cargo:rerun-if-changed=../janus_server");
println!("cargo:rerun-if-changed=../monolithic_integration_test");

// Build & save off a container image for the interop_aggregator.
// Build containers.
// Note: `docker build` has an `--output` flag which writes the output to somewhere, which
// may be a tarfile. But `docker build --output` only exports the image filesystem, and not
// any other image metadata (such as exposed ports, the entrypoint, etc), so we can't easily
// use it.
let build_output = Command::new("docker")
.args([
"build",
"--file=Dockerfile.interop_aggregator",
"--build-arg",
"PROFILE=small",
"--quiet",
".",
])
.current_dir("..")
.env("DOCKER_BUILDKIT", "1")
.output()
.expect("Failed to execute `docker build` for interop aggregator");
assert!(
build_output.status.success(),
"Docker build of interop aggregator failed:\n{}",
String::from_utf8_lossy(&build_output.stderr)
);
let image_id = String::from_utf8(build_output.stdout).unwrap();
let image_id = image_id.trim();
let client_image_id = {
let client_build_output = Command::new("docker")
.args([
"build",
"--quiet",
"--file=Dockerfile.interop",
"--build-arg=PROFILE=small",
"--build-arg=BINARY=janus_interop_client",
".",
])
.current_dir("..")
.env("DOCKER_BUILDKIT", "1")
.output()
.expect("Failed to execute `docker build` for interop client");
assert!(
client_build_output.status.success(),
"Docker build of interop client failed:\n{}",
String::from_utf8_lossy(&client_build_output.stderr)
);
String::from_utf8(client_build_output.stdout).unwrap().trim().to_string()
};

let image_file = File::create(format!(
let aggregator_image_id = {
let aggregator_build_output = Command::new("docker")
.args([
"build",
"--quiet",
"--file=Dockerfile.interop_aggregator",
"--build-arg=PROFILE=small",
".",
])
.current_dir("..")
.env("DOCKER_BUILDKIT", "1")
.output()
.expect("Failed to execute `docker build` for interop aggregator");
assert!(
aggregator_build_output.status.success(),
"Docker build of interop aggregator failed:\n{}",
String::from_utf8_lossy(&aggregator_build_output.stderr)
);
String::from_utf8(aggregator_build_output.stdout).unwrap().trim().to_string()
};

let collector_image_id = {
let collector_build_output = Command::new("docker")
.args([
"build",
"--quiet",
"--file=Dockerfile.interop",
"--build-arg=PROFILE=small",
"--build-arg=BINARY=janus_interop_collector",
"."
])
.current_dir("..")
.env("DOCKER_BUILDKIT", "1")
.output()
.expect("Failed to execute `docker build` for interop collector");
assert!(
collector_build_output.status.success(),
"Docker build of interop collector failed:\n{}",
String::from_utf8_lossy(&collector_build_output.stderr)
);
String::from_utf8(collector_build_output.stdout).unwrap().trim().to_string()
};

// Save off containers to disk.
let client_image_file = File::create(format!(
"{}/interop_client.tar.zst",
env::var("OUT_DIR").unwrap()
))
.expect("Couldn't create interop client image file");
save_zstd_compressed_docker_image(&client_image_id, &client_image_file);
client_image_file
.sync_all()
.expect("Couldn't write compressed image file");
drop(client_image_file);

let aggregator_image_file = File::create(format!(
"{}/interop_aggregator.tar.zst",
env::var("OUT_DIR").unwrap()
))
.expect("Couldn't create interop aggregator image file");
save_zstd_compressed_docker_image(image_id, &image_file);
image_file
save_zstd_compressed_docker_image(&aggregator_image_id, &aggregator_image_file);
aggregator_image_file
.sync_all()
.expect("Couldn't write compressed image file");
drop(aggregator_image_file);

let collector_image_file = File::create(format!(
"{}/interop_collector.tar.zst",
env::var("OUT_DIR").unwrap()
))
.expect("Couldn't create interop collector image file");
save_zstd_compressed_docker_image(&collector_image_id, &collector_image_file);
collector_image_file
.sync_all()
.expect("Couldn't write compressed image file");
drop(image_file);
drop(collector_image_file);

// Make a best-effort attempt to clean up after ourselves.
// Make a best-effort attempt to clean up Docker's post-build state.
Command::new("docker")
.args(["rmi", image_id])
.args(["image", "rm", &client_image_id, &aggregator_image_id, &collector_image_id])
.status()
.expect("Failed to execute `docker rmi` for interop aggregator");
.expect("Failed to execute `docker image remove`");
}

"skip" => {
// The "skip" strategy causes us to skip building a container at all. Tests
// depending on having the image available will fail.
// The "skip" strategy causes us to skip building containers at all. Tests which
// depend on having interop test images available will fail.

// We create an empty file since it's necessary for compilation to succeed; the
// consumer (testcontainer.rs) will panic if someone attempts to instantiate a Janus
// instance in this case.
let image_file = File::create(format!(
"{}/interop_aggregator.tar.zst",
env::var("OUT_DIR").unwrap()
))
.expect("Couldn't create interop aggregator image file");
image_file.sync_all().expect("Couldn't write interop aggregator image file");
// We create empty image files since these files are required for compilation to
// succeed; the consumer (testcontainer.rs) will panic if someone attempts to
// instantiate a container in this case.
for filename in ["interop_client", "interop_aggregator", "interop_collector"] {
let image_file = File::create(format!(
"{}/{filename}.tar.zst",
env::var("OUT_DIR").unwrap()
))
.expect("Couldn't create empty image file");
image_file.sync_all().expect("Couldn't write empty image file");
}
}

_ => panic!("Unexpected JANUS_INTEROP_CONTAINER value {container_strategy:?} (valid values are \"build\" & \"skip\")")
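`save_zstd_compressed_docker_image` is called throughout the hunk above but is not defined in this diff (it already exists elsewhere in build.rs). For orientation only, a sketch of what such a helper plausibly looks like, assuming it shells out to `docker save` and zstd-compresses the stream; the real implementation may differ in signature and details.

// Illustrative sketch; not the actual helper from build.rs.
use std::{
    fs::File,
    process::{Command, Stdio},
};

fn save_zstd_compressed_docker_image(image_id: &str, image_file: &File) {
    // Stream `docker save` output for the given image ID...
    let mut child = Command::new("docker")
        .args(["save", image_id])
        .stdout(Stdio::piped())
        .spawn()
        .expect("Failed to execute `docker save`");
    let child_stdout = child.stdout.take().unwrap();
    // ...and zstd-compress it directly into the output file (level 0 = default).
    zstd::stream::copy_encode(child_stdout, image_file, 0)
        .expect("Couldn't write compressed image file");
    assert!(child.wait().expect("Failed to wait on `docker save`").success());
}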
137 changes: 90 additions & 47 deletions interop_binaries/src/lib.rs
@@ -177,52 +177,95 @@ impl HpkeConfigRegistry {
}

#[cfg(feature = "test-util")]
lazy_static::lazy_static! {
static ref DOCKER_HASH_RE: regex::Regex = regex::Regex::new(r"sha256:([0-9a-f]{64})").unwrap();
}
pub mod test_util {
use backoff::{future::retry, ExponentialBackoff};
use futures::TryFutureExt;
use rand::{thread_rng, Rng};
use std::time::Duration;
use url::Url;

/// Loads a given zstd-compressed docker image into Docker. Returns the hash of the loaded image,
/// e.g. as referenced by `sha256:$HASH`. Panics on failure.
#[cfg(feature = "test-util")]
pub fn load_zstd_compressed_docker_image(compressed_image: &[u8]) -> String {
use std::{
io::{Cursor, Read},
process::{Command, Stdio},
thread,
};

let mut docker_load_child = Command::new("docker")
.args(["load", "--quiet"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()
.expect("Failed to execute `docker load`");
let child_stdin = docker_load_child.stdin.take().unwrap();
thread::scope(|s| {
let writer_handle = s.spawn(|| {
// We write in a separate thread as "writing more than a pipe buffer's
// worth of input to stdin without also reading stdout and stderr at the
// same time may cause a deadlock."
zstd::stream::copy_decode(Cursor::new(compressed_image), child_stdin)
});
let reader_handle = s.spawn(|| {
let mut child_stdout = docker_load_child.stdout.take().unwrap();
let mut stdout = String::new();
child_stdout
.read_to_string(&mut stdout)
.expect("Couldn't read image ID from docker");
let caps = DOCKER_HASH_RE
.captures(&stdout)
.expect("Couldn't find image ID from `docker load` output");
caps.get(1).unwrap().as_str().to_string()
});

// The first `expect` catches panics, the second `expect` catches write errors.
writer_handle
.join()
.expect("Couldn't write image to docker")
.expect("Couldn't write image to docker");
reader_handle.join().unwrap()
})
lazy_static::lazy_static! {
static ref DOCKER_HASH_RE: regex::Regex = regex::Regex::new(r"sha256:([0-9a-f]{64})").unwrap();
}

/// Waits a while for the given port to start responding to HTTP requests, panicking if this
/// doesn't happen soon enough.
pub async fn await_http_server(port: u16) {
let http_client = reqwest::Client::default();
let url = Url::parse(&format!("http://localhost:{port}/")).unwrap();
retry(
// (We use ExponentialBackoff as a constant-time backoff as the built-in Constant
// backoff will never time out.)
ExponentialBackoff {
initial_interval: Duration::from_millis(250),
max_interval: Duration::from_millis(250),
multiplier: 1.0,
max_elapsed_time: Some(Duration::from_secs(10)),
..Default::default()
},
|| {
http_client
.get(url.clone())
.send()
.map_err(backoff::Error::transient)
},
)
.await
.unwrap();
}

/// Loads a given zstd-compressed docker image into Docker. Returns the hash of the loaded
/// image, e.g. as referenced by `sha256:$HASH`. Panics on failure.
pub fn load_zstd_compressed_docker_image(compressed_image: &[u8]) -> String {
use std::{
io::{Cursor, Read},
process::{Command, Stdio},
thread,
};

let mut docker_load_child = Command::new("docker")
.args(["load", "--quiet"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()
.expect("Failed to execute `docker load`");
let child_stdin = docker_load_child.stdin.take().unwrap();
thread::scope(|s| {
let writer_handle = s.spawn(|| {
// We write in a separate thread as "writing more than a pipe buffer's
// worth of input to stdin without also reading stdout and stderr at the
// same time may cause a deadlock."
zstd::stream::copy_decode(Cursor::new(compressed_image), child_stdin)
});
let reader_handle = s.spawn(|| {
let mut child_stdout = docker_load_child.stdout.take().unwrap();
let mut stdout = String::new();
child_stdout
.read_to_string(&mut stdout)
.expect("Couldn't read image ID from docker");
let caps = DOCKER_HASH_RE
.captures(&stdout)
.expect("Couldn't find image ID from `docker load` output");
caps.get(1).unwrap().as_str().to_string()
});

// The first `expect` catches panics, the second `expect` catches write errors.
writer_handle
.join()
.expect("Couldn't write image to docker")
.expect("Couldn't write image to docker");
reader_handle.join().unwrap()
})
}

pub fn generate_network_name() -> String {
generate_unique_name("janus_ephemeral_network")
}

pub fn generate_unique_name(prefix: &str) -> String {
let mut buf = [0; 4];
thread_rng().fill(&mut buf);
format!("{}_{}", prefix, hex::encode(buf))
}
}
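A brief, hypothetical usage sketch for the naming helpers above; the surrounding `docker network` commands and the test structure are illustrative and not taken from this commit.

// Hypothetical usage; the docker commands shown here are illustrative only.
use std::process::Command;

use interop_binaries::test_util::generate_network_name;

#[test]
fn uses_unique_network() {
    // Each invocation gets a unique name such as "janus_ephemeral_network_1a2b3c4d",
    // so concurrent test runs don't collide on shared Docker state.
    let network = generate_network_name();
    let status = Command::new("docker")
        .args(["network", "create"])
        .arg(&network)
        .status()
        .expect("Failed to execute `docker network create`");
    assert!(status.success());

    // ... start interop containers attached to `network` and run the test ...

    // Best-effort cleanup, mirroring the approach taken in build.rs.
    Command::new("docker")
        .args(["network", "rm"])
        .arg(&network)
        .status()
        .expect("Failed to execute `docker network rm`");
}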