Skip to content

Commit

Permalink
Texture reading and Frame capture. FormatConverter -> Reshaper.
Browse files Browse the repository at this point in the history
This adds a suite of functionality for efficiently mapping texture data
and reading it from the CPU.

Highlights include:

- `TextureCapturer` to simplify the reading of textures into image file
  compatible formats. A `capture_hi_res.rs` example has been added to
  demonstrate.
- `window.capture_frame(path)` for writing the next frame to a file at
  the given path. A `simple_capture.rs` example has been added
  demonstrating this.

`TextureFormatConverter` has been renamed to `TextureReshaper` and
support has been added for converting both texture size and sample
count.
  • Loading branch information
mitchmindtree committed Mar 2, 2020
1 parent 206479e commit 2167d3f
Show file tree
Hide file tree
Showing 20 changed files with 1,168 additions and 102 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
capture_hi_res/
simple_capture/
target/
**/*.rs.bk
Cargo.lock
Expand Down
4 changes: 4 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ rusttype = "0.8"
serde = "1"
serde_derive = "1"
serde_json = "1"
threadpool = "1"
toml = "0.5"
walkdir = "2"
wgpu = "0.4"
Expand Down Expand Up @@ -69,6 +70,9 @@ path = "examples/simple_audio.rs"
name = "simple_audio_file"
path = "examples/simple_audio_file.rs"
[[example]]
name = "simple_capture"
path = "examples/simple_capture.rs"
[[example]]
name = "simple_draw"
path = "examples/simple_draw.rs"
[[example]]
Expand Down
200 changes: 200 additions & 0 deletions examples/capture_hi_res.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,200 @@
// A demonstration of drawing to a very large texture, capturing the texture in its original size
// as a PNG and displaying a down-scaled version of the image within the window each frame.

use nannou::prelude::*;

fn main() {
    // Build the app with `model` as its init function, running `update` every
    // tick and `exit` once the window closes.
    let builder = nannou::app(model).update(update).exit(exit);
    builder.run();
}

// The application state shared between `model`, `update`, `view` and `exit`.
struct Model {
    // The large (4K) texture that we will draw to each frame.
    texture: wgpu::Texture,
    // The `Draw` instance used for drawing to our texture.
    draw: nannou::Draw,
    // The type used to render the `Draw` vertices to our texture.
    renderer: nannou::draw::Renderer,
    // The type used to capture the texture so it can be written to disk.
    texture_capturer: wgpu::TextureCapturer,
    // The type used to resize our texture down to the window texture.
    texture_reshaper: wgpu::TextureReshaper,
}

// Initialise the `Model`: the window, the large render-target texture, and the
// helpers used to render to it, capture it and display it.
fn model(app: &App) -> Model {
    // We will render into a 4K UHD texture.
    let texture_size = [3_840, 2_160];

    // Open a window at a quarter of the texture resolution.
    let window_id = app
        .new_window()
        .size(texture_size[0] / 4, texture_size[1] / 4)
        .title("nannou")
        .view(view)
        .build()
        .unwrap();
    let window = app.window(window_id).unwrap();

    // The wgpu device associated with the window's swap chain.
    let device = window.swap_chain_device();

    // Build the texture that our `Draw` instance will render into.
    let sample_count = window.msaa_samples();
    let texture = wgpu::TextureBuilder::new()
        .size(texture_size)
        // OUTPUT_ATTACHMENT: render target for the `Draw` pass.
        // SAMPLED: read by both the `TextureCapturer` and the `TextureReshaper`.
        .usage(wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED)
        // Match nannou's default multisampling sample count.
        .sample_count(sample_count)
        // A spacious 16-bit linear sRGBA format suitable for high quality drawing.
        .format(wgpu::TextureFormat::Rgba16Unorm)
        .build(device);

    // A `Draw` instance plus a renderer configured for our texture's descriptor.
    let draw = nannou::Draw::new();
    let renderer =
        nannou::draw::Renderer::from_texture_descriptor(device, texture.descriptor());

    // The capturer writes snapshots on a small pool of worker threads.
    let texture_capturer = wgpu::TextureCapturer::with_num_threads(4);

    // The reshaper resizes and converts our texture to the window's frame format.
    let texture_view = texture.create_default_view();
    let texture_reshaper = wgpu::TextureReshaper::new(
        device,
        &texture_view,
        texture.sample_count() > 1,
        sample_count,
        Frame::TEXTURE_FORMAT,
    );

    // Ensure the directory that captured frames are saved into exists.
    std::fs::create_dir_all(&capture_directory(app)).unwrap();

    Model {
        texture,
        draw,
        renderer,
        texture_capturer,
        texture_reshaper,
    }
}

// Render an animated drawing into the 4K texture, then queue an asynchronous
// capture of that texture to a PNG file. The order here matters: render and
// capture commands are encoded first, submitted to the GPU, and only then is
// the snapshot read.
fn update(app: &App, model: &mut Model, _update: Update) {
    // First, reset the `draw` state so we start from a clean slate each frame.
    let draw = &model.draw;
    draw.reset();

    // A `Rect` matching the texture dimensions to help position our drawing.
    let [w, h] = model.texture.size();
    let r = geom::Rect::from_w_h(w as f32, h as f32);

    // Animate from the frame number (not wall-clock time) so that captured
    // frames advance at a constant rate regardless of capture overhead.
    let elapsed_frames = app.main_window().elapsed_frames();
    let t = elapsed_frames as f32 / 60.0;

    // Draw like we normally would in the `view`.
    draw.background().color(BLACK);
    let n_points = 10;
    let weight = 8.0;
    let hz = 6.0;
    // A row of points tracing a sine wave across the texture, then each point
    // paired with a colour cycling over time.
    let vertices = (0..n_points)
        .map(|i| {
            // Spread the points evenly from the left to the right edge.
            let x = map_range(i, 0, n_points - 1, r.left(), r.right());
            let fract = i as f32 / n_points as f32;
            let amp = (t + fract * hz * TAU).sin();
            // Keep the wave within the middle 75% of the texture height.
            let y = map_range(amp, -1.0, 1.0, r.bottom() * 0.75, r.top() * 0.75);
            pt2(x, y)
        })
        .enumerate()
        .map(|(i, p)| {
            // Cycle each RGB channel with a different phase per point.
            let fract = i as f32 / n_points as f32;
            let r = (t + fract) % 1.0;
            let g = (t + 1.0 - fract) % 1.0;
            let b = (t + 0.5 + fract) % 1.0;
            let rgba = srgba(r, g, b, 1.0);
            (p, rgba)
        });
    draw.polyline()
        .weight(weight)
        .join_round()
        .colored_points(vertices);

    // Draw the frame number and texture size in the bottom left.
    let string = format!("Frame {} - {:?}", elapsed_frames, [w, h]);
    let text = text(&string)
        .font_size(48)
        .left_justify()
        .align_bottom()
        .build(r.pad(r.h() * 0.05));
    draw.path().fill().color(WHITE).events(text.path_events());

    // Encode the commands that render our drawing to the texture.
    let window = app.main_window();
    let device = window.swap_chain_device();
    let ce_desc = wgpu::CommandEncoderDescriptor::default();
    let mut encoder = device.create_command_encoder(&ce_desc);
    model
        .renderer
        .render_to_texture(device, &mut encoder, draw, &model.texture);

    // Take a snapshot of the texture. The capturer will do the following:
    //
    // 1. Resolve the texture to a non-multisampled texture if necessary.
    // 2. Convert the format to non-linear 8-bit sRGBA ready for image storage.
    // 3. Copy the result to a buffer ready to be mapped for reading.
    let snapshot = model
        .texture_capturer
        .capture(device, &mut encoder, &model.texture);

    // Submit the commands for our drawing and texture capture to the GPU.
    window
        .swap_chain_queue()
        .lock()
        .unwrap()
        .submit(&[encoder.finish()]);

    // Submit a function for writing our snapshot to a PNG.
    //
    // NOTE: It is essential that the commands for capturing the snapshot are `submit`ted before we
    // attempt to read the snapshot - otherwise we will read a blank texture!
    //
    // NOTE: You can also use `read` instead of `read_threaded` if you want to read the texture on
    // the current thread. This will slow down the main thread, but will allow the PNG writing to
    // keep up with the main thread.
    let path = capture_directory(app)
        .join(elapsed_frames.to_string())
        .with_extension("png");
    snapshot.read_threaded(move |result| {
        let image = result.expect("failed to map texture memory");
        image
            .save(&path)
            .expect("failed to save texture to png image");
    });
}

// Display a down-scaled copy of our large texture in the window each frame.
fn view(_app: &App, model: &Model, frame: Frame) {
    // Encode a render pass that samples the texture and writes it to the frame.
    let mut command_encoder = frame.command_encoder();
    let reshaper = &model.texture_reshaper;
    reshaper.encode_render_pass(frame.texture_view(), &mut *command_encoder);
}

// Called once when the app exits. Blocks until the capturer has finished
// writing any in-flight PNGs so no queued frames are lost on shutdown.
fn exit(_app: &App, model: Model) {
    println!("Waiting for PNG writing to complete...");
    model.texture_capturer.finish();
    println!("Done!");
}

// The directory into which captured frames are saved:
// `<project_dir>/<exe_name>/`.
fn capture_directory(app: &App) -> std::path::PathBuf {
    let exe_name = app.exe_name().unwrap();
    app.project_dir().join(exe_name)
}
56 changes: 56 additions & 0 deletions examples/simple_capture.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
// This example is a copy of the `simple_draw.rs` example, but captures each frame and writes them
// as a PNG image file to `/<path_to_nannou>/nannou/simple_capture/<frame_number>.png`

use nannou::prelude::*;

fn main() {
    // Run as a simple sketch: nannou calls `view` once per frame.
    nannou::sketch(view);
}

// Draw a handful of animated shapes, present them, then capture the frame to
// a PNG file. Draw order is significant: later shapes layer over earlier ones.
fn view(app: &App, frame: Frame) {
    let canvas = app.draw();

    canvas.background().color(CORNFLOWERBLUE);

    let win = app.window_rect();

    // A violet triangle covering the upper-left half of the window.
    canvas
        .tri()
        .points(win.bottom_left(), win.top_left(), win.top_right())
        .color(VIOLET);

    let t = app.time;

    // An ellipse following the mouse, pulsing in radius over time.
    canvas
        .ellipse()
        .x_y(app.mouse.x * t.cos(), app.mouse.y)
        .radius(win.w() * 0.125 * t.sin())
        .color(RED);

    // A line sweeping between the window corners with an oscillating weight.
    canvas
        .line()
        .weight(10.0 + (t.sin() * 0.5 + 0.5) * 90.0)
        .caps_round()
        .color(PALEGOLDENROD)
        .points(win.top_left() * t.sin(), win.bottom_right() * t.cos());

    // A rotating quad mirroring the mouse position horizontally.
    canvas
        .quad()
        .x_y(-app.mouse.x, app.mouse.y)
        .color(DARKGREEN)
        .rotate(t);

    // A rectangle with mouse coordinates swapped and a hue cycling with time.
    canvas
        .rect()
        .x_y(app.mouse.y, app.mouse.x)
        .w(app.mouse.x * 0.25)
        .hsv(t, 1.0, 1.0);

    canvas.to_frame(app, &frame).unwrap();

    // Save each frame to
    // `/<path_to_nannou>/nannou/simple_capture/<frame_number>.png`.
    // PNG is used here; tiff, bmp, gif, jpeg, webp and others are supported.
    let capture_dir = app.project_dir().join(app.exe_name().unwrap());
    let file_path = capture_dir
        .join(frame.nth().to_string())
        .with_extension("png");

    // Capture the frame!
    app.main_window().capture_frame(file_path);
}
2 changes: 1 addition & 1 deletion examples/wgpu/wgpu_teapot/wgpu_teapot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ fn view(app: &App, model: &Model, frame: Frame) {
let depth_size = g.depth_texture.size();
let frame_size = frame.texture_size();
let device = frame.device_queue_pair().device();
if frame_size != [depth_size.width, depth_size.height] {
if frame_size != depth_size {
let depth_format = g.depth_texture.format();
let sample_count = frame.texture_msaa_samples();
g.depth_texture = create_depth_texture(device, frame_size, depth_format, sample_count);
Expand Down
2 changes: 1 addition & 1 deletion examples/wgpu/wgpu_teapot_camera/wgpu_teapot_camera.rs
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ fn view(_app: &App, model: &Model, frame: Frame) {
let depth_size = g.depth_texture.size();
let frame_size = frame.texture_size();
let device = frame.device_queue_pair().device();
if frame_size != [depth_size.width, depth_size.height] {
if frame_size != depth_size {
let depth_format = g.depth_texture.format();
let sample_count = frame.texture_msaa_samples();
g.depth_texture = create_depth_texture(device, frame_size, depth_format, sample_count);
Expand Down
Loading

0 comments on commit 2167d3f

Please sign in to comment.