fix: handle Queue::submit non-fatally
* Change the signature of `wgpu_core::Global::queue_submit` to return
  a `(SubmissionIndex, …)` in addition to its current error type.
* Change the control flow of errors in `Queue::submit` to break to the
  end of a block. This is similar to what we already do in many APIs in
  `wgpu_core`.
* Hoist the scope of the local `submit_index` binding so it can be used
  at the point where we need to convert current error paths to also
  return the submission index (see the sketch after this list).
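
A minimal, self-contained sketch of the pattern these bullets describe;
the names `Device`, `QueueSubmitError`, and `last_submission_index` are
placeholders rather than the actual `wgpu_core` items:

```rust
// Sketch only: placeholder types and bookkeeping, not wgpu_core internals.
type SubmissionIndex = u64;

#[derive(Debug)]
struct QueueSubmitError(String);

struct Device {
    last_submission_index: SubmissionIndex,
}

fn queue_submit(
    device: &mut Device,
    command_buffers_are_valid: bool,
) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
    // `submit_index` is hoisted so both the success path and every error
    // path can report it.
    let submit_index = device.last_submission_index + 1;

    let error = 'error: {
        // Validation problems break to the end of the block instead of
        // returning early, mirroring other wgpu_core APIs.
        if !command_buffers_are_valid {
            break 'error QueueSubmitError("invalid command buffer".into());
        }

        // Success path.
        device.last_submission_index = submit_index;
        return Ok(submit_index);
    };

    // All error paths funnel through here, still carrying the index.
    Err((submit_index, error))
}

fn main() {
    let mut device = Device {
        last_submission_index: 0,
    };
    // A failed submission is no longer fatal: the caller still gets an index.
    assert!(matches!(queue_submit(&mut device, false), Err((1, _))));
    assert!(matches!(queue_submit(&mut device, true), Ok(1)));
}
```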

Later, we will likely want to avoid actually retrieving a new submission
index so we can minimize the critical section of code. We'll need to
figure out a strategy for returning a valid (but not necessarily unique)
index in the case of failures that prevent successful submission.
ErichDonGubler committed Sep 24, 2024
1 parent 859dd88 commit 6f4f701
Showing 5 changed files with 295 additions and 197 deletions.
2 changes: 1 addition & 1 deletion deno_webgpu/queue.rs
@@ -44,7 +44,7 @@ pub fn op_webgpu_queue_submit(
})
.collect::<Result<Vec<_>, AnyError>>()?;

- let maybe_err = instance.queue_submit(queue, &ids).err();
+ let maybe_err = instance.queue_submit(queue, &ids).err().map(|(_idx, e)| e);

for rid in command_buffers {
let resource = state.resource_table.take::<WebGpuCommandBuffer>(rid)?;
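
The `.map(|(_idx, e)| e)` above drops the submission index that the error
now carries. A rough sketch of a caller that keeps it instead, assuming the
`Ok` arm also yields the `SubmissionIndex` (only the error tuple is visible
in this hunk); `instance`, `queue`, and `ids` stand in for the bindings used
in `op_webgpu_queue_submit`:

```rust
let submission_index = match instance.queue_submit(queue, &ids) {
    // Successful submission: the index can be used for later polling/waits.
    Ok(idx) => idx,
    // Failed submission: the index is still reported alongside the error,
    // so submission bookkeeping stays consistent.
    Err((idx, err)) => {
        eprintln!("queue_submit failed: {err:?}");
        idx
    }
};
```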
58 changes: 58 additions & 0 deletions tests/tests/regression/issue_6317.rs
@@ -0,0 +1,58 @@
use wgpu::{DownlevelFlags, Limits};
use wgpu_macros::gpu_test;
use wgpu_test::{fail, GpuTestConfiguration, TestParameters};

#[gpu_test]
static NON_FATAL_ERRORS_IN_QUEUE_SUBMIT: GpuTestConfiguration = GpuTestConfiguration::new()
    .parameters(
        TestParameters::default()
            .downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
            .limits(Limits::downlevel_defaults()),
    )
    .run_sync(|ctx| {
        let shader_with_trivial_bind_group = concat!(
            "@group(0) @binding(0) var<storage, read_write> stuff: u32;\n",
            "\n",
            "@compute @workgroup_size(1) fn main() { stuff = 2u; }\n"
        );

        let module = ctx
            .device
            .create_shader_module(wgpu::ShaderModuleDescriptor {
                label: None,
                source: wgpu::ShaderSource::Wgsl(shader_with_trivial_bind_group.into()),
            });

        let compute_pipeline =
            ctx.device
                .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                    label: None,
                    layout: None,
                    module: &module,
                    entry_point: None,
                    compilation_options: Default::default(),
                    cache: Default::default(),
                });

        fail(
            &ctx.device,
            || {
                let mut command_encoder = ctx.device.create_command_encoder(&Default::default());
                {
                    let mut render_pass = command_encoder.begin_compute_pass(&Default::default());
                    render_pass.set_pipeline(&compute_pipeline);

                    // NOTE: We deliberately don't set a bind group here, to provoke a validation
                    // error.

                    render_pass.dispatch_workgroups(1, 1, 1);
                }

                let _idx = ctx.queue.submit([command_encoder.finish()]);
            },
            Some(concat!(
                "The current set ComputePipeline with '' label ",
                "expects a BindGroup to be set at index 0"
            )),
        )
    });
1 change: 1 addition & 0 deletions tests/tests/root.rs
@@ -6,6 +6,7 @@ mod regression {
mod issue_4485;
mod issue_4514;
mod issue_5553;
+ mod issue_6317;
}

mod bgra8unorm_storage;