Commit

Handle Multi-threaded EGL Context Access
Implements the synchronization necessary to use the GL backend from
multiple threads.
zicklag committed Jul 27, 2021
1 parent 9ce884c commit 00d1c17
Showing 5 changed files with 176 additions and 58 deletions.
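The pattern throughout this diff is to replace direct ownership of the `glow::Context` with a `super::AdapterContext` whose `lock()` returns a guard that dereferences to the context, serializing all GL access. A minimal sketch of that idea, assuming a plain `Mutex`-based design (the field name and internals here are illustrative, not taken from this commit, and the real type must also manage EGL context currency per thread):

```rust
use std::sync::{Mutex, MutexGuard};

/// Sketch: a lockable wrapper that serializes GL access across threads.
pub struct AdapterContext {
    glow: Mutex<glow::Context>, // hypothetical field; real internals differ
}

impl AdapterContext {
    /// Blocks until no other thread holds the context, then returns a
    /// guard that derefs to `glow::Context`; dropping the guard unlocks.
    pub fn lock(&self) -> MutexGuard<'_, glow::Context> {
        self.glow.lock().expect("GL context mutex poisoned")
    }
}
```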
wgpu-hal/src/gles/adapter.rs (12 changes: 9 additions & 3 deletions)
@@ -163,7 +163,10 @@ impl super::Adapter {
}
}

-pub(super) unsafe fn expose(gl: glow::Context) -> Option<crate::ExposedAdapter<super::Api>> {
+pub(super) unsafe fn expose(
+    context: super::AdapterContext,
+) -> Option<crate::ExposedAdapter<super::Api>> {
+    let gl = context.lock();
let extensions = gl.supported_extensions();

let (vendor_const, renderer_const) = if extensions.contains("WEBGL_debug_renderer_info") {
@@ -333,10 +336,13 @@ impl super::Adapter {

let downlevel_defaults = wgt::DownlevelLimits {};

+// Drop the GL guard so we can move the context into AdapterShared
+drop(gl);
+
Some(crate::ExposedAdapter {
adapter: super::Adapter {
shared: Arc::new(super::AdapterShared {
-context: gl,
+context,
private_caps,
workarounds,
shading_language_version,
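The explicit `drop(gl)` above is needed because the guard borrows from `context`, and guard types run `Drop`, so the borrow lasts until the guard is dropped; moving `context` into `AdapterShared` while the guard is live would not compile. A reduced illustration, reusing the `AdapterContext` sketch from the note above:

```rust
fn into_shared(context: AdapterContext) -> AdapterContext {
    let gl = context.lock(); // the guard borrows `context`
    let _ = &*gl;            // ...inspect the context while locked...
    drop(gl);                // end the borrow explicitly,
    context                  // or this move would fail to compile (E0505)
}
```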
@@ -401,7 +407,7 @@ impl crate::Adapter<super::Api> for super::Adapter {
&self,
features: wgt::Features,
) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1);
gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1);
let main_vao = gl
wgpu-hal/src/gles/device.rs (53 changes: 27 additions & 26 deletions)
@@ -73,12 +73,11 @@ impl CompilationContext<'_> {

impl super::Device {
unsafe fn compile_shader(
-&self,
+gl: &glow::Context,
shader: &str,
naga_stage: naga::ShaderStage,
label: Option<&str>,
) -> Result<glow::Shader, crate::PipelineError> {
-let gl = &self.shared.context;
let target = match naga_stage {
naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
@@ -111,7 +110,7 @@ impl super::Device {
}

fn create_shader(
-&self,
+gl: &glow::Context,
naga_stage: naga::ShaderStage,
stage: &crate::ProgrammableStage<super::Api>,
context: CompilationContext,
@@ -156,16 +155,16 @@ impl super::Device {
reflection_info,
);

-unsafe { self.compile_shader(&output, naga_stage, stage.module.label.as_deref()) }
+unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
}

unsafe fn create_pipeline<'a, I: Iterator<Item = ShaderStage<'a>>>(
&self,
+gl: &glow::Context,
shaders: I,
layout: &super::PipelineLayout,
label: crate::Label,
) -> Result<super::PipelineInner, crate::PipelineError> {
-let gl = &self.shared.context;
let program = gl.create_program().unwrap();
if let Some(label) = label {
if gl.supports_debug() {
@@ -186,7 +185,7 @@ impl super::Device {
name_binding_map: &mut name_binding_map,
};

-let shader = self.create_shader(naga_stage, stage, context)?;
+let shader = Self::create_shader(gl, naga_stage, stage, context)?;
shaders_to_delete.push(shader);
}

@@ -199,7 +198,7 @@ impl super::Device {
let shader_src = format!("#version {} es \n void main(void) {{}}", version,);
log::info!("Only vertex shader is present. Creating an empty fragment shader",);
let shader =
-self.compile_shader(&shader_src, naga::ShaderStage::Fragment, Some("_dummy"))?;
+Self::compile_shader(gl, &shader_src, naga::ShaderStage::Fragment, Some("_dummy"))?;
shaders_to_delete.push(shader);
}

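These signature changes establish the locking discipline used in the rest of the file: the public trait methods take the lock once, and the private helpers (`compile_shader`, `create_shader`, `create_pipeline`) receive the already-locked `&glow::Context` instead of reaching for `self.shared.context` themselves, so the mutex is never re-entered. A standalone sketch of that shape (type and field names are stand-ins, not the real wgpu-hal definitions):

```rust
use std::sync::{Arc, Mutex};

struct SharedSketch {
    context: Mutex<glow::Context>, // stand-in for AdapterShared
}

struct DeviceSketch {
    shared: Arc<SharedSketch>,
}

impl DeviceSketch {
    fn public_entry(&self) {
        // Lock exactly once at the public boundary...
        let gl = self.shared.context.lock().unwrap();
        // ...then pass the borrowed context down; if a helper called
        // `lock()` again, a non-reentrant mutex would deadlock.
        Self::helper(&gl);
    }

    fn helper(gl: &glow::Context) {
        let _ = gl; // helpers only ever see the already-locked context
    }
}
```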
@@ -292,7 +291,7 @@ impl super::Device {

impl crate::Device<super::Api> for super::Device {
unsafe fn exit(self, queue: super::Queue) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.delete_vertex_array(self.main_vao);
gl.delete_framebuffer(queue.draw_fbo);
gl.delete_framebuffer(queue.copy_fbo);
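A side note on the recurring `let gl = &self.shared.context.lock();` line: the guard returned by `lock()` is a temporary, and binding a reference to it in a `let` relies on Rust's temporary lifetime extension, so the guard, and therefore the lock, lives to the end of the method. A standalone illustration with std's `Mutex` (whether wgpu's lock type matches std's exactly is not shown in this diff):

```rust
use std::sync::Mutex;

fn read_locked(m: &Mutex<String>) {
    // The temporary guard is lifetime-extended because it is
    // immediately borrowed in a `let`, so the lock stays held.
    let s = &m.lock().unwrap();
    println!("{s}");
} // guard dropped here; lock released
```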
@@ -303,7 +302,7 @@ impl crate::Device<super::Api> for super::Device {
&self,
desc: &crate::BufferDescriptor,
) -> Result<super::Buffer, crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();

let target = if desc.usage.contains(crate::BufferUses::INDEX) {
glow::ELEMENT_ARRAY_BUFFER
@@ -360,7 +359,7 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.delete_buffer(buffer.raw);
}

@@ -369,7 +368,7 @@ impl crate::Device<super::Api> for super::Device {
buffer: &super::Buffer,
range: crate::MemoryRange,
) -> Result<crate::BufferMapping, crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();

let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;

@@ -388,7 +387,7 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.bind_buffer(buffer.target, Some(buffer.raw));
gl.unmap_buffer(buffer.target);
gl.bind_buffer(buffer.target, None);
@@ -398,7 +397,7 @@ impl crate::Device<super::Api> for super::Device {
where
I: Iterator<Item = crate::MemoryRange>,
{
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.bind_buffer(buffer.target, Some(buffer.raw));
for range in ranges {
gl.flush_mapped_buffer_range(
@@ -416,7 +415,7 @@ impl crate::Device<super::Api> for super::Device {
&self,
desc: &crate::TextureDescriptor,
) -> Result<super::Texture, crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();

let render_usage = crate::TextureUses::COLOR_TARGET
| crate::TextureUses::DEPTH_STENCIL_WRITE
@@ -559,7 +558,7 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn destroy_texture(&self, texture: super::Texture) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
match texture.inner {
super::TextureInner::Renderbuffer { raw, .. } => {
gl.delete_renderbuffer(raw);
@@ -600,7 +599,7 @@ impl crate::Device<super::Api> for super::Device {
&self,
desc: &crate::SamplerDescriptor,
) -> Result<super::Sampler, crate::DeviceError> {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();

let raw = gl.create_sampler().unwrap();

@@ -667,7 +666,7 @@ impl crate::Device<super::Api> for super::Device {
Ok(super::Sampler { raw })
}
unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.delete_sampler(sampler.raw);
}

@@ -862,12 +861,13 @@ impl crate::Device<super::Api> for super::Device {
&self,
desc: &crate::RenderPipelineDescriptor<super::Api>,
) -> Result<super::RenderPipeline, crate::PipelineError> {
+let gl = &self.shared.context.lock();
let shaders = iter::once((naga::ShaderStage::Vertex, &desc.vertex_stage)).chain(
desc.fragment_stage
.as_ref()
.map(|fs| (naga::ShaderStage::Fragment, fs)),
);
-let inner = self.create_pipeline(shaders, desc.layout, desc.label)?;
+let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?;

let (vertex_buffers, vertex_attributes) = {
let mut buffers = Vec::new();
@@ -925,21 +925,22 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.delete_program(pipeline.inner.program);
}

unsafe fn create_compute_pipeline(
&self,
desc: &crate::ComputePipelineDescriptor<super::Api>,
) -> Result<super::ComputePipeline, crate::PipelineError> {
+let gl = &self.shared.context.lock();
let shaders = iter::once((naga::ShaderStage::Compute, &desc.stage));
-let inner = self.create_pipeline(shaders, desc.layout, desc.label)?;
+let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?;

Ok(super::ComputePipeline { inner })
}
unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
gl.delete_program(pipeline.inner.program);
}

@@ -948,7 +949,7 @@ impl crate::Device<super::Api> for super::Device {
desc: &wgt::QuerySetDescriptor<crate::Label>,
) -> Result<super::QuerySet, crate::DeviceError> {
use std::fmt::Write;
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
let mut temp_string = String::new();

let mut queries = Vec::with_capacity(desc.count as usize);
@@ -975,7 +976,7 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn destroy_query_set(&self, set: super::QuerySet) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
for &query in set.queries.iter() {
gl.delete_query(query);
}
@@ -987,7 +988,7 @@ impl crate::Device<super::Api> for super::Device {
})
}
unsafe fn destroy_fence(&self, fence: super::Fence) {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
for (_, sync) in fence.pending {
gl.delete_sync(sync);
}
@@ -996,7 +997,7 @@ impl crate::Device<super::Api> for super::Device {
&self,
fence: &super::Fence,
) -> Result<crate::FenceValue, crate::DeviceError> {
-Ok(fence.get_latest(&self.shared.context))
+Ok(fence.get_latest(&self.shared.context.lock()))
}
unsafe fn wait(
&self,
@@ -1005,7 +1006,7 @@ impl crate::Device<super::Api> for super::Device {
timeout_ms: u32,
) -> Result<bool, crate::DeviceError> {
if fence.last_completed < wait_value {
-let gl = &self.shared.context;
+let gl = &self.shared.context.lock();
let timeout_ns = (timeout_ms as u64 * 1_000_000).min(!0u32 as u64);
let &(_, sync) = fence
.pending
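One arithmetic detail in `wait` above: the timeout is converted from milliseconds to nanoseconds and clamped to `u32::MAX` (spelled `!0u32`), which keeps the nanosecond value within 32-bit range for the underlying sync wait. A quick check of that expression (the helper name here is illustrative):

```rust
fn clamp_timeout_ns(timeout_ms: u32) -> u64 {
    (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
}

fn main() {
    assert_eq!(clamp_timeout_ns(1), 1_000_000); // 1 ms = 1e6 ns
    // 5 s = 5e9 ns overflows u32, so it clamps to u32::MAX ns (~4.29 s).
    assert_eq!(clamp_timeout_ns(5_000), u32::MAX as u64);
}
```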
(Diffs for the remaining 3 changed files are not shown here.)
