diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs
index 726d794bf10..4f731fef8da 100644
--- a/wgpu-hal/src/gles/adapter.rs
+++ b/wgpu-hal/src/gles/adapter.rs
@@ -163,7 +163,10 @@ impl super::Adapter {
         }
     }
 
-    pub(super) unsafe fn expose(gl: glow::Context) -> Option<crate::ExposedAdapter<super::Api>> {
+    pub(super) unsafe fn expose(
+        context: super::AdapterContext,
+    ) -> Option<crate::ExposedAdapter<super::Api>> {
+        let gl = context.lock();
         let extensions = gl.supported_extensions();
 
         let (vendor_const, renderer_const) = if extensions.contains("WEBGL_debug_renderer_info") {
@@ -333,10 +336,13 @@ impl super::Adapter {
 
         let downlevel_defaults = wgt::DownlevelLimits {};
 
+        // Drop the GL guard so we can move the context into AdapterShared
+        drop(gl);
+
         Some(crate::ExposedAdapter {
             adapter: super::Adapter {
                 shared: Arc::new(super::AdapterShared {
-                    context: gl,
+                    context,
                     private_caps,
                     workarounds,
                     shading_language_version,
@@ -401,7 +407,7 @@ impl crate::Adapter<super::Api> for super::Adapter {
         &self,
         features: wgt::Features,
     ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1);
         gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1);
         let main_vao = gl
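
Note: the `drop(gl)` added above is load-bearing. The lock guard borrows `context`, and `context` cannot be moved into `AdapterShared` while that borrow is alive. A toy sketch of the same borrow-then-move pattern; the names (`Shared`, `expose`) are illustrative stand-ins, not wgpu-hal API:

```rust
use parking_lot::Mutex;
use std::sync::Arc;

// `Mutex<String>` stands in for `AdapterContext`.
struct Shared {
    context: Mutex<String>,
}

fn expose(context: Mutex<String>) -> Arc<Shared> {
    let gl = context.lock(); // lock to query "capabilities"
    let _caps = gl.len();
    drop(gl); // release the borrow of `context`...
    Arc::new(Shared { context }) // ...so the context itself can be moved
}

fn main() {
    let shared = expose(Mutex::new(String::from("GL 3.0")));
    assert_eq!(shared.context.lock().len(), 6);
}
```

Without the `drop`, the final move fails to compile, because the guard still borrows `context`.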
diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs
index 1450c118420..bf47814ded9 100644
--- a/wgpu-hal/src/gles/device.rs
+++ b/wgpu-hal/src/gles/device.rs
@@ -73,12 +73,11 @@ impl CompilationContext<'_> {
 
 impl super::Device {
     unsafe fn compile_shader(
-        &self,
+        gl: &glow::Context,
         shader: &str,
         naga_stage: naga::ShaderStage,
         label: Option<&str>,
     ) -> Result<glow::Shader, crate::PipelineError> {
-        let gl = &self.shared.context;
         let target = match naga_stage {
             naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
             naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
@@ -111,7 +110,7 @@ impl super::Device {
     }
 
     fn create_shader(
-        &self,
+        gl: &glow::Context,
         naga_stage: naga::ShaderStage,
         stage: &crate::ProgrammableStage<super::Api>,
         context: CompilationContext,
@@ -156,16 +155,16 @@ impl super::Device {
             reflection_info,
         );
 
-        unsafe { self.compile_shader(&output, naga_stage, stage.module.label.as_deref()) }
+        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
     }
 
     unsafe fn create_pipeline<'a, I: Iterator<Item = (naga::ShaderStage, &'a crate::ProgrammableStage<'a, super::Api>)>>(
         &self,
+        gl: &glow::Context,
         shaders: I,
         layout: &super::PipelineLayout,
         label: crate::Label,
     ) -> Result<super::PipelineInner, crate::PipelineError> {
-        let gl = &self.shared.context;
         let program = gl.create_program().unwrap();
         if let Some(label) = label {
             if gl.supports_debug() {
@@ -186,7 +185,7 @@ impl super::Device {
                 name_binding_map: &mut name_binding_map,
             };
 
-            let shader = self.create_shader(naga_stage, stage, context)?;
+            let shader = Self::create_shader(gl, naga_stage, stage, context)?;
             shaders_to_delete.push(shader);
         }
 
@@ -199,7 +198,7 @@ impl super::Device {
             let shader_src = format!("#version {} es \n void main(void) {{}}", version,);
             log::info!("Only vertex shader is present. Creating an empty fragment shader",);
             let shader =
-                self.compile_shader(&shader_src, naga::ShaderStage::Fragment, Some("_dummy"))?;
+                Self::compile_shader(gl, &shader_src, naga::ShaderStage::Fragment, Some("_dummy"))?;
             shaders_to_delete.push(shader);
         }
 
@@ -292,7 +291,7 @@ impl crate::Device<super::Api> for super::Device {
     unsafe fn exit(self, queue: super::Queue) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.delete_vertex_array(self.main_vao);
         gl.delete_framebuffer(queue.draw_fbo);
         gl.delete_framebuffer(queue.copy_fbo);
@@ -303,7 +302,7 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::BufferDescriptor,
     ) -> Result<super::Buffer, crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
 
         let target = if desc.usage.contains(crate::BufferUses::INDEX) {
             glow::ELEMENT_ARRAY_BUFFER
@@ -360,7 +359,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.delete_buffer(buffer.raw);
     }
 
@@ -369,7 +368,7 @@ impl crate::Device<super::Api> for super::Device {
         buffer: &super::Buffer,
         range: crate::MemoryRange,
     ) -> Result<crate::BufferMapping, crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
 
         let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
@@ -388,7 +387,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.bind_buffer(buffer.target, Some(buffer.raw));
         gl.unmap_buffer(buffer.target);
         gl.bind_buffer(buffer.target, None);
@@ -398,7 +397,7 @@ impl crate::Device<super::Api> for super::Device {
     where
         I: Iterator<Item = crate::MemoryRange>,
     {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.bind_buffer(buffer.target, Some(buffer.raw));
         for range in ranges {
             gl.flush_mapped_buffer_range(
@@ -416,7 +415,7 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::TextureDescriptor,
     ) -> Result<super::Texture, crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
 
         let render_usage = crate::TextureUses::COLOR_TARGET
             | crate::TextureUses::DEPTH_STENCIL_WRITE
@@ -559,7 +558,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn destroy_texture(&self, texture: super::Texture) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         match texture.inner {
             super::TextureInner::Renderbuffer { raw, .. } => {
                 gl.delete_renderbuffer(raw);
@@ -600,7 +599,7 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::SamplerDescriptor,
     ) -> Result<super::Sampler, crate::DeviceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
 
         let raw = gl.create_sampler().unwrap();
@@ -667,7 +666,7 @@ impl crate::Device<super::Api> for super::Device {
         Ok(super::Sampler { raw })
     }
     unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.delete_sampler(sampler.raw);
     }
 
@@ -862,12 +861,13 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::RenderPipelineDescriptor<super::Api>,
     ) -> Result<super::RenderPipeline, crate::PipelineError> {
+        let gl = &self.shared.context.lock();
         let shaders = iter::once((naga::ShaderStage::Vertex, &desc.vertex_stage)).chain(
             desc.fragment_stage
                 .as_ref()
                 .map(|fs| (naga::ShaderStage::Fragment, fs)),
         );
-        let inner = self.create_pipeline(shaders, desc.layout, desc.label)?;
+        let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?;
 
         let (vertex_buffers, vertex_attributes) = {
             let mut buffers = Vec::new();
@@ -925,7 +925,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.delete_program(pipeline.inner.program);
     }
 
@@ -933,13 +933,14 @@ impl crate::Device<super::Api> for super::Device {
         &self,
         desc: &crate::ComputePipelineDescriptor<super::Api>,
     ) -> Result<super::ComputePipeline, crate::PipelineError> {
+        let gl = &self.shared.context.lock();
         let shaders = iter::once((naga::ShaderStage::Compute, &desc.stage));
-        let inner = self.create_pipeline(shaders, desc.layout, desc.label)?;
+        let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?;
         Ok(super::ComputePipeline { inner })
     }
     unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         gl.delete_program(pipeline.inner.program);
     }
 
@@ -948,7 +949,7 @@ impl crate::Device<super::Api> for super::Device {
         desc: &wgt::QuerySetDescriptor,
     ) -> Result<super::QuerySet, crate::DeviceError> {
         use std::fmt::Write;
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
 
         let mut temp_string = String::new();
         let mut queries = Vec::with_capacity(desc.count as usize);
@@ -975,7 +976,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn destroy_query_set(&self, set: super::QuerySet) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         for &query in set.queries.iter() {
             gl.delete_query(query);
         }
@@ -987,7 +988,7 @@ impl crate::Device<super::Api> for super::Device {
         })
     }
     unsafe fn destroy_fence(&self, fence: super::Fence) {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.lock();
         for (_, sync) in fence.pending {
             gl.delete_sync(sync);
         }
@@ -996,7 +997,7 @@ impl crate::Device<super::Api> for super::Device {
         &self,
        fence: &super::Fence,
     ) -> Result<crate::FenceValue, crate::DeviceError> {
-        Ok(fence.get_latest(&self.shared.context))
+        Ok(fence.get_latest(&self.shared.context.lock()))
     }
     unsafe fn wait(
         &self,
@@ -1005,7 +1006,7 @@ impl crate::Device<super::Api> for super::Device {
         timeout_ms: u32,
     ) -> Result<bool, crate::DeviceError> {
         if fence.last_completed < wait_value {
-            let gl = &self.shared.context;
+            let gl = &self.shared.context.lock();
             let timeout_ns = (timeout_ms as u64 * 1_000_000).min(!0u32 as u64);
             let &(_, sync) = fence
                 .pending
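
Note: throughout `device.rs` the pattern is now uniform — helpers such as `compile_shader` take an explicit `gl: &glow::Context`, while trait entry points start with `let gl = &self.shared.context.lock();`. Two pieces of Rust make this ergonomic: a `let` binding that borrows a temporary extends the guard's lifetime to the end of the enclosing block, and `Deref` lets the guard reference be passed wherever `&glow::Context` is expected. A minimal sketch of both, using placeholder types rather than the real ones:

```rust
use parking_lot::Mutex;

// Stand-in for a helper that takes `gl: &glow::Context`.
fn helper(data: &Vec<u32>) -> usize {
    data.len()
}

fn entry_point(shared: &Mutex<Vec<u32>>) {
    // The temporary `MutexGuard` is kept alive for the whole block because
    // the `let` binding borrows it (temporary lifetime extension).
    let gl = &shared.lock();
    // Deref coercion: `&MutexGuard<Vec<u32>>` -> `&Vec<u32>`.
    let _n = helper(gl);
    // The guard (and, in the real code, the EGL "current" state) is
    // released at the end of the block.
}

fn main() {
    entry_point(&Mutex::new(vec![1, 2, 3]));
}
```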
diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs
index f8686a19e38..844ced21f32 100644
--- a/wgpu-hal/src/gles/egl.rs
+++ b/wgpu-hal/src/gles/egl.rs
@@ -1,8 +1,11 @@
 use glow::HasContext;
-use parking_lot::Mutex;
+use parking_lot::{Mutex, MutexGuard};
 use std::{ffi::CStr, os::raw, ptr, sync::Arc};
 
+/// The amount of time to wait while trying to obtain a lock on the adapter context
+const CONTEXT_LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(1);
+
 const EGL_CONTEXT_FLAGS_KHR: i32 = 0x30FC;
 const EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR: i32 = 0x0001;
 const EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT: i32 = 0x30BF;
@@ -225,6 +228,92 @@ enum SrgbFrameBufferKind {
     Khr,
 }
 
+/// A wrapper around a [`glow::Context`] and the required EGL context that uses locking to
+/// guarantee exclusive access when shared with multiple threads.
+pub struct AdapterContext {
+    glow_context: Mutex<glow::Context>,
+    egl: Arc<egl::DynamicInstance<egl::EGL1_4>>,
+    egl_display: egl::Display,
+    egl_context: egl::Context,
+    egl_pbuffer: Option<egl::Surface>,
+}
+
+unsafe impl Sync for AdapterContext {}
+unsafe impl Send for AdapterContext {}
+
+/// A guard containing a lock to an [`AdapterContext`]
+pub struct AdapterContextLock<'a> {
+    glow_context: MutexGuard<'a, glow::Context>,
+    egl: &'a Arc<egl::DynamicInstance<egl::EGL1_4>>,
+    egl_display: egl::Display,
+    egl_pbuffer: Option<egl::Surface>,
+}
+
+impl<'a> std::ops::Deref for AdapterContextLock<'a> {
+    type Target = glow::Context;
+
+    fn deref(&self) -> &Self::Target {
+        &self.glow_context
+    }
+}
+
+impl<'a> Drop for AdapterContextLock<'a> {
+    fn drop(&mut self) {
+        // Make the EGL context *not* current on this thread
+        self.egl
+            .make_current(self.egl_display, self.egl_pbuffer, self.egl_pbuffer, None)
+            .expect("Cannot make EGL context not current");
+    }
+}
+
+impl AdapterContext {
+    /// Gets the [`glow::Context`] without making the EGL context current on this thread
+    ///
+    /// # Safety
+    ///
+    /// This should only be called when you have manually made sure that the current thread has
+    /// made the EGL context current and that no other thread also has the EGL context current.
+    /// Additionally, you must manually make the EGL context **not** current after you are done
+    /// with it, so that future calls to `lock()` will not fail.
+    ///
+    /// > **Note:** Calling this function **will** still lock the [`glow::Context`], which adds
+    /// > an extra safeguard against accidental concurrent access to the context.
+    pub unsafe fn get_without_egl_lock(&self) -> MutexGuard<glow::Context> {
+        self.glow_context
+            .try_lock_for(CONTEXT_LOCK_TIMEOUT)
+            .expect("Could not lock adapter context. This is most likely a deadlock.")
+    }
+
+    /// Obtain a lock to the EGL context and get a handle to the [`glow::Context`] that can be
+    /// used to do rendering.
+    #[track_caller]
+    pub fn lock<'a>(&'a self) -> AdapterContextLock<'a> {
+        let glow_context = self
+            .glow_context
+            // Don't lock forever. If it takes longer than 1 second to get the lock we've got a
+            // deadlock and should panic to show where we got stuck
+            .try_lock_for(CONTEXT_LOCK_TIMEOUT)
+            .expect("Could not lock adapter context. This is most likely a deadlock.");
+
+        // Make the EGL context current on this thread
+        self.egl
+            .make_current(
+                self.egl_display,
+                self.egl_pbuffer,
+                self.egl_pbuffer,
+                Some(self.egl_context),
+            )
+            .expect("Cannot make EGL context current");
+
+        AdapterContextLock {
+            glow_context,
+            egl: &self.egl,
+            egl_display: self.egl_display,
+            egl_pbuffer: self.egl_pbuffer,
+        }
+    }
+}
+
 #[derive(Debug)]
 struct Inner {
     egl: Arc<egl::DynamicInstance<egl::EGL1_4>>,
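
Note: `lock()` and `AdapterContextLock::drop` form an RAII pair around `eglMakeCurrent`: acquiring the mutex makes the EGL context current on the calling thread, and dropping the guard makes it not current, so the context can never remain current on one thread while another thread acquires the lock. A reduced sketch of the pattern, with hypothetical names and the EGL calls replaced by prints:

```rust
use parking_lot::{Mutex, MutexGuard};
use std::ops::Deref;

struct Context; // stands in for glow::Context

struct Lockable {
    inner: Mutex<Context>,
}

struct Guard<'a>(MutexGuard<'a, Context>);

impl<'a> Deref for Guard<'a> {
    type Target = Context;
    fn deref(&self) -> &Context {
        &self.0
    }
}

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // Real code: egl.make_current(display, pbuffer, pbuffer, None)
        println!("context made not current");
    }
}

impl Lockable {
    fn lock(&self) -> Guard<'_> {
        let inner = self.inner.lock();
        // Real code: egl.make_current(display, pbuffer, pbuffer, Some(context))
        println!("context made current");
        Guard(inner)
    }
}

fn main() {
    let lockable = Lockable { inner: Mutex::new(Context) };
    {
        let _guard = lockable.lock(); // prints "context made current"
    } // guard dropped here: prints "context made not current"
}
```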
@@ -623,6 +712,11 @@ impl crate::Instance<super::Api> for Instance {
             }
         }
 
+        inner
+            .egl
+            .make_current(inner.display, inner.pbuffer, inner.pbuffer, None)
+            .unwrap();
+
         Ok(Surface {
             egl: Arc::clone(&inner.egl),
             raw,
@@ -684,7 +778,20 @@ impl crate::Instance<super::Api> for Instance {
             gl.debug_message_callback(gl_debug_message_callback);
         }
 
-        super::Adapter::expose(gl).into_iter().collect()
+        inner
+            .egl
+            .make_current(inner.display, inner.pbuffer, inner.pbuffer, None)
+            .unwrap();
+
+        super::Adapter::expose(AdapterContext {
+            glow_context: Mutex::new(gl),
+            egl: inner.egl.clone(),
+            egl_display: inner.display,
+            egl_context: inner.context,
+            egl_pbuffer: inner.pbuffer,
+        })
+        .into_iter()
+        .collect()
     }
 }
 
@@ -759,7 +866,7 @@ impl Surface {
                 crate::SurfaceError::Lost
             })?;
         self.egl
-            .make_current(self.display, self.pbuffer, self.pbuffer, Some(self.context))
+            .make_current(self.display, self.pbuffer, self.pbuffer, None)
             .map_err(|e| {
                 log::error!("make_current(null) failed: {}", e);
                 crate::SurfaceError::Lost
@@ -791,7 +898,7 @@ impl crate::Surface<super::Api> for Surface {
         }
 
         let format_desc = device.shared.describe_texture_format(config.format);
-        let gl = &device.shared.context;
+        let gl = &device.shared.context.lock();
         let renderbuffer = gl.create_renderbuffer().unwrap();
         gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer));
         gl.renderbuffer_storage(
@@ -824,7 +931,7 @@ impl crate::Surface<super::Api> for Surface {
     }
 
     unsafe fn unconfigure(&mut self, device: &super::Device) {
-        let gl = &device.shared.context;
+        let gl = &device.shared.context.lock();
         if let Some(sc) = self.swapchain.take() {
             gl.delete_renderbuffer(sc.renderbuffer);
             gl.delete_framebuffer(sc.framebuffer);
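
Note: the new `make_current(..., None)` calls in `create_surface`, `enumerate_adapters`, and `swap_buffers` all maintain the same invariant: the context is only current between `lock()` and the guard's drop. Because the mutex is not reentrant, breaking that discipline would hang forever, which the one-second `try_lock_for` converts into a panic at the offending call site. A small self-contained demonstration of that failure mode (plain `parking_lot`, not wgpu-hal code):

```rust
use parking_lot::Mutex;
use std::time::Duration;

fn main() {
    let context = Mutex::new(());
    let _held = context.lock();
    // parking_lot mutexes are not reentrant: a second lock attempt on the
    // same thread deadlocks. With a timeout it becomes a detectable
    // failure instead of a silent hang.
    assert!(context.try_lock_for(Duration::from_millis(50)).is_none());
}
```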
This is most-likely a deadlcok."); + + // Make the EGL context current on this thread + self.egl + .make_current( + self.egl_display, + self.egl_pbuffer, + self.egl_pbuffer, + Some(self.egl_context), + ) + .expect("Cannot make EGL context current"); + + AdapterContextLock { + glow_context, + egl: &self.egl, + egl_display: self.egl_display, + egl_pbuffer: self.egl_pbuffer, + } + } +} + #[derive(Debug)] struct Inner { egl: Arc>, @@ -623,6 +712,11 @@ impl crate::Instance for Instance { } } + inner + .egl + .make_current(inner.display, inner.pbuffer, inner.pbuffer, None) + .unwrap(); + Ok(Surface { egl: Arc::clone(&inner.egl), raw, @@ -684,7 +778,20 @@ impl crate::Instance for Instance { gl.debug_message_callback(gl_debug_message_callback); } - super::Adapter::expose(gl).into_iter().collect() + inner + .egl + .make_current(inner.display, inner.pbuffer, inner.pbuffer, None) + .unwrap(); + + super::Adapter::expose(AdapterContext { + glow_context: Mutex::new(gl), + egl: inner.egl.clone(), + egl_display: inner.display, + egl_context: inner.context, + egl_pbuffer: inner.pbuffer, + }) + .into_iter() + .collect() } } @@ -759,7 +866,7 @@ impl Surface { crate::SurfaceError::Lost })?; self.egl - .make_current(self.display, self.pbuffer, self.pbuffer, Some(self.context)) + .make_current(self.display, self.pbuffer, self.pbuffer, None) .map_err(|e| { log::error!("make_current(null) failed: {}", e); crate::SurfaceError::Lost @@ -791,7 +898,7 @@ impl crate::Surface for Surface { } let format_desc = device.shared.describe_texture_format(config.format); - let gl = &device.shared.context; + let gl = &device.shared.context.lock(); let renderbuffer = gl.create_renderbuffer().unwrap(); gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)); gl.renderbuffer_storage( @@ -824,7 +931,7 @@ impl crate::Surface for Surface { } unsafe fn unconfigure(&mut self, device: &super::Device) { - let gl = &device.shared.context; + let gl = &device.shared.context.lock(); if let Some(sc) = self.swapchain.take() { gl.delete_renderbuffer(sc.renderbuffer); gl.delete_framebuffer(sc.framebuffer); diff --git a/wgpu-hal/src/gles/mod.rs b/wgpu-hal/src/gles/mod.rs index 8374505ba4d..144e9bd725e 100644 --- a/wgpu-hal/src/gles/mod.rs +++ b/wgpu-hal/src/gles/mod.rs @@ -66,7 +66,7 @@ mod device; mod queue; #[cfg(not(target_arch = "wasm32"))] -use self::egl::{Instance, Surface}; +use self::egl::{AdapterContext, Instance, Surface}; use arrayvec::ArrayVec; @@ -161,7 +161,7 @@ struct TextureFormatDesc { } struct AdapterShared { - context: glow::Context, + context: AdapterContext, private_caps: PrivateCapabilities, workarounds: Workarounds, shading_language_version: naga::back::glsl::Version, diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 0f940523606..dcdbbad4dca 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -27,9 +27,7 @@ fn is_3d_target(target: super::BindTarget) -> bool { impl super::Queue { /// Performs a manual shader clear, used as a workaround for a clearing bug on mesa - unsafe fn perform_shader_clear(&self, draw_buffer: u32, color: [f32; 4]) { - let gl = &self.shared.context; - + unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) { gl.use_program(Some(self.shader_clear_program)); gl.uniform_4_f32( Some(&self.shader_clear_program_color_uniform_location), @@ -56,8 +54,7 @@ impl super::Queue { } } - unsafe fn reset_state(&self) { - let gl = &self.shared.context; + unsafe fn reset_state(&self, gl: &glow::Context) { gl.use_program(None); 
diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs
index 0f940523606..dcdbbad4dca 100644
--- a/wgpu-hal/src/gles/queue.rs
+++ b/wgpu-hal/src/gles/queue.rs
@@ -27,9 +27,7 @@ fn is_3d_target(target: super::BindTarget) -> bool {
 
 impl super::Queue {
     /// Performs a manual shader clear, used as a workaround for a clearing bug on mesa
-    unsafe fn perform_shader_clear(&self, draw_buffer: u32, color: [f32; 4]) {
-        let gl = &self.shared.context;
-
+    unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) {
         gl.use_program(Some(self.shader_clear_program));
         gl.uniform_4_f32(
             Some(&self.shader_clear_program_color_uniform_location),
@@ -56,8 +54,7 @@ impl super::Queue {
         }
     }
 
-    unsafe fn reset_state(&self) {
-        let gl = &self.shared.context;
+    unsafe fn reset_state(&self, gl: &glow::Context) {
         gl.use_program(None);
         gl.bind_framebuffer(glow::FRAMEBUFFER, None);
         gl.disable(glow::DEPTH_TEST);
@@ -71,8 +68,13 @@ impl super::Queue {
         }
     }
 
-    unsafe fn set_attachment(&self, fbo_target: u32, attachment: u32, view: &super::TextureView) {
-        let gl = &self.shared.context;
+    unsafe fn set_attachment(
+        &self,
+        gl: &glow::Context,
+        fbo_target: u32,
+        attachment: u32,
+        view: &super::TextureView,
+    ) {
         match view.inner {
             super::TextureInner::Renderbuffer { raw } => {
                 gl.framebuffer_renderbuffer(fbo_target, attachment, glow::RENDERBUFFER, Some(raw));
@@ -99,8 +101,13 @@ impl super::Queue {
         }
     }
 
-    unsafe fn process(&mut self, command: &C, data_bytes: &[u8], data_words: &[u32]) {
-        let gl = &self.shared.context;
+    unsafe fn process(
+        &mut self,
+        gl: &glow::Context,
+        command: &C,
+        data_bytes: &[u8],
+        data_words: &[u32],
+    ) {
         match *command {
             C::Draw {
                 topology,
@@ -551,7 +558,7 @@ impl super::Queue {
                 attachment,
                 ref view,
             } => {
-                self.set_attachment(glow::DRAW_FRAMEBUFFER, attachment, view);
+                self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view);
             }
             C::ResolveAttachment {
                 attachment,
@@ -561,7 +568,7 @@ impl super::Queue {
                 gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo));
                 gl.read_buffer(attachment);
                 gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo));
-                self.set_attachment(glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst);
+                self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst);
                 gl.blit_framebuffer(
                     0,
                     0,
@@ -601,7 +608,7 @@ impl super::Queue {
                     .contains(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR)
                     && is_srgb
                 {
-                    self.perform_shader_clear(draw_buffer, *color);
+                    self.perform_shader_clear(gl, draw_buffer, *color);
                 } else {
                     gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color);
                 }
@@ -933,25 +940,22 @@ impl crate::Queue<super::Api> for super::Queue {
         command_buffers: &[&super::CommandBuffer],
         signal_fence: Option<(&mut super::Fence, crate::FenceValue)>,
     ) -> Result<(), crate::DeviceError> {
-        self.reset_state();
+        let shared = self.shared.clone();
+        let gl = &shared.context.lock();
+        self.reset_state(gl);
         for cmd_buf in command_buffers.iter() {
             if let Some(ref label) = cmd_buf.label {
-                self.shared.context.push_debug_group(
-                    glow::DEBUG_SOURCE_APPLICATION,
-                    DEBUG_ID,
-                    label,
-                );
+                gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label);
             }
             for command in cmd_buf.commands.iter() {
-                self.process(command, &cmd_buf.data_bytes, &cmd_buf.data_words);
+                self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.data_words);
             }
             if cmd_buf.label.is_some() {
-                self.shared.context.pop_debug_group();
+                gl.pop_debug_group();
             }
         }
         if let Some((fence, value)) = signal_fence {
-            let gl = &self.shared.context;
             fence.maintain(gl);
             let sync = gl
                 .fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0)
@@ -967,7 +971,7 @@ impl crate::Queue<super::Api> for super::Queue {
         surface: &mut super::Surface,
         texture: super::Texture,
     ) -> Result<(), crate::SurfaceError> {
-        let gl = &self.shared.context;
+        let gl = &self.shared.context.get_without_egl_lock();
         surface.present(texture, gl)
     }
}
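
Note: two details here are easy to miss. In `submit`, the `Arc` is cloned before locking so the guard borrows a local rather than `self.shared`; otherwise the shared borrow of `self` would conflict with the `&mut self` calls to `process`. In `present`, `get_without_egl_lock` is used because the surface code manages EGL currency itself — the `swap_buffers` hunk above now restores the no-context state when it finishes. A toy sketch of the borrow-checker issue the clone avoids, with placeholder types:

```rust
use parking_lot::Mutex;
use std::sync::Arc;

struct Shared {
    context: Mutex<u32>,
}

struct Queue {
    shared: Arc<Shared>,
    submissions: u32,
}

impl Queue {
    fn process(&mut self, gl: &u32) {
        self.submissions += *gl;
    }

    fn submit(&mut self) {
        // Clone the Arc so the guard borrows a local, not `self.shared`;
        // otherwise the shared borrow of `self` would conflict with the
        // `&mut self` call to `process` below.
        let shared = self.shared.clone();
        let gl = &shared.context.lock();
        self.process(gl);
    }
}

fn main() {
    let mut queue = Queue {
        shared: Arc::new(Shared { context: Mutex::new(1) }),
        submissions: 0,
    };
    queue.submit();
    assert_eq!(queue.submissions, 1);
}
```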