From 77b5a864b318f841b6c76eb220895d0065b1c702 Mon Sep 17 00:00:00 2001
From: yanchith
Date: Thu, 12 Dec 2019 15:43:16 +0100
Subject: [PATCH] Deduplicate Bind Group Layouts on creation

To make later bind group layout compatibility checks simple (and
cheaper), deduplicate bind group layouts on creation. If two bind
group layouts with the same descriptors are requested, only one is
created and the same id is returned for both.
---
 wgpu-core/src/binding_model.rs |  2 +-
 wgpu-core/src/device.rs        |  9 ++++++
 wgpu-core/src/hub.rs           | 54 +++++++++++++++++++++-------------
 3 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs
index 99342194b8d..9c219cee843 100644
--- a/wgpu-core/src/binding_model.rs
+++ b/wgpu-core/src/binding_model.rs
@@ -41,7 +41,7 @@ pub enum BindingType {
 }
 
 #[repr(C)]
-#[derive(Clone, Debug, Hash)]
+#[derive(Clone, Debug, Hash, PartialEq)]
 pub struct BindGroupLayoutBinding {
     pub binding: u32,
     pub visibility: ShaderStage,
diff --git a/wgpu-core/src/device.rs b/wgpu-core/src/device.rs
index e5ddab3cb67..a8c0bf8b8aa 100644
--- a/wgpu-core/src/device.rs
+++ b/wgpu-core/src/device.rs
@@ -1182,6 +1182,15 @@ impl<F: IdentityFilter<BindGroupLayoutId>> Global<F> {
         let hub = B::hub(self);
         let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) };
 
+        {
+            let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token);
+            for (id, value) in bind_group_layout_guard.iter() {
+                if &value.bindings[..] == bindings {
+                    return id;
+                }
+            }
+        }
+
         let raw_bindings = bindings
             .iter()
             .map(|binding| hal::pso::DescriptorSetLayoutBinding {
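
The device.rs hunk above carries the actual deduplication: before a new
bind group layout is created, the registry of live layouts is scanned
and, on a binding-for-binding match, the existing id is returned. The
standalone sketch below illustrates the same create-time deduplication
outside of wgpu-core; LayoutEntry, Layout, and Registry are simplified
stand-ins invented for illustration, and plain u32 indices replace
wgpu's tracked ids.

    // Minimal sketch of create-time deduplication, mirroring the patch.
    // LayoutEntry and Registry are stand-ins for wgpu-core's
    // BindGroupLayoutBinding and hub storage; ids are plain u32s here.

    #[derive(Clone, Debug, Hash, PartialEq)]
    struct LayoutEntry {
        binding: u32,
        visibility: u32,
    }

    struct Layout {
        bindings: Vec<LayoutEntry>,
    }

    #[derive(Default)]
    struct Registry {
        layouts: Vec<Layout>, // the index doubles as the id
    }

    impl Registry {
        // Return the id of an existing layout with identical bindings,
        // or create a new one. This is the same linear scan the patch
        // performs over bind_group_layout_guard.iter().
        fn create_layout(&mut self, bindings: &[LayoutEntry]) -> u32 {
            for (id, layout) in self.layouts.iter().enumerate() {
                if &layout.bindings[..] == bindings {
                    return id as u32; // deduplicated: reuse the existing id
                }
            }
            self.layouts.push(Layout {
                bindings: bindings.to_vec(),
            });
            (self.layouts.len() - 1) as u32
        }
    }

    fn main() {
        let mut registry = Registry::default();
        let desc = [LayoutEntry { binding: 0, visibility: 1 }];
        let a = registry.create_layout(&desc);
        let b = registry.create_layout(&desc); // same descriptor -> same id
        assert_eq!(a, b);
        println!("both requests got layout id {}", a);
    }

The scan is linear in the number of live layouts, which is presumably
acceptable here because layouts are created rarely, while the cheaper
checks that deduplication enables run far more often.
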
diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs
index 909fb1719bb..b281ec3752d 100644
--- a/wgpu-core/src/hub.rs
+++ b/wgpu-core/src/hub.rs
@@ -89,55 +89,67 @@ impl IdentityManager {
 #[derive(Debug)]
 pub struct Storage<T, I: TypedId> {
     //TODO: consider concurrent hashmap?
-    map: VecMap<(T, Epoch)>,
+    map: VecMap<(T, Epoch, Backend)>,
     _phantom: PhantomData<I>,
 }
 
 impl<T, I: TypedId> ops::Index<I> for Storage<T, I> {
     type Output = T;
     fn index(&self, id: I) -> &T {
-        let (index, epoch, _) = id.unzip();
-        let (ref value, storage_epoch) = self.map[index as usize];
+        let (index, epoch, backend) = id.unzip();
+        let (ref value, storage_epoch, storage_backend) = self.map[index as usize];
         assert_eq!(epoch, storage_epoch);
+        assert_eq!(backend, storage_backend);
         value
     }
 }
 
 impl<T, I: TypedId> ops::IndexMut<I> for Storage<T, I> {
     fn index_mut(&mut self, id: I) -> &mut T {
-        let (index, epoch, _) = id.unzip();
-        let (ref mut value, storage_epoch) = self.map[index as usize];
+        let (index, epoch, backend) = id.unzip();
+        let (ref mut value, storage_epoch, storage_backend) = self.map[index as usize];
         assert_eq!(epoch, storage_epoch);
+        assert_eq!(backend, storage_backend);
         value
     }
 }
 
 impl<T, I: TypedId> Storage<T, I> {
     pub fn contains(&self, id: I) -> bool {
-        let (index, epoch, _) = id.unzip();
+        let (index, epoch, backend) = id.unzip();
         match self.map.get(index as usize) {
-            Some(&(_, storage_epoch)) => epoch == storage_epoch,
+            Some(&(_, storage_epoch, storage_backend)) => {
+                epoch == storage_epoch && backend == storage_backend
+            },
             None => false,
         }
     }
 
     pub fn insert(&mut self, id: I, value: T) -> Option<T> {
-        let (index, epoch, _) = id.unzip();
-        let old = self.map.insert(index as usize, (value, epoch));
-        old.map(|(v, _storage_epoch)| v)
+        let (index, epoch, backend) = id.unzip();
+        let old = self.map.insert(index as usize, (value, epoch, backend));
+        old.map(|(v, _storage_epoch, _storage_backend)| v)
     }
 
     pub fn remove(&mut self, id: I) -> Option<T> {
-        let (index, epoch, _) = id.unzip();
+        let (index, epoch, backend) = id.unzip();
         self.map
             .remove(index as usize)
-            .map(|(value, storage_epoch)| {
+            .map(|(value, storage_epoch, storage_backend)| {
                 assert_eq!(epoch, storage_epoch);
+                assert_eq!(backend, storage_backend);
                 value
             })
     }
-}
+    pub fn iter(&self) -> impl Iterator<Item = (I, &T)> {
+        self.map
+            .iter()
+            .map(|(index, (value, storage_epoch, storage_backend))| {
+                (I::zip(index as Index, *storage_epoch, *storage_backend), value)
+            })
+    }
+}
 
 /// Type system for enforcing the lock order on shared HUB structures.
 /// If type A implements `Access<B>`, that means we are allowed to proceed
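
The Storage hunk above does two things. First, the stored tuple grows
from (T, Epoch) to (T, Epoch, Backend), so every lookup can assert that
an id's backend matches the entry it points at, not just its epoch.
Second, the new iter method rebuilds a complete id for each entry via
I::zip, which is what allows the deduplication scan in device.rs to
return an existing id. A rough standalone sketch of both pieces
follows; it uses a HashMap and a hypothetical Id struct in place of
wgpu-core's VecMap and its packed TypedId, so it only approximates the
real layout.

    // Sketch of the (value, epoch, backend) bookkeeping Storage now keeps.
    // Backend, Id, and Storage here are simplified stand-ins for the
    // wgpu-core types.

    use std::collections::HashMap;

    type Index = u32;
    type Epoch = u32;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Backend {
        Vulkan,
        Metal,
    }

    #[derive(Clone, Copy, Debug)]
    struct Id {
        index: Index,
        epoch: Epoch,
        backend: Backend,
    }

    struct Storage<T> {
        map: HashMap<Index, (T, Epoch, Backend)>,
    }

    impl<T> Storage<T> {
        fn get(&self, id: Id) -> &T {
            let (ref value, epoch, backend) = self.map[&id.index];
            // With the backend stored alongside the epoch, a stale id or
            // an id minted for another backend cannot alias a live entry.
            assert_eq!(id.epoch, epoch);
            assert_eq!(id.backend, backend);
            value
        }

        // Counterpart of the new Storage::iter: rebuild a full id for
        // every entry so callers (like the dedup scan) can return it.
        fn iter(&self) -> impl Iterator<Item = (Id, &T)> {
            self.map
                .iter()
                .map(|(&index, &(ref value, epoch, backend))| {
                    (Id { index, epoch, backend }, value)
                })
        }
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert(0, ("buffer".to_string(), 1, Backend::Vulkan));
        let storage = Storage { map };
        for (id, value) in storage.iter() {
            assert_eq!(storage.get(id), value);
            println!("{:?} -> {}", id, value);
        }
    }

In wgpu-core itself, index, epoch, and backend are packed into a single
integer id by TypedId::zip and recovered by unzip, which is why the
real iter has to zip the components back together.
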
@@ -409,7 +421,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
 
         let mut devices = self.devices.data.write();
 
-        for (_, (sampler, _)) in self.samplers.data.write().map.drain() {
+        for (_, (sampler, _, _)) in self.samplers.data.write().map.drain() {
             unsafe {
                 devices[sampler.device_id.value]
                     .raw
@@ -418,7 +430,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
         }
         {
             let textures = self.textures.data.read();
-            for (_, (texture_view, _)) in self.texture_views.data.write().map.drain() {
+            for (_, (texture_view, _, _)) in self.texture_views.data.write().map.drain() {
                 match texture_view.inner {
                     TextureViewInner::Native { raw, source_id } => {
                         let device = &devices[textures[source_id.value].device_id.value];
@@ -430,14 +442,14 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
                 }
             }
         }
-        for (_, (texture, _)) in self.textures.data.write().map.drain() {
+        for (_, (texture, _, _)) in self.textures.data.write().map.drain() {
             unsafe {
                 devices[texture.device_id.value]
                     .raw
                     .destroy_image(texture.raw);
             }
         }
-        for (_, (buffer, _)) in self.buffers.data.write().map.drain() {
+        for (_, (buffer, _, _)) in self.buffers.data.write().map.drain() {
             //TODO: unmap if needed
             unsafe {
                 devices[buffer.device_id.value]
@@ -445,12 +457,12 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
                     .destroy_buffer(buffer.raw);
             }
         }
-        for (_, (command_buffer, _)) in self.command_buffers.data.write().map.drain() {
+        for (_, (command_buffer, _, _)) in self.command_buffers.data.write().map.drain() {
             devices[command_buffer.device_id.value]
                 .com_allocator
                 .after_submit(command_buffer, 0);
         }
-        for (_, (bind_group, _)) in self.bind_groups.data.write().map.drain() {
+        for (_, (bind_group, _, _)) in self.bind_groups.data.write().map.drain() {
             let device = &devices[bind_group.device_id.value];
             device.destroy_bind_group(bind_group);
         }
@@ -466,7 +478,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
         // self.swap_chains
         // self.adapters
 
-        for (_, (device, _)) in devices.map.drain() {
+        for (_, (device, _, _)) in devices.map.drain() {
             device.dispose();
         }
     }
@@ -511,7 +523,7 @@ impl<F> Global<F> {
         } = self;
         drop(hubs);
         // destroy surfaces
-        for (_, (surface, _)) in surfaces.data.write().map.drain() {
+        for (_, (surface, _, _)) in surfaces.data.write().map.drain() {
             instance.destroy_surface(surface);
         }
     }
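
With layouts deduplicated at creation, the "later bind group layout
compatibility checks" from the commit message can reduce to comparing
ids rather than comparing binding lists element by element. The sketch
below shows that payoff under the same simplifying assumptions as
before: BindGroup, PipelineLayout, and the u32 BindGroupLayoutId are
hypothetical stand-ins, not wgpu-core's actual types.

    // Sketch: once layouts are deduplicated at creation, "compatible"
    // can be checked by comparing ids instead of full binding lists.

    type BindGroupLayoutId = u32;

    struct BindGroup {
        layout_id: BindGroupLayoutId,
    }

    struct PipelineLayout {
        bind_group_layout_ids: Vec<BindGroupLayoutId>,
    }

    // Before dedup, this would have to fetch both layouts and compare
    // their binding arrays element by element; after dedup, equal ids
    // imply equal (in fact, identical) layouts.
    fn bind_group_is_compatible(
        pipeline: &PipelineLayout,
        group_index: usize,
        group: &BindGroup,
    ) -> bool {
        pipeline.bind_group_layout_ids[group_index] == group.layout_id
    }

    fn main() {
        let pipeline = PipelineLayout { bind_group_layout_ids: vec![7, 3] };
        let group = BindGroup { layout_id: 3 };
        assert!(bind_group_is_compatible(&pipeline, 1, &group));
        println!("bind group is compatible with slot 1");
    }
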