Deduplicate Bind Group Layouts on creation
To make later bind group layout compatibility checks simpler (and
cheaper), deduplicate them on creation. If two bind group layouts with
the same descriptors are requested, only one is created, and the same
id is returned for both.
yanchith committed Dec 12, 2019
1 parent 6b097ab commit 77b5a86
Showing 3 changed files with 43 additions and 22 deletions.
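The change in a nutshell, as a minimal standalone sketch: creation first scans the already-registered layouts for an equal binding list and returns the existing id on a hit. The Registry type and plain usize ids below are simplified stand-ins for illustration, not wgpu-core's actual hub/Storage machinery; the real lookup is the device.rs hunk further down.

#[derive(Clone, Debug, PartialEq)]
struct BindGroupLayoutBinding {
    binding: u32,
    // visibility, ty, ... elided for brevity
}

struct BindGroupLayout {
    bindings: Vec<BindGroupLayoutBinding>,
}

#[derive(Default)]
struct Registry {
    layouts: Vec<BindGroupLayout>, // the index doubles as the id
}

impl Registry {
    // Return the id of an existing layout with identical bindings, or
    // create a new layout; mirrors the lookup added in device.rs.
    fn create_bind_group_layout(&mut self, bindings: &[BindGroupLayoutBinding]) -> usize {
        // Linear scan over existing layouts, as in the real change;
        // fine while the number of distinct layouts stays small.
        if let Some(id) = self.layouts.iter().position(|l| l.bindings == bindings) {
            return id;
        }
        self.layouts.push(BindGroupLayout {
            bindings: bindings.to_vec(),
        });
        self.layouts.len() - 1
    }
}

fn main() {
    let mut registry = Registry::default();
    let desc = [BindGroupLayoutBinding { binding: 0 }];
    let a = registry.create_bind_group_layout(&desc);
    let b = registry.create_bind_group_layout(&desc);
    assert_eq!(a, b); // identical descriptors yield the same id
}

With deduplication at creation time, later layout compatibility checks can compare ids directly instead of walking two descriptor lists.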
2 changes: 1 addition & 1 deletion wgpu-core/src/binding_model.rs
@@ -41,7 +41,7 @@ pub enum BindingType {
 }

 #[repr(C)]
-#[derive(Clone, Debug, Hash)]
+#[derive(Clone, Debug, Hash, PartialEq)]
 pub struct BindGroupLayoutBinding {
     pub binding: u32,
     pub visibility: ShaderStage,
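A note on the PartialEq derive above: the dedup lookup in device.rs compares binding slices with ==, and slice equality is element-wise, so the element type must implement PartialEq. A hypothetical minimal illustration:

#[derive(PartialEq)]
struct Binding {
    binding: u32,
}

fn main() {
    let a = [Binding { binding: 0 }];
    let b = [Binding { binding: 0 }];
    // Element-wise slice comparison; fails to compile without the derive.
    assert!(a[..] == b[..]);
}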
9 changes: 9 additions & 0 deletions wgpu-core/src/device.rs
@@ -1182,6 +1182,15 @@ impl<F: IdentityFilter<BindGroupLayoutId>> Global<F> {
         let hub = B::hub(self);
         let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) };

+        {
+            let (bind_group_layout_guard, _) = hub.bind_group_layouts.read(&mut token);
+            for (id, value) in bind_group_layout_guard.iter() {
+                if &value.bindings[..] == bindings {
+                    return id;
+                }
+            }
+        }
+
         let raw_bindings = bindings
             .iter()
             .map(|binding| hal::pso::DescriptorSetLayoutBinding {
54 changes: 33 additions & 21 deletions wgpu-core/src/hub.rs
@@ -89,55 +89,67 @@ impl IdentityManager {
 #[derive(Debug)]
 pub struct Storage<T, I: TypedId> {
     //TODO: consider concurrent hashmap?
-    map: VecMap<(T, Epoch)>,
+    map: VecMap<(T, Epoch, Backend)>,
     _phantom: PhantomData<I>,
 }

 impl<T, I: TypedId> ops::Index<I> for Storage<T, I> {
     type Output = T;
     fn index(&self, id: I) -> &T {
-        let (index, epoch, _) = id.unzip();
-        let (ref value, storage_epoch) = self.map[index as usize];
+        let (index, epoch, backend) = id.unzip();
+        let (ref value, storage_epoch, storage_backend) = self.map[index as usize];
         assert_eq!(epoch, storage_epoch);
+        assert_eq!(backend, storage_backend);
         value
     }
 }

 impl<T, I: TypedId> ops::IndexMut<I> for Storage<T, I> {
     fn index_mut(&mut self, id: I) -> &mut T {
-        let (index, epoch, _) = id.unzip();
-        let (ref mut value, storage_epoch) = self.map[index as usize];
+        let (index, epoch, backend) = id.unzip();
+        let (ref mut value, storage_epoch, storage_backend) = self.map[index as usize];
         assert_eq!(epoch, storage_epoch);
+        assert_eq!(backend, storage_backend);
         value
     }
 }

 impl<T, I: TypedId> Storage<T, I> {
     pub fn contains(&self, id: I) -> bool {
-        let (index, epoch, _) = id.unzip();
+        let (index, epoch, backend) = id.unzip();
         match self.map.get(index as usize) {
-            Some(&(_, storage_epoch)) => epoch == storage_epoch,
+            Some(&(_, storage_epoch, storage_backend)) => {
+                epoch == storage_epoch && backend == storage_backend
+            },
             None => false,
         }
     }

     pub fn insert(&mut self, id: I, value: T) -> Option<T> {
-        let (index, epoch, _) = id.unzip();
-        let old = self.map.insert(index as usize, (value, epoch));
-        old.map(|(v, _storage_epoch)| v)
+        let (index, epoch, backend) = id.unzip();
+        let old = self.map.insert(index as usize, (value, epoch, backend));
+        old.map(|(v, _storage_epoch, _storage_backend)| v)
     }

     pub fn remove(&mut self, id: I) -> Option<T> {
-        let (index, epoch, _) = id.unzip();
+        let (index, epoch, backend) = id.unzip();
         self.map
             .remove(index as usize)
-            .map(|(value, storage_epoch)| {
+            .map(|(value, storage_epoch, storage_backend)| {
                 assert_eq!(epoch, storage_epoch);
+                assert_eq!(backend, storage_backend);
                 value
             })
     }
+
+    pub fn iter(&self) -> impl Iterator<Item = (I, &T)> {
+        self.map
+            .iter()
+            .map(|(index, (value, storage_epoch, storage_backend))| {
+                (I::zip(index as Index, *storage_epoch, *storage_backend), value)
+            })
+    }
 }

 /// Type system for enforcing the lock order on shared HUB structures.
 /// If type A implements `Access<B>`, that means we are allowed to proceed
@@ -409,7 +421,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {

         let mut devices = self.devices.data.write();

-        for (_, (sampler, _)) in self.samplers.data.write().map.drain() {
+        for (_, (sampler, _, _)) in self.samplers.data.write().map.drain() {
             unsafe {
                 devices[sampler.device_id.value]
                     .raw
@@ -418,7 +430,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
             }
         }
         {
             let textures = self.textures.data.read();
-            for (_, (texture_view, _)) in self.texture_views.data.write().map.drain() {
+            for (_, (texture_view, _, _)) in self.texture_views.data.write().map.drain() {
                 match texture_view.inner {
                     TextureViewInner::Native { raw, source_id } => {
                         let device = &devices[textures[source_id.value].device_id.value];
@@ -430,27 +442,27 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
                 }
             }
         }
-        for (_, (texture, _)) in self.textures.data.write().map.drain() {
+        for (_, (texture, _, _)) in self.textures.data.write().map.drain() {
             unsafe {
                 devices[texture.device_id.value]
                     .raw
                     .destroy_image(texture.raw);
             }
         }
-        for (_, (buffer, _)) in self.buffers.data.write().map.drain() {
+        for (_, (buffer, _, _)) in self.buffers.data.write().map.drain() {
             //TODO: unmap if needed
             unsafe {
                 devices[buffer.device_id.value]
                     .raw
                     .destroy_buffer(buffer.raw);
             }
         }
-        for (_, (command_buffer, _)) in self.command_buffers.data.write().map.drain() {
+        for (_, (command_buffer, _, _)) in self.command_buffers.data.write().map.drain() {
             devices[command_buffer.device_id.value]
                 .com_allocator
                 .after_submit(command_buffer, 0);
         }
-        for (_, (bind_group, _)) in self.bind_groups.data.write().map.drain() {
+        for (_, (bind_group, _, _)) in self.bind_groups.data.write().map.drain() {
            let device = &devices[bind_group.device_id.value];
            device.destroy_bind_group(bind_group);
        }
@@ -466,7 +478,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
         // self.swap_chains
         // self.adapters

-        for (_, (device, _)) in devices.map.drain() {
+        for (_, (device, _, _)) in devices.map.drain() {
             device.dispose();
         }
     }
@@ -511,7 +523,7 @@ impl<F: Default> Global<F> {
         } = self;
         drop(hubs);
         // destroy surfaces
-        for (_, (surface, _)) in surfaces.data.write().map.drain() {
+        for (_, (surface, _, _)) in surfaces.data.write().map.drain() {
             instance.destroy_surface(surface);
         }
     }
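Why Storage's value tuple grows a Backend field: the new iter has to hand back complete typed ids, and an id is the triple (index, epoch, backend), so all three parts must be recoverable from the map alone. A sketch with simplified stand-in types (Id, Backend, and the Vec-backed map are illustrative, not the real Index/Epoch/TypedId definitions):

type Index = usize;
type Epoch = u32;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Backend {
    Vulkan,
    Metal,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct Id(Index, Epoch, Backend);

struct Storage<T> {
    // One slot per index; an occupied slot remembers its epoch AND
    // backend, so a complete Id can be rebuilt from the map alone.
    map: Vec<Option<(T, Epoch, Backend)>>,
}

impl<T> Storage<T> {
    fn iter(&self) -> impl Iterator<Item = (Id, &T)> {
        self.map.iter().enumerate().filter_map(|(index, slot)| {
            slot.as_ref()
                .map(|(value, epoch, backend)| (Id(index, *epoch, *backend), value))
        })
    }
}

fn main() {
    let storage = Storage {
        map: vec![Some(("layout-a", 1, Backend::Vulkan)), None],
    };
    for (id, value) in storage.iter() {
        // Each yielded id carries index, epoch, and backend.
        println!("{:?} -> {}", id, value);
    }
}

Storing the backend also lets Index, IndexMut, contains, and remove assert that an id minted for one backend is never used against another backend's storage.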