diff --git a/.deny.toml b/.deny.toml index 8448c81e85..718836d0e7 100644 --- a/.deny.toml +++ b/.deny.toml @@ -4,10 +4,10 @@ skip-tree = [ # We never enable loom in any of our dependencies but it causes dupes { name = "loom", version = "0.7.2" }, { name = "windows-sys", version = "0.45" }, - { name = "winit", version = "0.27" }, { name = "winit", version = "0.29" }, { name = "rustc_version", version = "0.2.3" }, { name = "sourcemap", version = "7.1.1" }, + { name = "miniz_oxide", version = "0.7.4" }, ] skip = [ { name = "hlsl-snapshots", version = "0.1.0" }, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 009cd30564..1cae09787e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,10 +1,17 @@ * @gfx-rs/wgpu -/cts_runner/ @gfx-rs/deno @gfx-rs/wgpu -/deno_webgpu/ @gfx-rs/deno @gfx-rs/wgpu +/cts_runner/ @crowlkats @gfx-rs/wgpu +/deno_webgpu/ @crowlkats @gfx-rs/wgpu /naga/ @gfx-rs/naga /naga-cli/ @gfx-rs/naga +# Both wgpu and naga teams are owners of naga infrastructure so +# either team can review changes to deps and docs. +naga/Cargo.toml @gfx-rs/wgpu @gfx-rs/naga +naga/README.md @gfx-rs/wgpu @gfx-rs/naga +naga/CHANGELOG.md @gfx-rs/wgpu @gfx-rs/naga +naga-cli/Cargo.toml @gfx-rs/wgpu @gfx-rs/naga + # We leave the codeowners empty for the changelog, so naga changes # don't trigger wgpu reviews and vise versa. /CHANGELOG.md diff --git a/.github/dependabot.yml b/.github/dependabot.yml index edfc210ef8..dceafcc145 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,7 +16,7 @@ updates: groups: patch-updates: patterns: - - "*" + - "*" update-types: - "minor" - "patch" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 78ff542a7a..9570d13b85 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,21 +13,23 @@ env: # # Sourced from https://vulkan.lunarg.com/sdk/home#linux - VULKAN_SDK_VERSION: "1.3.268" + # + # We don't include the 4th version number, as it's not used in any URL. 
+ VULKAN_SDK_VERSION: "1.3.290" # Sourced from https://www.nuget.org/packages/Microsoft.Direct3D.WARP - WARP_VERSION: "1.0.8" + WARP_VERSION: "1.0.13" # Sourced from https://github.com/microsoft/DirectXShaderCompiler/releases # # Must also be changed in shaders.yaml - DXC_RELEASE: "v1.7.2308" - DXC_FILENAME: "dxc_2023_08_14.zip" + DXC_RELEASE: "v1.8.2407" + DXC_FILENAME: "dxc_2024_07_31_clang_cl.zip" # Sourced from https://archive.mesa3d.org/. Bumping this requires # updating the mesa build in https://github.com/gfx-rs/ci-build and creating a new release. - MESA_VERSION: "23.3.1" + MESA_VERSION: "24.2.3" # Corresponds to https://github.com/gfx-rs/ci-build/releases - CI_BINARY_BUILD: "build18" + CI_BINARY_BUILD: "build19" # We sometimes need nightly to use special things in CI. # @@ -220,20 +222,18 @@ jobs: # build docs cargo doc --target ${{ matrix.target }} --all-features --no-deps - # wgpu-core docs are not feasible due to - # - # - name: check private item docs - # if: matrix.kind == 'native' - # shell: bash - # run: | - # set -e - # - # # wgpu_core package - # cargo +${{ env.DOCS_RUST_VERSION }} doc --target ${{ matrix.target }} \ - # --package wgpu-core \ - # --package wgpu-hal \ - # --package naga \ - # --all-features --no-deps --document-private-items + - name: check private item docs + if: matrix.kind == 'native' + shell: bash + run: | + set -e + + # wgpu_core package + cargo doc --target ${{ matrix.target }} \ + --package wgpu-core \ + --package wgpu-hal \ + --package naga \ + --all-features --no-deps --document-private-items # We run minimal checks on the MSRV of the core crates, ensuring that # its dependency tree does not cause issues for firefox. 
@@ -313,7 +313,6 @@ jobs: rustup override set ${{ env.CORE_MSRV }} cargo -V - # Use special toolchain for rustdoc, see https://github.com/gfx-rs/wgpu/issues/4905 - name: Install Nightly Toolchain run: | rustup toolchain install ${{ env.NIGHTLY_VERSION }} --no-self-update --profile=minimal --component clippy @@ -444,7 +443,7 @@ jobs: dxc --version curl.exe -L --retry 5 https://www.nuget.org/api/v2/package/Microsoft.Direct3D.WARP/$WARP_VERSION -o warp.zip - 7z.exe e warp.zip -owarp build/native/amd64/d3d10warp.dll + 7z.exe e warp.zip -owarp build/native/bin/x64/d3d10warp.dll mkdir -p target/llvm-cov-target/debug/deps @@ -556,7 +555,7 @@ jobs: cargo llvm-cov report --lcov --output-path lcov.info - name: upload coverage report to codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 if: steps.coverage.outcome == 'success' with: files: lcov.info @@ -621,7 +620,7 @@ jobs: cargo fmt --manifest-path xtask/Cargo.toml -- --check - name: Check for typos - uses: crate-ci/typos@v1.23.6 + uses: crate-ci/typos@v1.24.6 check-cts-runner: # runtime is normally 2 minutes diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b1c83e53b6..9e5dc27165 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -41,7 +41,7 @@ jobs: if: ${{ failure() }} - name: Deploy the docs - uses: JamesIves/github-pages-deploy-action@v4.6.3 + uses: JamesIves/github-pages-deploy-action@v4.6.4 if: github.ref == 'refs/heads/trunk' with: token: ${{ secrets.WEB_DEPLOY }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index e8a6300240..266d4e7f51 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -41,7 +41,7 @@ jobs: run: cargo xtask run-wasm --no-serve - name: Deploy WebGPU examples - uses: JamesIves/github-pages-deploy-action@v4.6.3 + uses: JamesIves/github-pages-deploy-action@v4.6.4 if: github.ref == 'refs/heads/trunk' with: token: ${{ secrets.WEB_DEPLOY }} diff --git 
a/.github/workflows/shaders.yml b/.github/workflows/shaders.yml index 86ec5d0b2e..c326942a0e 100644 --- a/.github/workflows/shaders.yml +++ b/.github/workflows/shaders.yml @@ -9,13 +9,17 @@ on: env: # Sourced from https://vulkan.lunarg.com/sdk/home#linux - VULKAN_SDK_VERSION: "1.3.268" + # + # We don't include the 4th version number, as it's not used in any URL. + # + # Held back from 1.3.290 by https://github.com/gfx-rs/wgpu/issues/6307 + VULKAN_SDK_VERSION: "1.3.283" # Sourced from https://github.com/microsoft/DirectXShaderCompiler/releases # # Must also be changed in ci.yaml - DXC_RELEASE: "v1.7.2308" - DXC_FILENAME: "dxc_2023_08_14.zip" + DXC_RELEASE: "v1.8.2407" + DXC_FILENAME: "dxc_2024_07_31_clang_cl.zip" jobs: naga-validate-windows: diff --git a/CHANGELOG.md b/CHANGELOG.md index feb03c0c8f..225ae0d7a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,11 +27,12 @@ Top level categories: Bottom level categories: +- Naga - General - DX12 - Vulkan - Metal -- GLES +- GLES / OpenGL - WebGPU - Emscripten - Hal @@ -55,18 +56,53 @@ which we're hoping to build performance improvements upon in the future. By @wumpf in [#6069](https://github.com/gfx-rs/wgpu/pull/6069), [#6099](https://github.com/gfx-rs/wgpu/pull/6099), [#6100](https://github.com/gfx-rs/wgpu/pull/6100). +#### `wgpu`'s resources no longer have `.global_id()` getters + +`wgpu-core`'s internals no longer use nor need IDs and we are moving towards removing IDs +completely. This is a step in that direction. + +Current users of `.global_id()` are encouraged to make use of the `PartialEq`, `Eq`, `Hash`, `PartialOrd` and `Ord` +traits that have now been implemented for `wgpu` resources. + +By @teoxoy [#6134](https://github.com/gfx-rs/wgpu/pull/6134). + +#### `set_bind_group` now takes an `Option` for the bind group argument. + +https://gpuweb.github.io/gpuweb/#programmable-passes-bind-groups specifies that bindGroup +is nullable. This change is the start of implementing this part of the spec. 
Callers that +specify a `Some()` value should have unchanged behavior. Handling of `None` values still +needs to be implemented by backends. + +By @bradwerth [#6216](https://github.com/gfx-rs/wgpu/pull/6216). + ### New Features 64 bit integer atomic support in shaders. By @atlv24 in [#5383](https://github.com/gfx-rs/wgpu/pull/5383) #### Naga -* Support constant evaluation for `firstLeadingBit` and `firstTrailingBit` numeric built-ins in WGSL. Front-ends that translate to these built-ins also benefit from constant evaluation. By @ErichDonGubler in [#5101](https://github.com/gfx-rs/wgpu/pull/5101). +- Support constant evaluation for `firstLeadingBit` and `firstTrailingBit` numeric built-ins in WGSL. Front-ends that translate to these built-ins also benefit from constant evaluation. By @ErichDonGubler in [#5101](https://github.com/gfx-rs/wgpu/pull/5101). +- Add `first` and `either` sampling types for `@interpolate(flat, …)` in WGSL. By @ErichDonGubler in [#6181](https://github.com/gfx-rs/wgpu/pull/6181). +- Support for more atomic ops in the SPIR-V frontend. By @schell in [#5824](https://github.com/gfx-rs/wgpu/pull/5824). + +#### General + +- Add `VideoFrame` to `ExternalImageSource` enum. By @jprochazk in [#6170](https://github.com/gfx-rs/wgpu/pull/6170) + +#### Vulkan + +- Allow using [VK_GOOGLE_display_timing](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_GOOGLE_display_timing.html) unsafely with the `VULKAN_GOOGLE_DISPLAY_TIMING` feature. By @DJMcNab in [#6149](https://github.com/gfx-rs/wgpu/pull/6149) ### Bug Fixes - Fix incorrect hlsl image output type conversion. By @atlv24 in [#6123](https://github.com/gfx-rs/wgpu/pull/6123) +#### Naga + +- Accept only `vec3` (not `vecN`) for the `cross` built-in. By @ErichDonGubler in [#6171](https://github.com/gfx-rs/wgpu/pull/6171). +- Configure `SourceLanguage` when enabling debug info in SPV-out. 
By @kvark in [#6256](https://github.com/gfx-rs/wgpu/pull/6256) +- Per-polygon and flat inputs should not be considered subgroup uniform. By @magcius in [#6276](https://github.com/gfx-rs/wgpu/pull/6276). + #### General - If GL context creation fails retry with GLES. By @Rapdorian in [#5996](https://github.com/gfx-rs/wgpu/pull/5996) @@ -78,21 +114,52 @@ By @wumpf in [#6069](https://github.com/gfx-rs/wgpu/pull/6069), [#6099](https:// - Deduplicate bind group layouts that are created from pipelines with "auto" layouts. By @teoxoy [#6049](https://github.com/gfx-rs/wgpu/pull/6049) - Fix crash when dropping the surface after the device. By @wumpf in [#6052](https://github.com/gfx-rs/wgpu/pull/6052) - Fix error message that is thrown in create_render_pass to no longer say `compute_pass`. By @matthew-wong1 [#6041](https://github.com/gfx-rs/wgpu/pull/6041) +- Document `wgpu_hal` bounds-checking promises, and adapt `wgpu_core`'s lazy initialization logic to the slightly weaker-than-expected guarantees. By @jimblandy in [#6201](https://github.com/gfx-rs/wgpu/pull/6201) +- Raise validation error instead of panicking in `{Render,Compute}Pipeline::get_bind_group_layout` on native / WebGL. By @bgr360 in [#6280](https://github.com/gfx-rs/wgpu/pull/6280). +- **BREAKING**: Remove the last exposed C symbols in project, located in `wgpu_core::render::bundle::bundle_ffi`, to allow multiple versions of WGPU to compile together. By @ErichDonGubler in [#6272](https://github.com/gfx-rs/wgpu/pull/6272). + +#### GLES / OpenGL + +- Fix GL debug message callbacks not being properly cleaned up (causing UB). By @Imberflur in [#6114](https://github.com/gfx-rs/wgpu/pull/6114) + +#### WebGPU + +- Fix JS `TypeError` exception in `Instance::request_adapter` when browser doesn't support WebGPU but `wgpu` not compiled with `webgl` support. By @bgr360 in [#6197](https://github.com/gfx-rs/wgpu/pull/6197). + +#### Vulkan + +- Vulkan debug labels assumed no interior nul byte. 
By @DJMcNab in [#6257](https://github.com/gfx-rs/wgpu/pull/6257) +- Add `.index_type(vk::IndexType::NONE_KHR)` when creating `AccelerationStructureGeometryTrianglesDataKHR` in the raytraced triangle example to prevent a validation error. By @Vecvec in [#6282](https://github.com/gfx-rs/wgpu/pull/6282) ### Changes +- `wgpu_hal::gles::Adapter::new_external` now requires the context to be current when dropping the adapter and related objects. By @Imberflur in [#6114](https://github.com/gfx-rs/wgpu/pull/6114). - Reduce the amount of debug and trace logs emitted by wgpu-core and wgpu-hal. By @nical in [#6065](https://github.com/gfx-rs/wgpu/issues/6065) -- `Rg11b10Float` is renamed to `Rg11b10UFloat`. By @sagudev in [#6108](https://github.com/gfx-rs/wgpu/pull/6108) +- `Rg11b10Float` is renamed to `Rg11b10Ufloat`. By @sagudev in [#6108](https://github.com/gfx-rs/wgpu/pull/6108) + +#### HAL + +- Change the inconsistent `DropGuard` based API on Vulkan and GLES to a consistent, callback-based one. By @jerzywilczek in [#6164](https://github.com/gfx-rs/wgpu/pull/6164) + +### Documentation + +- Removed some OpenGL and Vulkan references from `wgpu-types` documentation. Fixed Storage texel types in examples. By @Nelarius in [#6271](https://github.com/gfx-rs/wgpu/pull/6271) ### Dependency Updates #### GLES - Replace `winapi` code in WGL wrapper to use the `windows` crate. By @MarijnS95 in [#6006](https://github.com/gfx-rs/wgpu/pull/6006) +- Update `glutin` to `0.31` with `glutin-winit` crate. By @MarijnS95 in [#6150](https://github.com/gfx-rs/wgpu/pull/6150) and [#6176](https://github.com/gfx-rs/wgpu/pull/6176) +- Implement `Adapter::new_external()` for WGL (just like EGL) to import an external OpenGL ES context. By @MarijnS95 in [#6152](https://github.com/gfx-rs/wgpu/pull/6152) #### DX12 -- Replace `winapi` code to use the `windows` crate. By @MarijnS95 in [#5956](https://github.com/gfx-rs/wgpu/pull/5956) +- Replace `winapi` code to use the `windows` crate. 
By @MarijnS95 in [#5956](https://github.com/gfx-rs/wgpu/pull/5956) and [#6173](https://github.com/gfx-rs/wgpu/pull/6173) + +#### HAL + +- Update `parking_lot` to `0.12`. By @mahkoh in [#6287](https://github.com/gfx-rs/wgpu/pull/6287) ## 22.0.0 (2024-07-17) diff --git a/Cargo.lock b/Cargo.lock index 17bbe72b55..f7a1591099 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,9 +20,9 @@ checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046" [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] @@ -81,10 +81,10 @@ dependencies = [ "jni-sys", "libc", "log", - "ndk 0.8.0", + "ndk", "ndk-context", - "ndk-sys 0.5.0+25.2.9519653", - "num_enum 0.7.2", + "ndk-sys", + "num_enum", "thiserror", ] @@ -160,9 +160,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -192,7 +192,7 @@ dependencies = [ "argh_shared", "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -206,15 +206,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" - -[[package]] -name = "arrayvec" -version = "0.5.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = 
"arrayvec" @@ -237,18 +231,18 @@ version = "0.38.0+1.3.281" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb44936d800fea8f016d7f2311c6a4f97aebd5dc86f09906139ec848cf3a46f" dependencies = [ - "libloading 0.8.5", + "libloading", ] [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -265,17 +259,17 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide 0.8.0", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -376,9 +370,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] @@ -391,7 +385,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -402,23 +396,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" - -[[package]] -name = "calloop" -version = "0.10.6" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e0d00eb1ea24371a97d2da6201c6747a633dc6dc1988ef503403b4c59504a8" -dependencies = [ - "bitflags 1.3.2", - "log", - "nix 0.25.1", - "slotmap", - "thiserror", - "vec_map", -] +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "calloop" @@ -440,10 +420,10 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0ea9b9476c7fad82841a8dbb380e2eae480c21910feba80725b46931ed8f02" dependencies = [ - "calloop 0.12.4", + "calloop", "rustix", "wayland-backend", - "wayland-client 0.31.2", + "wayland-client", ] [[package]] @@ -454,9 +434,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.13" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -519,9 +499,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -529,26 +509,26 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = 
"4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -557,61 +537,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" -[[package]] -name = "cmake" -version = "0.1.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" -dependencies = [ - "cc", -] - -[[package]] -name = "cocoa" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f425db7937052c684daec3bd6375c8abe2d146dca4b8b143d6db777c39138f3a" -dependencies = [ - "bitflags 1.3.2", - "block", - "cocoa-foundation", - "core-foundation", - "core-graphics 0.22.3", - "foreign-types 0.3.2", - "libc", - "objc", -] - -[[package]] -name = "cocoa" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6140449f97a6e97f9511815c5632d84c8aacf8ac271ad77c559218161a1373c" -dependencies = [ - "bitflags 1.3.2", - "block", - "cocoa-foundation", - "core-foundation", - "core-graphics 0.23.2", - "foreign-types 0.5.0", - "libc", - "objc", -] - -[[package]] -name = "cocoa-foundation" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c6234cbb2e4c785b456c0644748b1ac416dd045799740356f8363dfe00c93f7" -dependencies = [ - "bitflags 1.3.2", - "block", - "core-foundation", - "core-graphics-types", - "libc", - "objc", -] - 
[[package]] name = "codespan-reporting" version = "0.11.1" @@ -634,37 +559,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" -[[package]] -name = "com" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e17887fd17353b65b1b2ef1c526c83e26cd72e74f598a8dc1bee13a48f3d9f6" -dependencies = [ - "com_macros", -] - -[[package]] -name = "com_macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d375883580a668c7481ea6631fc1a8863e33cc335bf56bfad8d7e6d4b04b13a5" -dependencies = [ - "com_macros_support", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "com_macros_support" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad899a1087a9296d5644792d7cb72b8e34c1bec8e7d4fbc002230169a6e8710c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "combine" version = "4.6.7" @@ -706,9 +600,9 @@ dependencies = [ [[package]] name = "const_panic" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" +checksum = "7782af8f90fe69a4bb41e460abe1727d493403d8b2cc43201a3a3e906b24379f" [[package]] name = "convert_case" @@ -738,19 +632,6 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" -[[package]] -name = "core-graphics" -version = "0.22.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-graphics-types", - "foreign-types 0.3.2", - "libc", -] - [[package]] name = "core-graphics" version = "0.23.2" 
@@ -760,7 +641,7 @@ dependencies = [ "bitflags 1.3.2", "core-foundation", "core-graphics-types", - "foreign-types 0.5.0", + "foreign-types", "libc", ] @@ -775,18 +656,6 @@ dependencies = [ "libc", ] -[[package]] -name = "core-text" -version = "20.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d2790b5c08465d49f8dc05c8bcae9fea467855947db39b0f8145c091aaced5" -dependencies = [ - "core-foundation", - "core-graphics 0.23.2", - "foreign-types 0.5.0", - "libc", -] - [[package]] name = "crc32fast" version = "1.4.2" @@ -857,29 +726,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crossfont" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb5a3822b594afc99b503cc1859b94686d3c3efdd60507a28587dab80ee1071" -dependencies = [ - "cocoa 0.25.0", - "core-foundation", - "core-foundation-sys", - "core-graphics 0.23.2", - "core-text", - "dwrote", - "foreign-types 0.5.0", - "freetype-rs", - "libc", - "log", - "objc", - "once_cell", - "pkg-config", - "servo-fontconfig", - "winapi", -] - [[package]] name = "crunchy" version = "0.2.2" @@ -893,7 +739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -910,53 +756,12 @@ dependencies = [ "tokio", ] -[[package]] -name = "cty" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" - [[package]] name = "cursor-icon" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96a6ac251f4a2aca6b3f91340350eab87ae57c3f127ffeb585e92bd336717991" -[[package]] -name = "darling" -version = "0.13.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.109", -] - [[package]] name = "data-encoding" version = "2.6.0" @@ -1000,7 +805,7 @@ dependencies = [ "futures", "libc", "log", - "memoffset 0.9.1", + "memoffset", "parking_lot", "pin-project", "serde", @@ -1031,7 +836,7 @@ dependencies = [ "quote", "strum", "strum_macros", - "syn 2.0.75", + "syn", "thiserror", ] @@ -1104,7 +909,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -1117,7 +922,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.75", + "syn", ] [[package]] @@ -1138,7 +943,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412" dependencies = [ - "libloading 0.8.5", + "libloading", ] [[package]] @@ -1156,20 +961,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" -[[package]] -name = "dwrote" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"439a1c2ba5611ad3ed731280541d36d2e9c4ac5e7fb818a27b604bdc5a6aa65b" -dependencies = [ - "lazy_static", - "libc", - "serde", - "serde_derive", - "winapi", - "wio", -] - [[package]] name = "either" version = "1.13.0" @@ -1205,7 +996,7 @@ checksum = "b36f2ddfca91251bed7f931f24b192e4eaf0a0e0fa70cf81cfb1416a1973620e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -1256,21 +1047,11 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "expat-sys" -version = "2.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658f19728920138342f68408b7cf7644d90d4784353d8ebc32e7e8663dbe45fa" -dependencies = [ - "cmake", - "pkg-config", -] - [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fdeflate" @@ -1298,9 +1079,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "miniz_oxide 0.8.0", @@ -1318,21 +1099,6 @@ dependencies = [ "spin", ] -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared 0.1.1", -] - [[package]] name = 
"foreign-types" version = "0.5.0" @@ -1340,7 +1106,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" dependencies = [ "foreign-types-macros", - "foreign-types-shared 0.3.1", + "foreign-types-shared", ] [[package]] @@ -1351,15 +1117,9 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "foreign-types-shared" version = "0.3.1" @@ -1375,28 +1135,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "freetype-rs" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74eadec9d0a5c28c54bb9882e54787275152a4e36ce206b45d7451384e5bf5fb" -dependencies = [ - "bitflags 1.3.2", - "freetype-sys", - "libc", -] - -[[package]] -name = "freetype-sys" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37d4011c0cc628dfa766fcc195454f4b068d7afdc2adfd28861191d866e731a" -dependencies = [ - "cmake", - "libc", - "pkg-config", -] - [[package]] name = "fslock" version = "0.2.1" @@ -1476,7 +1214,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -1511,9 +1249,9 @@ dependencies = [ [[package]] name = "generator" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "979f00864edc7516466d6b3157706e06c032f22715700ddd878228a91d02bc56" +checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" dependencies = [ "cfg-if", "libc", @@ -1547,9 +1285,9 @@ dependencies = [ [[package]] name = 
"gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "gl_generator" @@ -1570,9 +1308,9 @@ checksum = "779ae4bf7e8421cf91c0b3b64e7e8b40b862fba4d393f59150042de7c4965a94" [[package]] name = "glow" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f865cbd94bd355b89611211e49508da98a1fce0ad755c1e8448fb96711b24528" +checksum = "2f4a888dbe8181a7535853469c21c67ca9a1cea9460b16808fc018ea9e55d248" dependencies = [ "js-sys", "slotmap", @@ -1582,65 +1320,54 @@ dependencies = [ [[package]] name = "glutin" -version = "0.29.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444c9ad294fdcaf20ccf6726b78f380b5450275540c9b68ab62f49726ad1c713" +checksum = "18fcd4ae4e86d991ad1300b8f57166e5be0c95ef1f63f3f5b827f8a164548746" dependencies = [ + "bitflags 2.6.0", + "cfg_aliases", "cgl", - "cocoa 0.24.1", "core-foundation", + "dispatch", "glutin_egl_sys", - "glutin_gles2_sys", - "glutin_glx_sys", - "glutin_wgl_sys 0.1.5", - "libloading 0.7.4", - "log", - "objc", + "glutin_wgl_sys 0.5.0", + "icrate", + "libloading", + "objc2", "once_cell", - "osmesa-sys", - "parking_lot", "raw-window-handle 0.5.2", - "wayland-client 0.29.5", - "wayland-egl", - "winapi", - "winit 0.27.5", -] - -[[package]] -name = "glutin_egl_sys" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68900f84b471f31ea1d1355567eb865a2cf446294f06cef8d653ed7bcf5f013d" -dependencies = [ - "gl_generator", - "winapi", + "wayland-sys", + "windows-sys 0.48.0", + "x11-dl", ] [[package]] -name = "glutin_gles2_sys" -version = "0.1.5" +name = "glutin-winit" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "e8094e708b730a7c8a1954f4f8a31880af00eb8a1c5b5bf85d28a0a3c6d69103" +checksum = "1ebcdfba24f73b8412c5181e56f092b5eff16671c514ce896b258a0a64bd7735" dependencies = [ - "gl_generator", - "objc", + "cfg_aliases", + "glutin", + "raw-window-handle 0.5.2", + "winit", ] [[package]] -name = "glutin_glx_sys" -version = "0.1.8" +name = "glutin_egl_sys" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93d0575865098580c5b3a423188cd959419912ea60b1e48e8b3b526f6d02468" +checksum = "77cc5623f5309ef433c3dd4ca1223195347fe62c413da8e2fdd0eb76db2d9bcd" dependencies = [ "gl_generator", - "x11-dl", + "windows-sys 0.48.0", ] [[package]] name = "glutin_wgl_sys" -version = "0.1.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5951a1569dbab865c6f2a863efafff193a93caf05538d193e9e3816d21696" +checksum = "6c8098adac955faa2d31079b65dc48841251f69efd3ac25477903fc424362ead" dependencies = [ "gl_generator", ] @@ -1734,21 +1461,6 @@ dependencies = [ "allocator-api2", ] -[[package]] -name = "hassle-rs" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af2a7e73e1f34c48da31fb668a907f250794837e08faa144fd24f0b8b741e890" -dependencies = [ - "bitflags 2.6.0", - "com", - "libc", - "libloading 0.8.5", - "thiserror", - "widestring", - "winapi", -] - [[package]] name = "heck" version = "0.4.1" @@ -1813,12 +1525,6 @@ dependencies = [ "objc2", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.5.0" @@ -1850,9 +1556,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = 
"68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "arbitrary", "equivalent", @@ -1860,18 +1566,6 @@ dependencies = [ "serde", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "is-terminal" version = "0.4.13" @@ -1951,7 +1645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6aae1df220ece3c0ada96b8153459b67eebe9ae9212258bb0134ae60416fdf76" dependencies = [ "libc", - "libloading 0.8.5", + "libloading", "pkg-config", ] @@ -1993,16 +1687,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "libloading" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if", - "winapi", -] - [[package]] name = "libloading" version = "0.8.5" @@ -2047,6 +1731,15 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" +[[package]] +name = "lock-analyzer" +version = "22.0.0" +dependencies = [ + "anyhow", + "ron", + "serde", +] + [[package]] name = "lock_api" version = "0.4.12" @@ -2102,31 +1795,13 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" -dependencies = [ - "libc", -] - -[[package]] -name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = 
"fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.9.1" @@ -2145,7 +1820,7 @@ dependencies = [ "bitflags 2.6.0", "block", "core-graphics-types", - "foreign-types 0.5.0", + "foreign-types", "log", "objc", "paste", @@ -2182,21 +1857,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" -dependencies = [ - "libc", - "log", - "wasi", - "windows-sys 0.48.0", -] - -[[package]] -name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", @@ -2209,7 +1872,7 @@ name = "naga" version = "22.0.0" dependencies = [ "arbitrary", - "arrayvec 0.7.6", + "arrayvec", "bit-set", "bitflags 2.6.0", "cfg_aliases", @@ -2219,6 +1882,7 @@ dependencies = [ "hexf-parse", "hlsl-snapshots", "indexmap", + "itertools", "log", "petgraph", "pp-rs", @@ -2277,20 +1941,6 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e943b2c21337b7e3ec6678500687cdc741b7639ad457f234693352075c082204" -[[package]] -name = "ndk" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" -dependencies = [ - "bitflags 1.3.2", - "jni-sys", - "ndk-sys 0.4.1+23.1.7779620", - "num_enum 0.5.11", - "raw-window-handle 0.5.2", - "thiserror", -] - 
[[package]] name = "ndk" version = "0.8.0" @@ -2300,8 +1950,9 @@ dependencies = [ "bitflags 2.6.0", "jni-sys", "log", - "ndk-sys 0.5.0+25.2.9519653", - "num_enum 0.7.2", + "ndk-sys", + "num_enum", + "raw-window-handle 0.5.2", "raw-window-handle 0.6.2", "thiserror", ] @@ -2312,44 +1963,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" -[[package]] -name = "ndk-glue" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0434fabdd2c15e0aab768ca31d5b7b333717f03cf02037d5a0a3ff3c278ed67f" -dependencies = [ - "libc", - "log", - "ndk 0.7.0", - "ndk-context", - "ndk-macro", - "ndk-sys 0.4.1+23.1.7779620", - "once_cell", - "parking_lot", -] - -[[package]] -name = "ndk-macro" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c" -dependencies = [ - "darling", - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ndk-sys" -version = "0.4.1+23.1.7779620" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3" -dependencies = [ - "jni-sys", -] - [[package]] name = "ndk-sys" version = "0.5.0+25.2.9519653" @@ -2359,31 +1972,6 @@ dependencies = [ "jni-sys", ] -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset 0.6.5", -] - -[[package]] -name = "nix" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - 
"libc", - "memoffset 0.6.5", -] - [[package]] name = "noise" version = "0.8.2" @@ -2426,63 +2014,42 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.9", - "libc", -] - -[[package]] -name = "num_enum" -version = "0.5.11" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "num_enum_derive 0.5.11", + "autocfg", ] [[package]] -name = "num_enum" -version = "0.7.2" +name = "num_cpus" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "num_enum_derive 0.7.2", + "hermit-abi 0.3.9", + "libc", ] [[package]] -name = "num_enum_derive" -version = "0.5.11" +name = "num_enum" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", + "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -2542,9 +2109,9 @@ checksum = "d079845b37af429bfe5dfa76e6d087d788031045b25cfc6fd898486fd9847666" [[package]] name = "object" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -2570,15 +2137,6 @@ dependencies = [ "libredox", ] -[[package]] -name = "osmesa-sys" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88cfece6e95d2e717e0872a7f53a8684712ad13822a7979bc760b9c77ec0013b" -dependencies = [ - "shared_library", -] - [[package]] name = "outref" version = "0.1.0" @@ -2608,9 +2166,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -2683,7 +2241,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -2715,14 +2273,14 @@ dependencies = [ "serde", "wgpu-core", "wgpu-types", - "winit 0.29.15", + "winit", ] [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ 
"num-traits", "plotters-backend", @@ -2733,15 +2291,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -2797,21 +2355,11 @@ checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa" [[package]] name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - -[[package]] -name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -2822,7 +2370,7 @@ checksum = "07c277e4e643ef00c1233393c673f655e3672cf7eb3ba08a00bdd0ea59139b5f" dependencies = [ "proc-macro-rules-macros", "proc-macro2", - "syn 2.0.75", + "syn", ] [[package]] @@ -2834,7 +2382,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -2854,18 +2402,18 @@ checksum = "43d84d1d7a6ac92673717f9f6d1518374ef257669c24ebc5ac25d5033828be58" [[package]] name = "quick-xml" -version = "0.31.0" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" +checksum = "f7649a7b4df05aed9ea7ec6f628c67c9953a43869b8bc50929569b2999d443fe" dependencies = [ "memchr", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2900,15 +2448,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8a99fddc9f0ba0a85884b8d14e3592853e787d581ca1816c91349b10e4eeab" -[[package]] -name = "raw-window-handle" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b800beb9b6e7d2df1fe337c9e3d04e3af22a124460fb4c30fcc22c9117cefb41" -dependencies = [ - "cty", -] - [[package]] name = "raw-window-handle" version = "0.5.2" @@ -3071,9 +2610,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -3094,15 +2633,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "safe_arch" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ff3d6d9696af502cc3110dacce942840fb06ff4514cad92236ecc455f2ce05" -dependencies = [ - "bytemuck", -] - [[package]] name = "same-file" version = "1.0.6" @@ -3126,27 +2656,15 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sctk-adwaita" -version = "0.4.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "61270629cc6b4d77ec1907db1033d5c2e1a404c412743621981a871dc9c12339" -dependencies = [ - "crossfont", - "log", - "smithay-client-toolkit 0.16.1", - "tiny-skia 0.7.0", -] - -[[package]] -name = "sctk-adwaita" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b2eaf3a5b264a521b988b2e73042e742df700c4f962cde845d1541adb46550" +checksum = "70b31447ca297092c5a9916fc3b955203157b37c19ca8edde4f52e9843e602c7" dependencies = [ "ab_glyph", "log", - "memmap2 0.9.4", - "smithay-client-toolkit 0.18.1", - "tiny-skia 0.11.4", + "memmap2", + "smithay-client-toolkit", + "tiny-skia", ] [[package]] @@ -3172,29 +2690,29 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "indexmap", "itoa", @@ -3217,27 +2735,6 @@ dependencies = [ "v8", ] -[[package]] -name = "servo-fontconfig" -version = "0.5.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7e3e22fe5fd73d04ebf0daa049d3efe3eae55369ce38ab16d07ddd9ac5c217c" -dependencies = [ - "libc", - "servo-fontconfig-sys", -] - -[[package]] -name = "servo-fontconfig-sys" -version = "5.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36b879db9892dfa40f95da1c38a835d41634b825fbd8c4c418093d53c24b388" -dependencies = [ - "expat-sys", - "freetype-sys", - "pkg-config", -] - [[package]] name = "sharded-slab" version = "0.1.7" @@ -3247,16 +2744,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shared_library" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" -dependencies = [ - "lazy_static", - "libc", -] - [[package]] name = "shlex" version = "1.3.0" @@ -3311,25 +2798,6 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -[[package]] -name = "smithay-client-toolkit" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870427e30b8f2cbe64bf43ec4b86e88fe39b0a84b3f15efd9c9c2d020bc86eb9" -dependencies = [ - "bitflags 1.3.2", - "calloop 0.10.6", - "dlib", - "lazy_static", - "log", - "memmap2 0.5.10", - "nix 0.24.3", - "pkg-config", - "wayland-client 0.29.5", - "wayland-cursor 0.29.5", - "wayland-protocols 0.29.5", -] - [[package]] name = "smithay-client-toolkit" version = "0.18.1" @@ -3337,21 +2805,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "922fd3eeab3bd820d76537ce8f582b1cf951eceb5475c28500c7457d9d17f53a" dependencies = [ "bitflags 2.6.0", - "calloop 0.12.4", + "calloop", "calloop-wayland-source", "cursor-icon", "libc", "log", - "memmap2 0.9.4", + "memmap2", "rustix", "thiserror", "wayland-backend", - "wayland-client 0.31.2", + "wayland-client", 
"wayland-csd-frame", - "wayland-cursor 0.31.1", - "wayland-protocols 0.31.2", + "wayland-cursor", + "wayland-protocols", "wayland-protocols-wlr", - "wayland-scanner 0.31.1", + "wayland-scanner", "xkeysym", ] @@ -3430,12 +2898,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6637bab7722d379c8b41ba849228d680cc12d0a45ba1fa2b48f2a30577a06731" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -3461,25 +2923,14 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.75", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", + "syn", ] [[package]] name = "syn" -version = "2.0.75" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -3497,22 +2948,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = 
"08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -3544,21 +2995,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "tiny-skia" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642680569bb895b16e4b9d181c60be1ed136fa0c9c7f11d004daf053ba89bf82" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "bytemuck", - "cfg-if", - "png", - "safe_arch", - "tiny-skia-path 0.7.0", -] - [[package]] name = "tiny-skia" version = "0.11.4" @@ -3566,21 +3002,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83d13394d44dae3207b52a326c0c85a8bf87f1541f23b0d143811088497b09ab" dependencies = [ "arrayref", - "arrayvec 0.7.6", + "arrayvec", "bytemuck", "cfg-if", "log", - "tiny-skia-path 0.11.4", -] - -[[package]] -name = "tiny-skia-path" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c114d32f0c2ee43d585367cb013dfaba967ab9f62b90d9af0d696e955e70fa6c" -dependencies = [ - "arrayref", - "bytemuck", + "tiny-skia-path", ] [[package]] @@ -3621,14 +3047,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.1", + "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -3645,7 +3071,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -3656,20 +3082,9 @@ checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" -version = 
"0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.21.1" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap", "toml_datetime", @@ -3727,9 +3142,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.1" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63de1e1d4115534008d8fd5788b39324d6f58fc707849090533828619351d855" +checksum = "373db47331c3407b343538df77eea2516884a0b126cdfb4b135acfd400015dd7" dependencies = [ "loom", "once_cell", @@ -3738,9 +3153,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98b98232a2447ce0a58f9a0bfb5f5e39647b5c597c994b63945fcccd1306fafb" +checksum = "49cf0064dcb31c99aa1244c1b93439359e53f72ed217eef5db50abd442241e9a" dependencies = [ "cc", ] @@ -3800,42 +3215,42 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-id-start" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3882f69607a2ac8cc4de3ee7993d8f68bb06f2974271195065b3bd07f2edea" +checksum = "97e2a3c5fc9de285c0e805d98eba666adb4b2d9e1049ce44821ff7707cc34e91" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = 
"e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "url" @@ -3899,12 +3314,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.5" @@ -3955,7 +3364,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn", "wasm-bindgen-shared", ] @@ -3989,7 +3398,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.75", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4023,61 +3432,33 @@ checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] name = "wayland-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90e11ce2ca99c97b940ee83edbae9da2d56a08f9ea8158550fd77fa31722993" +checksum = "056535ced7a150d45159d3a8dc30f91a2e2d588ca0b23f70e56033622b8016f6" dependencies = [ "cc", "downcast-rs", "rustix", "scoped-tls", "smallvec", - "wayland-sys 0.31.4", -] - -[[package]] -name = "wayland-client" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f3b068c05a039c9f755f881dc50f01732214f5685e379829759088967c46715" -dependencies = [ - "bitflags 1.3.2", - "downcast-rs", - "libc", - "nix 0.24.3", - "scoped-tls", - "wayland-commons", - "wayland-scanner 0.29.5", - "wayland-sys 0.29.5", + "wayland-sys", ] [[package]] name = "wayland-client" -version = "0.31.2" +version = "0.31.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fb96ee935c2cea6668ccb470fb7771f6215d1691746c2d896b447a00ad3f1f" +checksum = "e3f45d1222915ef1fd2057220c1d9d9624b7654443ea35c3877f7a52bd0a5a2d" dependencies = [ "bitflags 2.6.0", "rustix", "wayland-backend", - "wayland-scanner 0.31.1", -] - -[[package]] -name = "wayland-commons" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691f134d584a33a6606d9d717b95c4fa20065605f798a3f350d78dced02a902" -dependencies = [ - "nix 0.24.3", - "once_cell", - "smallvec", - "wayland-sys 0.29.5", + "wayland-scanner", ] [[package]] @@ -4093,48 +3474,15 @@ dependencies = [ [[package]] name = "wayland-cursor" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6865c6b66f13d6257bef1cd40cbfe8ef2f150fb8ebbdb1e8e873455931377661" -dependencies = [ - "nix 0.24.3", - "wayland-client 0.29.5", - "xcursor", -] - -[[package]] -name = "wayland-cursor" -version = "0.31.1" +version = "0.31.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ce5fa868dd13d11a0d04c5e2e65726d0897be8de247c0c5a65886e283231ba" +checksum = "3a94697e66e76c85923b0d28a0c251e8f0666f58fc47d316c0f4da6da75d37cb" dependencies = [ "rustix", - "wayland-client 0.31.2", + "wayland-client", "xcursor", ] -[[package]] -name = "wayland-egl" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402de949f81a012926d821a2d659f930694257e76dd92b6e0042ceb27be4107d" -dependencies = [ - "wayland-client 0.29.5", - "wayland-sys 0.29.5", -] - -[[package]] -name = "wayland-protocols" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b950621f9354b322ee817a23474e479b34be96c2e909c14f7bc0100e9a970bc6" -dependencies = [ - "bitflags 1.3.2", - "wayland-client 0.29.5", - "wayland-commons", - "wayland-scanner 0.29.5", -] - [[package]] name = "wayland-protocols" version = "0.31.2" @@ -4143,8 +3491,8 @@ checksum = "8f81f365b8b4a97f422ac0e8737c438024b5951734506b0e1d775c73030561f4" dependencies = [ "bitflags 2.6.0", "wayland-backend", - "wayland-client 0.31.2", - "wayland-scanner 0.31.1", + "wayland-client", + "wayland-scanner", ] [[package]] @@ -4155,9 +3503,9 @@ checksum = "23803551115ff9ea9bce586860c5c5a971e360825a0309264102a9495a5ff479" dependencies = [ "bitflags 2.6.0", "wayland-backend", - "wayland-client 0.31.2", - "wayland-protocols 0.31.2", - "wayland-scanner 0.31.1", + "wayland-client", + "wayland-protocols", + "wayland-scanner", ] [[package]] @@ -4168,27 +3516,16 @@ checksum = "ad1f61b76b6c2d8742e10f9ba5c3737f6530b4c243132c2a2ccc8aa96fe25cd6" dependencies = [ "bitflags 2.6.0", "wayland-backend", - "wayland-client 0.31.2", - "wayland-protocols 0.31.2", - 
"wayland-scanner 0.31.1", -] - -[[package]] -name = "wayland-scanner" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4303d8fa22ab852f789e75a967f0a2cdc430a607751c0499bada3e451cbd53" -dependencies = [ - "proc-macro2", - "quote", - "xml-rs", + "wayland-client", + "wayland-protocols", + "wayland-scanner", ] [[package]] name = "wayland-scanner" -version = "0.31.1" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63b3a62929287001986fb58c789dce9b67604a397c15c611ad9f747300b6c283" +checksum = "597f2001b2e5fc1121e3d5b9791d3e78f05ba6bfa4641053846248e3a13661c3" dependencies = [ "proc-macro2", "quick-xml", @@ -4197,20 +3534,9 @@ dependencies = [ [[package]] name = "wayland-sys" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be12ce1a3c39ec7dba25594b97b42cb3195d54953ddb9d3d95a7c3902bc6e9d4" -dependencies = [ - "dlib", - "lazy_static", - "pkg-config", -] - -[[package]] -name = "wayland-sys" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43676fe2daf68754ecf1d72026e4e6c15483198b5d24e888b74d3f22f887a148" +checksum = "efa8ac0d8e8ed3e3b5c9fc92c7881406a268e11555abe36493efabe649a29e09" dependencies = [ "dlib", "log", @@ -4242,7 +3568,7 @@ dependencies = [ name = "wgpu" version = "22.0.0" dependencies = [ - "arrayvec 0.7.6", + "arrayvec", "cfg_aliases", "document-features", "js-sys", @@ -4283,7 +3609,7 @@ dependencies = [ name = "wgpu-core" version = "22.0.0" dependencies = [ - "arrayvec 0.7.6", + "arrayvec", "bit-vec", "bitflags 2.6.0", "bytemuck", @@ -4333,7 +3659,7 @@ dependencies = [ "web-time", "wgpu", "wgpu-test", - "winit 0.29.15", + "winit", ] [[package]] @@ -4341,7 +3667,7 @@ name = "wgpu-hal" version = "22.0.0" dependencies = [ "android_system_properties", - "arrayvec 0.7.6", + "arrayvec", "ash", "bit-set", "bitflags 2.6.0", @@ -4353,24 +3679,25 @@ 
dependencies = [ "glam", "glow", "glutin", + "glutin-winit", "glutin_wgl_sys 0.6.0", "gpu-alloc", "gpu-allocator", "gpu-descriptor", - "hassle-rs", "js-sys", "khronos-egl", "libc", - "libloading 0.8.5", + "libloading", "log", "metal", "naga", - "ndk-sys 0.5.0+25.2.9519653", + "ndk-sys", "objc", "once_cell", "parking_lot", "profiling", "range-alloc", + "raw-window-handle 0.5.2", "raw-window-handle 0.6.2", "renderdoc-sys", "rustc-hash", @@ -4381,7 +3708,7 @@ dependencies = [ "wgpu-types", "windows", "windows-core", - "winit 0.29.15", + "winit", ] [[package]] @@ -4403,7 +3730,7 @@ version = "22.0.0" dependencies = [ "heck 0.5.0", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -4411,7 +3738,7 @@ name = "wgpu-test" version = "22.0.0" dependencies = [ "anyhow", - "arrayvec 0.7.6", + "arrayvec", "bitflags 2.6.0", "bytemuck", "cfg-if", @@ -4465,12 +3792,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "widestring" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" - [[package]] name = "winapi" version = "0.3.9" @@ -4533,7 +3854,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -4544,7 +3865,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] [[package]] @@ -4566,19 +3887,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" 
version = "0.45.0" @@ -4679,12 +3987,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -4703,12 +4005,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -4733,12 +4029,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -4757,12 +4047,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -4799,12 +4083,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -4823,39 +4101,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winit" -version = "0.27.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb796d6fbd86b2fd896c9471e6f04d39d750076ebe5680a3958f00f5ab97657c" -dependencies = [ - "bitflags 1.3.2", - "cocoa 0.24.1", - "core-foundation", - "core-graphics 0.22.3", - "dispatch", - "instant", - "libc", - "log", - "mio 0.8.11", - "ndk 0.7.0", - "ndk-glue", - "objc", - "once_cell", - "parking_lot", - "percent-encoding", - "raw-window-handle 0.4.3", - "raw-window-handle 0.5.2", - "sctk-adwaita 0.4.3", - "smithay-client-toolkit 0.16.1", - "wasm-bindgen", - "wayland-client 0.29.5", - "wayland-protocols 0.29.5", - "web-sys", - "windows-sys 0.36.1", - "x11-dl", -] - [[package]] name = "winit" version = "0.29.15" @@ -4867,34 +4112,35 @@ dependencies = [ "atomic-waker", "bitflags 2.6.0", "bytemuck", - "calloop 0.12.4", + "calloop", "cfg_aliases", "core-foundation", - "core-graphics 0.23.2", + "core-graphics", "cursor-icon", "icrate", "js-sys", "libc", "log", - "memmap2 0.9.4", - "ndk 0.8.0", - "ndk-sys 0.5.0+25.2.9519653", + "memmap2", + "ndk", + "ndk-sys", "objc2", "once_cell", "orbclient", "percent-encoding", + "raw-window-handle 0.5.2", "raw-window-handle 0.6.2", "redox_syscall 0.3.5", "rustix", - "sctk-adwaita 0.8.1", - "smithay-client-toolkit 0.18.1", + "sctk-adwaita", + "smithay-client-toolkit", "smol_str", "unicode-segmentation", "wasm-bindgen", "wasm-bindgen-futures", "wayland-backend", - "wayland-client 0.31.2", - 
"wayland-protocols 0.31.2", + "wayland-client", + "wayland-protocols", "wayland-protocols-plasma", "web-sys", "web-time", @@ -4906,22 +4152,13 @@ dependencies = [ [[package]] name = "winnow" -version = "0.5.40" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] -[[package]] -name = "wio" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5" -dependencies = [ - "winapi", -] - [[package]] name = "x11-dl" version = "2.21.0" @@ -4942,7 +4179,7 @@ dependencies = [ "as-raw-xcb-connection", "gethostname", "libc", - "libloading 0.8.5", + "libloading", "once_cell", "rustix", "x11rb-protocol", @@ -4981,9 +4218,9 @@ checksum = "b9cc00251562a284751c9973bace760d86c0276c471b4be569fe6b068ee97a56" [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "zerocopy" @@ -5002,5 +4239,5 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn", ] diff --git a/Cargo.toml b/Cargo.toml index fe3bc0b37c..73f17b7555 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ # default members "benches", "examples", + "lock-analyzer", "naga-cli", "naga", "naga/fuzz", @@ -24,6 +25,7 @@ exclude = [] default-members = [ "benches", "examples", + "lock-analyzer", "naga-cli", "naga", "naga/fuzz", @@ -68,13 +70,13 @@ path = "./naga" version = "22.0.0" [workspace.dependencies] -anyhow = "1.0.86" +anyhow = "1.0.89" argh = "0.1.5" 
arrayvec = "0.7" bincode = "1" bit-vec = "0.8" bitflags = "2.6" -bytemuck = { version = "1.17", features = ["derive"] } +bytemuck = { version = "1.18", features = ["derive"] } cfg_aliases = "0.1" cfg-if = "1" criterion = "0.5" @@ -103,7 +105,7 @@ noise = { version = "0.8", git = "https://github.com/Razaekel/noise-rs.git", rev nv-flip = "0.1" obj = "0.10" once_cell = "1.19.0" -parking_lot = ">=0.11, <0.13" # parking_lot 0.12 switches from `winapi` to `windows`; permit either +parking_lot = "0.12.1" pico-args = { version = "0.5.0", features = [ "eq-separator", "short-space-opt", @@ -118,12 +120,12 @@ renderdoc-sys = "1.1.0" ron = "0.8" rustc-hash = "1.1.0" serde = "1" -serde_json = "1.0.125" +serde_json = "1.0.128" smallvec = "1" static_assertions = "1.1.0" strum = { version = "0.25.0", features = ["derive"] } tracy-client = "0.17" -thiserror = "1.0.63" +thiserror = "1.0.64" wgpu = { version = "22.0.0", path = "./wgpu", default-features = false } wgpu-core = { version = "22.0.0", path = "./wgpu-core" } wgpu-macros = { version = "22.0.0", path = "./wgpu-macros" } @@ -147,13 +149,13 @@ gpu-descriptor = "0.3" bit-set = "0.8" gpu-allocator = { version = "0.27", default-features = false } range-alloc = "0.1" -hassle-rs = "0.11.0" windows-core = { version = "0.58", default-features = false } # Gles dependencies khronos-egl = "6" -glow = "0.14.0" -glutin = "0.29.1" +glow = "0.14.1" +glutin = { version = "0.31", default-features = false } +glutin-winit = { version = "0.4", default-features = false } glutin_wgl_sys = "0.6" # DX and GLES dependencies @@ -176,7 +178,7 @@ deno_url = "0.143.0" deno_web = "0.174.0" deno_webidl = "0.143.0" deno_webgpu = { version = "0.118.0", path = "./deno_webgpu" } -tokio = "1.39.3" +tokio = "1.40.0" termcolor = "1.4.1" # android dependencies diff --git a/benches/benches/computepass.rs b/benches/benches/computepass.rs index 2af1413605..719f84696e 100644 --- a/benches/benches/computepass.rs +++ b/benches/benches/computepass.rs @@ -389,7 +389,7 @@ 
impl ComputepassState { let end_idx = start_idx + dispatch_per_pass; for dispatch_idx in start_idx..end_idx { compute_pass.set_pipeline(&self.pipeline); - compute_pass.set_bind_group(0, &self.bind_groups[dispatch_idx], &[]); + compute_pass.set_bind_group(0, Some(&self.bind_groups[dispatch_idx]), &[]); compute_pass.dispatch_workgroups(1, 1, 1); } @@ -412,7 +412,7 @@ impl ComputepassState { }); compute_pass.set_pipeline(self.bindless_pipeline.as_ref().unwrap()); - compute_pass.set_bind_group(0, self.bindless_bind_group.as_ref().unwrap(), &[]); + compute_pass.set_bind_group(0, Some(self.bindless_bind_group.as_ref().unwrap()), &[]); for _ in 0..dispatch_count_bindless { compute_pass.dispatch_workgroups(1, 1, 1); } @@ -447,7 +447,7 @@ fn run_bench(ctx: &mut Criterion) { }; group.bench_function( - &format!("{cpasses} computepasses x {dispatch_per_pass} dispatches ({label})"), + format!("{cpasses} computepasses x {dispatch_per_pass} dispatches ({label})"), |b| { Lazy::force(&state); @@ -496,7 +496,7 @@ fn run_bench(ctx: &mut Criterion) { for threads in [2, 4, 8] { let dispatch_per_pass = dispatch_count / threads; group.bench_function( - &format!("{threads} threads x {dispatch_per_pass} dispatch"), + format!("{threads} threads x {dispatch_per_pass} dispatch"), |b| { Lazy::force(&state); @@ -537,7 +537,7 @@ fn run_bench(ctx: &mut Criterion) { let mut group = ctx.benchmark_group("Computepass: Bindless"); group.throughput(Throughput::Elements(dispatch_count_bindless as _)); - group.bench_function(&format!("{dispatch_count_bindless} dispatch"), |b| { + group.bench_function(format!("{dispatch_count_bindless} dispatch"), |b| { Lazy::force(&state); b.iter_custom(|iters| { diff --git a/benches/benches/renderpass.rs b/benches/benches/renderpass.rs index 7f2e14116e..fe23aa62be 100644 --- a/benches/benches/renderpass.rs +++ b/benches/benches/renderpass.rs @@ -367,7 +367,7 @@ impl RenderpassState { let end_idx = start_idx + draws_per_pass; for draw_idx in start_idx..end_idx { 
render_pass.set_pipeline(&self.pipeline); - render_pass.set_bind_group(0, &self.bind_groups[draw_idx], &[]); + render_pass.set_bind_group(0, Some(&self.bind_groups[draw_idx]), &[]); for i in 0..VERTEX_BUFFERS_PER_DRAW { render_pass.set_vertex_buffer( i as u32, @@ -410,7 +410,7 @@ impl RenderpassState { }); render_pass.set_pipeline(self.bindless_pipeline.as_ref().unwrap()); - render_pass.set_bind_group(0, self.bindless_bind_group.as_ref().unwrap(), &[]); + render_pass.set_bind_group(0, Some(self.bindless_bind_group.as_ref().unwrap()), &[]); for i in 0..VERTEX_BUFFERS_PER_DRAW { render_pass.set_vertex_buffer(i as u32, self.vertex_buffers[0].slice(..)); } @@ -448,7 +448,7 @@ fn run_bench(ctx: &mut Criterion) { }; group.bench_function( - &format!("{rpasses} renderpasses x {draws_per_pass} draws ({label})"), + format!("{rpasses} renderpasses x {draws_per_pass} draws ({label})"), |b| { Lazy::force(&state); @@ -501,41 +501,38 @@ fn run_bench(ctx: &mut Criterion) { for threads in [2, 4, 8] { let draws_per_pass = draw_count / threads; - group.bench_function( - &format!("{threads} threads x {draws_per_pass} draws"), - |b| { - Lazy::force(&state); + group.bench_function(format!("{threads} threads x {draws_per_pass} draws"), |b| { + Lazy::force(&state); - b.iter_custom(|iters| { - profiling::scope!("benchmark invocation"); + b.iter_custom(|iters| { + profiling::scope!("benchmark invocation"); - // This benchmark hangs on Apple Paravirtualized GPUs. No idea why. - if state.device_state.adapter_info.name.contains("Paravirtual") { - return Duration::from_secs_f32(1.0); - } + // This benchmark hangs on Apple Paravirtualized GPUs. No idea why. 
+ if state.device_state.adapter_info.name.contains("Paravirtual") { + return Duration::from_secs_f32(1.0); + } - let mut duration = Duration::ZERO; + let mut duration = Duration::ZERO; - for _ in 0..iters { - profiling::scope!("benchmark iteration"); + for _ in 0..iters { + profiling::scope!("benchmark iteration"); - let start = Instant::now(); + let start = Instant::now(); - let buffers = (0..threads) - .into_par_iter() - .map(|i| state.run_subpass(i, threads, draw_count)) - .collect::<Vec<_>>(); + let buffers = (0..threads) + .into_par_iter() + .map(|i| state.run_subpass(i, threads, draw_count)) + .collect::<Vec<_>>(); - duration += start.elapsed(); + duration += start.elapsed(); - state.device_state.queue.submit(buffers); - state.device_state.device.poll(wgpu::Maintain::Wait); - } + state.device_state.queue.submit(buffers); + state.device_state.device.poll(wgpu::Maintain::Wait); + } - duration - }) - }, - ); + duration + }) + }); } group.finish(); @@ -543,7 +540,7 @@ fn run_bench(ctx: &mut Criterion) { let mut group = ctx.benchmark_group("Renderpass: Bindless"); group.throughput(Throughput::Elements(draw_count as _)); - group.bench_function(&format!("{draw_count} draws"), |b| { + group.bench_function(format!("{draw_count} draws"), |b| { Lazy::force(&state); b.iter_custom(|iters| { diff --git a/benches/benches/resource_creation.rs b/benches/benches/resource_creation.rs index c23f132bbe..da0c79a406 100644 --- a/benches/benches/resource_creation.rs +++ b/benches/benches/resource_creation.rs @@ -17,7 +17,7 @@ fn run_bench(ctx: &mut Criterion) { for threads in [1, 2, 4, 8] { let resources_per_thread = RESOURCES_TO_CREATE / threads; group.bench_function( - &format!("{threads} threads x {resources_per_thread} resource"), + format!("{threads} threads x {resources_per_thread} resource"), |b| { Lazy::force(&state); diff --git a/deno_webgpu/bundle.rs b/deno_webgpu/bundle.rs index 0d1421d202..58d179051b 100644 --- a/deno_webgpu/bundle.rs +++ b/deno_webgpu/bundle.rs @@ -150,7 +150,7 @@ 
pub fn op_webgpu_render_bundle_encoder_set_bind_group( wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group( &mut render_bundle_encoder_resource.0.borrow_mut(), index, - bind_group_resource.1, + Some(bind_group_resource.1), dynamic_offsets_data.as_ptr(), dynamic_offsets_data.len(), ); diff --git a/deno_webgpu/compute_pass.rs b/deno_webgpu/compute_pass.rs index e3e69860ab..6755e84a6a 100644 --- a/deno_webgpu/compute_pass.rs +++ b/deno_webgpu/compute_pass.rs @@ -136,7 +136,7 @@ pub fn op_webgpu_compute_pass_set_bind_group( .compute_pass_set_bind_group( &mut compute_pass_resource.0.borrow_mut(), index, - bind_group_resource.1, + Some(bind_group_resource.1), dynamic_offsets_data, )?; diff --git a/deno_webgpu/lib.rs b/deno_webgpu/lib.rs index c2dfb240fa..e31812e25f 100644 --- a/deno_webgpu/lib.rs +++ b/deno_webgpu/lib.rs @@ -401,10 +401,7 @@ pub fn op_webgpu_request_adapter( force_fallback_adapter, compatible_surface: None, // windowless }; - let res = instance.request_adapter( - &descriptor, - wgpu_core::instance::AdapterInputs::Mask(backends, |_| None), - ); + let res = instance.request_adapter(&descriptor, backends, None); let adapter = match res { Ok(adapter) => adapter, @@ -414,9 +411,9 @@ pub fn op_webgpu_request_adapter( }) } }; - let adapter_features = instance.adapter_features(adapter)?; + let adapter_features = instance.adapter_features(adapter); let features = deserialize_features(&adapter_features); - let adapter_limits = instance.adapter_limits(adapter)?; + let adapter_limits = instance.adapter_limits(adapter); let instance = instance.clone(); @@ -649,7 +646,7 @@ pub fn op_webgpu_request_device( memory_hints: wgpu_types::MemoryHints::default(), }; - let (device, queue, maybe_err) = instance.adapter_request_device( + let res = instance.adapter_request_device( adapter, &descriptor, std::env::var("DENO_WEBGPU_TRACE") @@ -660,13 +657,12 @@ pub fn op_webgpu_request_device( None, ); adapter_resource.close(); - if let Some(err) = maybe_err { - return 
Err(DomExceptionOperationError::new(&err.to_string()).into()); - } - let device_features = instance.device_features(device)?; + let (device, queue) = res.map_err(|err| DomExceptionOperationError::new(&err.to_string()))?; + + let device_features = instance.device_features(device); let features = deserialize_features(&device_features); - let limits = instance.device_limits(device)?; + let limits = instance.device_limits(device); let instance = instance.clone(); let instance2 = instance.clone(); @@ -705,7 +701,7 @@ pub fn op_webgpu_request_adapter_info( let adapter = adapter_resource.1; let instance = state.borrow::<Instance>(); - let info = instance.adapter_get_info(adapter)?; + let info = instance.adapter_get_info(adapter); adapter_resource.close(); Ok(GPUAdapterInfo { diff --git a/deno_webgpu/render_pass.rs b/deno_webgpu/render_pass.rs index 2d4557cf03..4929fbbe90 100644 --- a/deno_webgpu/render_pass.rs +++ b/deno_webgpu/render_pass.rs @@ -231,7 +231,7 @@ pub fn op_webgpu_render_pass_set_bind_group( .render_pass_set_bind_group( &mut render_pass_resource.0.borrow_mut(), index, - bind_group_resource.1, + Some(bind_group_resource.1), dynamic_offsets_data, )?; diff --git a/examples/src/boids/mod.rs b/examples/src/boids/mod.rs index 8c3581824b..c527be96d9 100644 --- a/examples/src/boids/mod.rs +++ b/examples/src/boids/mod.rs @@ -2,7 +2,7 @@ // adapted from https://github.com/austinEng/webgpu-samples/blob/master/src/examples/computeBoids.ts use nanorand::{Rng, WyRand}; -use std::{borrow::Cow, mem}; +use std::{borrow::Cow, mem::size_of}; use wgpu::util::DeviceExt; // number of boid particles to simulate @@ -82,7 +82,7 @@ impl crate::framework::Example for Example { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: wgpu::BufferSize::new( - (sim_param_data.len() * mem::size_of::<SimParams>()) as _, + (sim_param_data.len() * size_of::<SimParams>()) as _, ), }, count: None, @@ -298,7 +298,7 @@ impl crate::framework::Example for Example { timestamp_writes: None, });
cpass.set_pipeline(&self.compute_pipeline); - cpass.set_bind_group(0, &self.particle_bind_groups[self.frame_num % 2], &[]); + cpass.set_bind_group(0, Some(&self.particle_bind_groups[self.frame_num % 2]), &[]); cpass.dispatch_workgroups(self.work_group_count, 1, 1); } command_encoder.pop_debug_group(); diff --git a/examples/src/bunnymark/mod.rs b/examples/src/bunnymark/mod.rs index 54bdc2a941..0c242c7137 100644 --- a/examples/src/bunnymark/mod.rs +++ b/examples/src/bunnymark/mod.rs @@ -34,19 +34,29 @@ impl Bunny { self.position[0] += self.velocity[0] * delta; self.position[1] += self.velocity[1] * delta; self.velocity[1] += GRAVITY * delta; + if (self.velocity[0] > 0.0 && self.position[0] + 0.5 * BUNNY_SIZE > extent[0] as f32) || (self.velocity[0] < 0.0 && self.position[0] - 0.5 * BUNNY_SIZE < 0.0) { self.velocity[0] *= -1.0; } + if self.velocity[1] < 0.0 && self.position[1] < 0.5 * BUNNY_SIZE { self.velocity[1] *= -1.0; } + + // Top boundary check + if self.velocity[1] > 0.0 && self.position[1] + 0.5 * BUNNY_SIZE > extent[1] as f32 { + self.velocity[1] *= -1.0; + } } } /// Example struct holds references to wgpu resources and frame persistent data struct Example { + view: wgpu::TextureView, + sampler: wgpu::Sampler, + global_bind_group_layout: wgpu::BindGroupLayout, global_group: wgpu::BindGroup, local_group: wgpu::BindGroup, pipeline: wgpu::RenderPipeline, @@ -118,11 +128,11 @@ impl Example { occlusion_query_set: None, }); rpass.set_pipeline(&self.pipeline); - rpass.set_bind_group(0, &self.global_group, &[]); + rpass.set_bind_group(0, Some(&self.global_group), &[]); for i in 0..self.bunnies.len() { let offset = (i as wgpu::DynamicOffset) * (uniform_alignment as wgpu::DynamicOffset); - rpass.set_bind_group(1, &self.local_group, &[offset]); + rpass.set_bind_group(1, Some(&self.local_group), &[offset]); rpass.draw(0..4, 0..1); } } @@ -286,6 +296,7 @@ impl crate::framework::Example for Example { size: [BUNNY_SIZE; 2], pad: [0.0; 2], }; + let global_buffer = 
device.create_buffer_init(&wgpu::util::BufferInitDescriptor { label: Some("global"), contents: bytemuck::bytes_of(&globals), @@ -335,6 +346,9 @@ impl crate::framework::Example for Example { let rng = WyRand::new_seed(42); let mut ex = Example { + view, + sampler, + global_bind_group_layout, pipeline, global_group, local_group, @@ -366,11 +380,51 @@ impl crate::framework::Example for Example { fn resize( &mut self, - _sc_desc: &wgpu::SurfaceConfiguration, - _device: &wgpu::Device, + sc_desc: &wgpu::SurfaceConfiguration, + device: &wgpu::Device, _queue: &wgpu::Queue, ) { - //empty + self.extent = [sc_desc.width, sc_desc.height]; + + let globals = Globals { + mvp: glam::Mat4::orthographic_rh( + 0.0, + sc_desc.width as f32, + 0.0, + sc_desc.height as f32, + -1.0, + 1.0, + ) + .to_cols_array_2d(), + size: [BUNNY_SIZE; 2], + pad: [0.0; 2], + }; + + let global_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("global"), + contents: bytemuck::bytes_of(&globals), + usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::UNIFORM, + }); + + let global_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &self.global_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: global_buffer.as_entire_binding(), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::TextureView(&self.view), + }, + wgpu::BindGroupEntry { + binding: 2, + resource: wgpu::BindingResource::Sampler(&self.sampler), + }, + ], + label: None, + }); + self.global_group = global_group; } fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) { diff --git a/examples/src/conservative_raster/mod.rs b/examples/src/conservative_raster/mod.rs index d029134756..46fb8742a0 100644 --- a/examples/src/conservative_raster/mod.rs +++ b/examples/src/conservative_raster/mod.rs @@ -305,7 +305,7 @@ impl crate::framework::Example for Example { }); 
rpass.set_pipeline(&self.pipeline_upscale); - rpass.set_bind_group(0, &self.bind_group_upscale, &[]); + rpass.set_bind_group(0, Some(&self.bind_group_upscale), &[]); rpass.draw(0..3, 0..1); if let Some(pipeline_lines) = &self.pipeline_lines { diff --git a/examples/src/cube/mod.rs b/examples/src/cube/mod.rs index 608fae0088..78dc06e061 100644 --- a/examples/src/cube/mod.rs +++ b/examples/src/cube/mod.rs @@ -1,5 +1,5 @@ use bytemuck::{Pod, Zeroable}; -use std::{borrow::Cow, f32::consts, mem}; +use std::{borrow::Cow, f32::consts, mem::size_of}; use wgpu::util::DeviceExt; #[repr(C)] @@ -114,7 +114,7 @@ impl crate::framework::Example for Example { queue: &wgpu::Queue, ) -> Self { // Create the vertex and index buffers - let vertex_size = mem::size_of::<Vertex>(); + let vertex_size = size_of::<Vertex>(); let (vertex_data, index_data) = create_vertices(); let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { @@ -361,7 +361,7 @@ impl crate::framework::Example for Example { }); rpass.push_debug_group("Prepare data for draw."); rpass.set_pipeline(&self.pipeline); - rpass.set_bind_group(0, &self.bind_group, &[]); + rpass.set_bind_group(0, Some(&self.bind_group), &[]); rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint16); rpass.set_vertex_buffer(0, self.vertex_buf.slice(..)); rpass.pop_debug_group(); diff --git a/examples/src/hello_compute/mod.rs b/examples/src/hello_compute/mod.rs index 7f3c3f05bf..e53f49fa43 100644 --- a/examples/src/hello_compute/mod.rs +++ b/examples/src/hello_compute/mod.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, str::FromStr}; +use std::{borrow::Cow, mem::size_of_val, str::FromStr}; use wgpu::util::DeviceExt; // Indicates a u32 overflow in an intermediate Collatz value @@ -72,7 +72,7 @@ async fn execute_gpu_inner( }); // Gets the size in bytes of the buffer. - let size = std::mem::size_of_val(numbers) as wgpu::BufferAddress; + let size = size_of_val(numbers) as wgpu::BufferAddress; // Instantiates buffer without data. 
// `usage` of buffer specifies how it can be used: @@ -135,7 +135,7 @@ async fn execute_gpu_inner( timestamp_writes: None, }); cpass.set_pipeline(&compute_pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.insert_debug_marker("compute collatz iterations"); cpass.dispatch_workgroups(numbers.len() as u32, 1, 1); // Number of cells to run, the (x,y,z) size of item being processed } diff --git a/examples/src/hello_synchronization/mod.rs b/examples/src/hello_synchronization/mod.rs index 397af48c98..fad5d7a9da 100644 --- a/examples/src/hello_synchronization/mod.rs +++ b/examples/src/hello_synchronization/mod.rs @@ -1,3 +1,5 @@ +use std::mem::size_of_val; + const ARR_SIZE: usize = 128; struct ExecuteResults { @@ -61,13 +63,13 @@ async fn execute( let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: std::mem::size_of_val(local_patient_workgroup_results.as_slice()) as u64, + size: size_of_val(local_patient_workgroup_results.as_slice()) as u64, usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC, mapped_at_creation: false, }); let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: std::mem::size_of_val(local_patient_workgroup_results.as_slice()) as u64, + size: size_of_val(local_patient_workgroup_results.as_slice()) as u64, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, mapped_at_creation: false, }); @@ -126,7 +128,7 @@ async fn execute( timestamp_writes: None, }); compute_pass.set_pipeline(&patient_pipeline); - compute_pass.set_bind_group(0, &bind_group, &[]); + compute_pass.set_bind_group(0, Some(&bind_group), &[]); compute_pass.dispatch_workgroups(local_patient_workgroup_results.len() as u32, 1, 1); } queue.submit(Some(command_encoder.finish())); @@ -148,7 +150,7 @@ async fn execute( timestamp_writes: None, }); compute_pass.set_pipeline(&hasty_pipeline); - compute_pass.set_bind_group(0, &bind_group, 
&[]); + compute_pass.set_bind_group(0, Some(&bind_group), &[]); compute_pass.dispatch_workgroups(local_patient_workgroup_results.len() as u32, 1, 1); } queue.submit(Some(command_encoder.finish())); @@ -182,7 +184,7 @@ async fn get_data( 0, staging_buffer, 0, - std::mem::size_of_val(output) as u64, + size_of_val(output) as u64, ); queue.submit(Some(command_encoder.finish())); let buffer_slice = staging_buffer.slice(..); diff --git a/examples/src/hello_workgroups/mod.rs b/examples/src/hello_workgroups/mod.rs index 3260aa8628..7a653cf3e8 100644 --- a/examples/src/hello_workgroups/mod.rs +++ b/examples/src/hello_workgroups/mod.rs @@ -7,6 +7,8 @@ //! //! Only parts specific to this example will be commented. +use std::mem::size_of_val; + use wgpu::util::DeviceExt; async fn run() { @@ -56,7 +58,7 @@ async fn run() { }); let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: std::mem::size_of_val(&local_a) as u64, + size: size_of_val(&local_a) as u64, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, mapped_at_creation: false, }); @@ -125,7 +127,7 @@ async fn run() { timestamp_writes: None, }); compute_pass.set_pipeline(&pipeline); - compute_pass.set_bind_group(0, &bind_group, &[]); + compute_pass.set_bind_group(0, Some(&bind_group), &[]); /* Note that since each workgroup will cover both arrays, we only need to cover the length of one array. 
*/ compute_pass.dispatch_workgroups(local_a.len() as u32, 1, 1); @@ -169,7 +171,7 @@ async fn get_data( 0, staging_buffer, 0, - std::mem::size_of_val(output) as u64, + size_of_val(output) as u64, ); queue.submit(Some(command_encoder.finish())); let buffer_slice = staging_buffer.slice(..); diff --git a/examples/src/mipmap/mod.rs b/examples/src/mipmap/mod.rs index 33e23a474a..179970ad7f 100644 --- a/examples/src/mipmap/mod.rs +++ b/examples/src/mipmap/mod.rs @@ -1,5 +1,5 @@ use bytemuck::{Pod, Zeroable}; -use std::{borrow::Cow, f32::consts, mem}; +use std::{borrow::Cow, f32::consts, mem::size_of}; use wgpu::util::DeviceExt; const TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb; @@ -54,8 +54,7 @@ type TimestampQueries = [TimestampData; MIP_PASS_COUNT as usize]; type PipelineStatisticsQueries = [u64; MIP_PASS_COUNT as usize]; fn pipeline_statistics_offset() -> wgpu::BufferAddress { - (mem::size_of::<TimestampQueries>() as wgpu::BufferAddress) - .max(wgpu::QUERY_RESOLVE_BUFFER_ALIGNMENT) + (size_of::<TimestampQueries>() as wgpu::BufferAddress).max(wgpu::QUERY_RESOLVE_BUFFER_ALIGNMENT) } struct Example { @@ -181,7 +180,7 @@ impl Example { ); } rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.draw(0..3, 0..1); if let Some(ref query_sets) = query_sets { rpass.write_timestamp(&query_sets.timestamp, timestamp_query_index_base + 1); @@ -363,7 +362,7 @@ impl crate::framework::Example for Example { // This databuffer has to store all of the query results, 2 * passes timestamp queries // and 1 * passes statistics queries. Each query returns a u64 value. let buffer_size = pipeline_statistics_offset() - + mem::size_of::<PipelineStatisticsQueries>() as wgpu::BufferAddress; + + size_of::<PipelineStatisticsQueries>() as wgpu::BufferAddress; let data_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: Some("query buffer"), size: buffer_size, @@ -420,7 +419,7 @@ impl crate::framework::Example for Example { // This is guaranteed to be ready. 
let timestamp_view = query_sets .mapping_buffer - .slice(..mem::size_of::() as wgpu::BufferAddress) + .slice(..size_of::() as wgpu::BufferAddress) .get_mapped_range(); let pipeline_stats_view = query_sets .mapping_buffer @@ -498,7 +497,7 @@ impl crate::framework::Example for Example { occlusion_query_set: None, }); rpass.set_pipeline(&self.draw_pipeline); - rpass.set_bind_group(0, &self.bind_group, &[]); + rpass.set_bind_group(0, Some(&self.bind_group), &[]); rpass.draw(0..4, 0..1); } diff --git a/examples/src/msaa_line/mod.rs b/examples/src/msaa_line/mod.rs index e57a4461ab..431fe02bab 100644 --- a/examples/src/msaa_line/mod.rs +++ b/examples/src/msaa_line/mod.rs @@ -7,7 +7,7 @@ //! * Set the primitive_topology to PrimitiveTopology::LineList. //! * Vertices and Indices describe the two points that make up a line. -use std::{borrow::Cow, iter}; +use std::{borrow::Cow, iter, mem::size_of}; use bytemuck::{Pod, Zeroable}; use wgpu::util::DeviceExt; @@ -56,7 +56,7 @@ impl Example { entry_point: Some("vs_main"), compilation_options: Default::default(), buffers: &[wgpu::VertexBufferLayout { - array_stride: std::mem::size_of::() as wgpu::BufferAddress, + array_stride: size_of::() as wgpu::BufferAddress, step_mode: wgpu::VertexStepMode::Vertex, attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x4], }], diff --git a/examples/src/repeated_compute/mod.rs b/examples/src/repeated_compute/mod.rs index 5dac9ce7c2..83dcd4099e 100644 --- a/examples/src/repeated_compute/mod.rs +++ b/examples/src/repeated_compute/mod.rs @@ -59,7 +59,7 @@ async fn compute(local_buffer: &mut [u32], context: &WgpuContext) { timestamp_writes: None, }); compute_pass.set_pipeline(&context.pipeline); - compute_pass.set_bind_group(0, &context.bind_group, &[]); + compute_pass.set_bind_group(0, Some(&context.bind_group), &[]); compute_pass.dispatch_workgroups(local_buffer.len() as u32, 1, 1); } // We finish the compute pass by dropping it. 
diff --git a/examples/src/shadow/mod.rs b/examples/src/shadow/mod.rs index 7047ab598c..a7edcce7e8 100644 --- a/examples/src/shadow/mod.rs +++ b/examples/src/shadow/mod.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, f32::consts, iter, mem, ops::Range, sync::Arc}; +use std::{borrow::Cow, f32::consts, iter, mem::size_of, ops::Range, sync::Arc}; use bytemuck::{Pod, Zeroable}; use wgpu::util::{align_to, DeviceExt}; @@ -219,7 +219,7 @@ impl crate::framework::Example for Example { && device.limits().max_storage_buffers_per_shader_stage > 0; // Create the vertex and index buffers - let vertex_size = mem::size_of::(); + let vertex_size = size_of::(); let (cube_vertex_data, cube_index_data) = create_cube(); let cube_vertex_buf = Arc::new(device.create_buffer_init( &wgpu::util::BufferInitDescriptor { @@ -283,7 +283,7 @@ impl crate::framework::Example for Example { }, ]; - let entity_uniform_size = mem::size_of::() as wgpu::BufferAddress; + let entity_uniform_size = size_of::() as wgpu::BufferAddress; let num_entities = 1 + cube_descs.len() as wgpu::BufferAddress; // Make the `uniform_alignment` >= `entity_uniform_size` and aligned to `min_uniform_buffer_offset_alignment`. 
let uniform_alignment = { @@ -427,8 +427,7 @@ impl crate::framework::Example for Example { target_view: shadow_target_views[1].take().unwrap(), }, ]; - let light_uniform_size = - (Self::MAX_LIGHTS * mem::size_of::()) as wgpu::BufferAddress; + let light_uniform_size = (Self::MAX_LIGHTS * size_of::()) as wgpu::BufferAddress; let light_storage_buf = device.create_buffer(&wgpu::BufferDescriptor { label: None, size: light_uniform_size, @@ -454,7 +453,7 @@ impl crate::framework::Example for Example { }); let shadow_pass = { - let uniform_size = mem::size_of::() as wgpu::BufferAddress; + let uniform_size = size_of::() as wgpu::BufferAddress; // Create pipeline layout let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { @@ -548,7 +547,7 @@ impl crate::framework::Example for Example { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: wgpu::BufferSize::new( - mem::size_of::() as _, + size_of::() as _, ), }, count: None, @@ -737,7 +736,7 @@ impl crate::framework::Example for Example { for (i, light) in self.lights.iter().enumerate() { queue.write_buffer( &self.light_storage_buf, - (i * mem::size_of::()) as wgpu::BufferAddress, + (i * size_of::()) as wgpu::BufferAddress, bytemuck::bytes_of(&light.to_raw()), ); } @@ -757,7 +756,7 @@ impl crate::framework::Example for Example { // let's just copy it over to the shadow uniform buffer. 
encoder.copy_buffer_to_buffer( &self.light_storage_buf, - (i * mem::size_of::()) as wgpu::BufferAddress, + (i * size_of::()) as wgpu::BufferAddress, &self.shadow_pass.uniform_buf, 0, 64, @@ -780,10 +779,10 @@ impl crate::framework::Example for Example { occlusion_query_set: None, }); pass.set_pipeline(&self.shadow_pass.pipeline); - pass.set_bind_group(0, &self.shadow_pass.bind_group, &[]); + pass.set_bind_group(0, Some(&self.shadow_pass.bind_group), &[]); for entity in &self.entities { - pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]); + pass.set_bind_group(1, Some(&self.entity_bind_group), &[entity.uniform_offset]); pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format); pass.set_vertex_buffer(0, entity.vertex_buf.slice(..)); pass.draw_indexed(0..entity.index_count as u32, 0, 0..1); @@ -824,10 +823,10 @@ impl crate::framework::Example for Example { occlusion_query_set: None, }); pass.set_pipeline(&self.forward_pass.pipeline); - pass.set_bind_group(0, &self.forward_pass.bind_group, &[]); + pass.set_bind_group(0, Some(&self.forward_pass.bind_group), &[]); for entity in &self.entities { - pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]); + pass.set_bind_group(1, Some(&self.entity_bind_group), &[entity.uniform_offset]); pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format); pass.set_vertex_buffer(0, entity.vertex_buf.slice(..)); pass.draw_indexed(0..entity.index_count as u32, 0, 0..1); diff --git a/examples/src/skybox/mod.rs b/examples/src/skybox/mod.rs index fd5532e6d1..82e58ef6d5 100644 --- a/examples/src/skybox/mod.rs +++ b/examples/src/skybox/mod.rs @@ -1,5 +1,5 @@ use bytemuck::{Pod, Zeroable}; -use std::{borrow::Cow, f32::consts}; +use std::{borrow::Cow, f32::consts, mem::size_of}; use wgpu::{util::DeviceExt, AstcBlock, AstcChannel}; const IMAGE_SIZE: u32 = 256; @@ -231,7 +231,7 @@ impl crate::framework::Example for Example { entry_point: Some("vs_entity"), compilation_options: 
Default::default(), buffers: &[wgpu::VertexBufferLayout { - array_stride: std::mem::size_of::() as wgpu::BufferAddress, + array_stride: size_of::() as wgpu::BufferAddress, step_mode: wgpu::VertexStepMode::Vertex, attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3], }], @@ -451,7 +451,7 @@ impl crate::framework::Example for Example { occlusion_query_set: None, }); - rpass.set_bind_group(0, &self.bind_group, &[]); + rpass.set_bind_group(0, Some(&self.bind_group), &[]); rpass.set_pipeline(&self.entity_pipeline); for entity in self.entities.iter() { diff --git a/examples/src/srgb_blend/mod.rs b/examples/src/srgb_blend/mod.rs index 63e5e79cb5..822d95d3c4 100644 --- a/examples/src/srgb_blend/mod.rs +++ b/examples/src/srgb_blend/mod.rs @@ -202,7 +202,7 @@ impl crate::framework::Example for Example { }); rpass.push_debug_group("Prepare data for draw."); rpass.set_pipeline(&self.pipeline); - rpass.set_bind_group(0, &self.bind_group, &[]); + rpass.set_bind_group(0, Some(&self.bind_group), &[]); rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint16); rpass.set_vertex_buffer(0, self.vertex_buf.slice(..)); rpass.pop_debug_group(); diff --git a/examples/src/stencil_triangles/mod.rs b/examples/src/stencil_triangles/mod.rs index d497eccc32..bb433af11c 100644 --- a/examples/src/stencil_triangles/mod.rs +++ b/examples/src/stencil_triangles/mod.rs @@ -1,6 +1,6 @@ use bytemuck::{Pod, Zeroable}; use std::borrow::Cow; -use std::mem; +use std::mem::size_of; use wgpu::util::DeviceExt; #[repr(C)] @@ -31,7 +31,7 @@ impl crate::framework::Example for Example { _queue: &wgpu::Queue, ) -> Self { // Create the vertex and index buffers - let vertex_size = mem::size_of::(); + let vertex_size = size_of::(); let outer_vertices = [vertex(-1.0, -1.0), vertex(1.0, -1.0), vertex(0.0, 1.0)]; let mask_vertices = [vertex(-0.5, 0.0), vertex(0.0, -1.0), vertex(0.5, 0.0)]; diff --git a/examples/src/storage_texture/mod.rs b/examples/src/storage_texture/mod.rs index 
76b95d09dd..a687584196 100644 --- a/examples/src/storage_texture/mod.rs +++ b/examples/src/storage_texture/mod.rs @@ -14,6 +14,8 @@ //! A lot of things aren't explained here via comments. See hello-compute and //! repeated-compute for code that is more thoroughly commented. +use std::mem::size_of_val; + #[cfg(not(target_arch = "wasm32"))] use crate::utils::output_image_native; #[cfg(target_arch = "wasm32")] @@ -64,7 +66,7 @@ async fn run(_path: Option) { let storage_texture_view = storage_texture.create_view(&wgpu::TextureViewDescriptor::default()); let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: std::mem::size_of_val(&texture_data[..]) as u64, + size: size_of_val(&texture_data[..]) as u64, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, mapped_at_creation: false, }); @@ -115,7 +117,7 @@ async fn run(_path: Option) { label: None, timestamp_writes: None, }); - compute_pass.set_bind_group(0, &bind_group, &[]); + compute_pass.set_bind_group(0, Some(&bind_group), &[]); compute_pass.set_pipeline(&pipeline); compute_pass.dispatch_workgroups(TEXTURE_DIMS.0 as u32, TEXTURE_DIMS.1 as u32, 1); } diff --git a/examples/src/texture_arrays/mod.rs b/examples/src/texture_arrays/mod.rs index 785b461802..8c81950e9a 100644 --- a/examples/src/texture_arrays/mod.rs +++ b/examples/src/texture_arrays/mod.rs @@ -1,5 +1,8 @@ use bytemuck::{Pod, Zeroable}; -use std::num::{NonZeroU32, NonZeroU64}; +use std::{ + mem::size_of, + num::{NonZeroU32, NonZeroU64}, +}; use wgpu::util::DeviceExt; #[repr(C)] @@ -124,7 +127,7 @@ impl crate::framework::Example for Example { println!("Using fragment entry point '{fragment_entry_point}'"); - let vertex_size = std::mem::size_of::(); + let vertex_size = size_of::(); let vertex_data = create_vertices(); let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { label: Some("Vertex Buffer"), @@ -388,12 +391,12 @@ impl crate::framework::Example for Example { 
rpass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); rpass.set_index_buffer(self.index_buffer.slice(..), self.index_format); if self.uniform_workaround { - rpass.set_bind_group(0, &self.bind_group, &[0]); + rpass.set_bind_group(0, Some(&self.bind_group), &[0]); rpass.draw_indexed(0..6, 0, 0..1); - rpass.set_bind_group(0, &self.bind_group, &[256]); + rpass.set_bind_group(0, Some(&self.bind_group), &[256]); rpass.draw_indexed(6..12, 0, 0..1); } else { - rpass.set_bind_group(0, &self.bind_group, &[0]); + rpass.set_bind_group(0, Some(&self.bind_group), &[0]); rpass.draw_indexed(0..12, 0, 0..1); } diff --git a/examples/src/timestamp_queries/mod.rs b/examples/src/timestamp_queries/mod.rs index 3edcd7b83c..2921ae4c85 100644 --- a/examples/src/timestamp_queries/mod.rs +++ b/examples/src/timestamp_queries/mod.rs @@ -5,7 +5,7 @@ //! * passing `wgpu::RenderPassTimestampWrites`/`wgpu::ComputePassTimestampWrites` during render/compute pass creation. //! This writes timestamps for the beginning and end of a given pass. //! (enabled with wgpu::Features::TIMESTAMP_QUERY) -//! * `wgpu::CommandEncoder::write_timestamp` writes a between any commands recorded on an encoder. +//! * `wgpu::CommandEncoder::write_timestamp` writes a timestamp between any commands recorded on an encoder. //! (enabled with wgpu::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS) //! * `wgpu::RenderPass/ComputePass::write_timestamp` writes a timestamp within commands of a render pass. //! Note that some GPU architectures do not support this. @@ -17,6 +17,8 @@ //! The period, i.e. the unit of time, of the timestamps in wgpu is undetermined and needs to be queried with `wgpu::Queue::get_timestamp_period` //! in order to get comparable results. 
+use std::mem::size_of; + use wgpu::util::DeviceExt; struct Queries { @@ -123,13 +125,13 @@ impl Queries { }), resolve_buffer: device.create_buffer(&wgpu::BufferDescriptor { label: Some("query resolve buffer"), - size: std::mem::size_of::() as u64 * num_queries, + size: size_of::() as u64 * num_queries, usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::QUERY_RESOLVE, mapped_at_creation: false, }), destination_buffer: device.create_buffer(&wgpu::BufferDescriptor { label: Some("query dest buffer"), - size: std::mem::size_of::() as u64 * num_queries, + size: size_of::() as u64 * num_queries, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, mapped_at_creation: false, }), @@ -164,7 +166,7 @@ impl Queries { let timestamps = { let timestamp_view = self .destination_buffer - .slice(..(std::mem::size_of::() as wgpu::BufferAddress * self.num_queries)) + .slice(..(size_of::() as wgpu::BufferAddress * self.num_queries)) .get_mapped_range(); bytemuck::cast_slice(×tamp_view).to_vec() }; @@ -322,7 +324,7 @@ fn compute_pass( }); *next_unused_query += 2; cpass.set_pipeline(&compute_pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.dispatch_workgroups(1, 1, 1); if device .features() diff --git a/examples/src/uniform_values/mod.rs b/examples/src/uniform_values/mod.rs index f275853ba2..1ef58de09a 100644 --- a/examples/src/uniform_values/mod.rs +++ b/examples/src/uniform_values/mod.rs @@ -16,7 +16,7 @@ //! The usage of the uniform buffer within the shader itself is pretty self-explanatory given //! some understanding of WGSL. -use std::sync::Arc; +use std::{mem::size_of, sync::Arc}; // We won't bring StorageBuffer into scope as that might be too easy to confuse // with actual GPU-allocated WGPU storage buffers. 
use encase::ShaderType; @@ -132,7 +132,7 @@ impl WgpuContext { // (2) let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: std::mem::size_of::() as u64, + size: size_of::() as u64, usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); @@ -327,7 +327,11 @@ async fn run(event_loop: EventLoop<()>, window: Arc) { }); render_pass.set_pipeline(&wgpu_context_ref.pipeline); // (9) - render_pass.set_bind_group(0, &wgpu_context_ref.bind_group, &[]); + render_pass.set_bind_group( + 0, + Some(&wgpu_context_ref.bind_group), + &[], + ); render_pass.draw(0..3, 0..1); } wgpu_context_ref.queue.submit(Some(encoder.finish())); diff --git a/examples/src/water/mod.rs b/examples/src/water/mod.rs index 6b4943d45e..505f5707e5 100644 --- a/examples/src/water/mod.rs +++ b/examples/src/water/mod.rs @@ -3,7 +3,7 @@ mod point_gen; use bytemuck::{Pod, Zeroable}; use glam::Vec3; use nanorand::{Rng, WyRand}; -use std::{borrow::Cow, f32::consts, iter, mem}; +use std::{borrow::Cow, f32::consts, iter, mem::size_of}; use wgpu::util::DeviceExt; /// @@ -273,12 +273,12 @@ impl crate::framework::Example for Example { queue: &wgpu::Queue, ) -> Self { // Size of one water vertex - let water_vertex_size = mem::size_of::(); + let water_vertex_size = size_of::(); let water_vertices = point_gen::HexWaterMesh::generate(SIZE).generate_points(); // Size of one terrain vertex - let terrain_vertex_size = mem::size_of::(); + let terrain_vertex_size = size_of::(); // Noise generation let terrain_noise = noise::OpenSimplex::default(); @@ -359,7 +359,7 @@ impl crate::framework::Example for Example { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: wgpu::BufferSize::new( - mem::size_of::() as _, + size_of::() as _, ), }, count: None, @@ -415,7 +415,7 @@ impl crate::framework::Example for Example { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: wgpu::BufferSize::new( - 
mem::size_of::() as _, + size_of::() as _ ), }, count: None, @@ -440,21 +440,21 @@ impl crate::framework::Example for Example { let water_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor { label: Some("Water Uniforms"), - size: mem::size_of::() as _, + size: size_of::() as _, usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); let terrain_normal_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor { label: Some("Normal Terrain Uniforms"), - size: mem::size_of::() as _, + size: size_of::() as _, usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); let terrain_flipped_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor { label: Some("Flipped Terrain Uniforms"), - size: mem::size_of::() as _, + size: size_of::() as _, usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); @@ -630,7 +630,7 @@ impl crate::framework::Example for Example { multiview: None, }); encoder.set_pipeline(&terrain_pipeline); - encoder.set_bind_group(0, &terrain_flipped_bind_group, &[]); + encoder.set_bind_group(0, Some(&terrain_flipped_bind_group), &[]); encoder.set_vertex_buffer(0, terrain_vertex_buf.slice(..)); encoder.draw(0..terrain_vertices.len() as u32, 0..1); encoder.finish(&wgpu::RenderBundleDescriptor::default()) @@ -712,7 +712,7 @@ impl crate::framework::Example for Example { let (water_sin, water_cos) = ((self.current_frame as f32) / 600.0).sin_cos(); queue.write_buffer( &self.water_uniform_buf, - mem::size_of::<[f32; 16]>() as wgpu::BufferAddress * 2, + size_of::<[f32; 16]>() as wgpu::BufferAddress * 2, bytemuck::cast_slice(&[water_sin, water_cos]), ); @@ -784,7 +784,7 @@ impl crate::framework::Example for Example { occlusion_query_set: None, }); rpass.set_pipeline(&self.terrain_pipeline); - rpass.set_bind_group(0, &self.terrain_normal_bind_group, &[]); + rpass.set_bind_group(0, Some(&self.terrain_normal_bind_group), &[]); 
rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..)); rpass.draw(0..self.terrain_vertex_count as u32, 0..1); } @@ -811,7 +811,7 @@ impl crate::framework::Example for Example { }); rpass.set_pipeline(&self.water_pipeline); - rpass.set_bind_group(0, &self.water_bind_group, &[]); + rpass.set_bind_group(0, Some(&self.water_bind_group), &[]); rpass.set_vertex_buffer(0, self.water_vertex_buf.slice(..)); rpass.draw(0..self.water_vertex_count as u32, 0..1); } diff --git a/lock-analyzer/Cargo.toml b/lock-analyzer/Cargo.toml new file mode 100644 index 0000000000..20eaee0f80 --- /dev/null +++ b/lock-analyzer/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "lock-analyzer" +edition.workspace = true +rust-version.workspace = true +keywords.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +authors.workspace = true + +[dependencies] +ron.workspace = true +anyhow.workspace = true + +[dependencies.serde] +workspace = true +features = ["serde_derive"] diff --git a/lock-analyzer/src/main.rs b/lock-analyzer/src/main.rs new file mode 100644 index 0000000000..18f131ab8b --- /dev/null +++ b/lock-analyzer/src/main.rs @@ -0,0 +1,254 @@ +//! Analyzer for data produced by `wgpu-core`'s `observe_locks` feature. +//! +//! When `wgpu-core`'s `observe_locks` feature is enabled, if the +//! `WGPU_CORE_LOCK_OBSERVE_DIR` environment variable is set to the +//! path of an existing directory, then every thread that acquires a +//! lock in `wgpu-core` will write its own log file to that directory. +//! You can then run this program to read those files and summarize +//! the results. +//! +//! This program also consults the `WGPU_CORE_LOCK_OBSERVE_DIR` +//! environment variable to find the log files written by `wgpu-core`. +//! +//! See `wgpu_core/src/lock/observing.rs` for a general explanation of +//! this analysis. 
+ +use std::sync::Arc; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap}, + fmt, + path::PathBuf, +}; + +use anyhow::{Context, Result}; + +fn main() -> Result<()> { + let mut ranks: BTreeMap = BTreeMap::default(); + + let Ok(dir) = std::env::var("WGPU_CORE_LOCK_OBSERVE_DIR") else { + eprintln!(concat!( + "Please set the `WGPU_CORE_LOCK_OBSERVE_DIR` environment variable\n", + "to the path of the directory containing the files written by\n", + "`wgpu-core`'s `observe_locks` feature." + )); + anyhow::bail!("`WGPU_CORE_LOCK_OBSERVE_DIR` environment variable is not set"); + }; + let entries = + std::fs::read_dir(&dir).with_context(|| format!("failed to read directory {dir}"))?; + for entry in entries { + let entry = entry.with_context(|| format!("failed to read directory entry from {dir}"))?; + let name = PathBuf::from(&entry.file_name()); + let Some(extension) = name.extension() else { + eprintln!("Ignoring {}", name.display()); + continue; + }; + if extension != "ron" { + eprintln!("Ignoring {}", name.display()); + continue; + } + + let contents = std::fs::read(entry.path()) + .with_context(|| format!("failed to read lock observations from {}", name.display()))?; + // The addresses of `&'static Location<'static>` values could + // vary from run to run. 
+ let mut locations: HashMap> = HashMap::default(); + for line in contents.split(|&b| b == b'\n') { + if line.is_empty() { + continue; + } + let action = ron::de::from_bytes::(line) + .with_context(|| format!("Error parsing action from {}", name.display()))?; + match action { + Action::Location { + address, + file, + line, + column, + } => { + let file = match file.split_once("src/") { + Some((_, after)) => after.to_string(), + None => file, + }; + assert!(locations + .insert(address, Arc::new(Location { file, line, column })) + .is_none()); + } + Action::Rank { + bit, + member_name, + const_name, + } => match ranks.entry(bit) { + Entry::Occupied(occupied) => { + let rank = occupied.get(); + assert_eq!(rank.member_name, member_name); + assert_eq!(rank.const_name, const_name); + } + Entry::Vacant(vacant) => { + vacant.insert(Rank { + member_name, + const_name, + acquisitions: BTreeMap::default(), + }); + } + }, + Action::Acquisition { + older_rank, + older_location, + newer_rank, + newer_location, + } => { + let older_location = locations[&older_location].clone(); + let newer_location = locations[&newer_location].clone(); + ranks + .get_mut(&older_rank) + .unwrap() + .acquisitions + .entry(newer_rank) + .or_default() + .entry(older_location) + .or_default() + .insert(newer_location); + } + } + } + } + + for older_rank in ranks.values() { + if older_rank.is_leaf() { + // We'll print leaf locks separately, below. + continue; + } + println!( + " rank {} {:?} followed by {{", + older_rank.const_name, older_rank.member_name + ); + let mut acquired_any_leaf_locks = false; + let mut first_newer = true; + for (newer_rank, locations) in &older_rank.acquisitions { + // List acquisitions of leaf locks at the end. 
+ if ranks[newer_rank].is_leaf() { + acquired_any_leaf_locks = true; + continue; + } + if !first_newer { + println!(); + } + for (older_location, newer_locations) in locations { + if newer_locations.len() == 1 { + for newer_loc in newer_locations { + println!(" // holding {older_location} while locking {newer_loc}"); + } + } else { + println!(" // holding {older_location} while locking:"); + for newer_loc in newer_locations { + println!(" // {newer_loc}"); + } + } + } + println!(" {},", ranks[newer_rank].const_name); + first_newer = false; + } + + if acquired_any_leaf_locks { + // We checked that older_rank isn't a leaf lock, so we + // must have printed something above. + if !first_newer { + println!(); + } + println!(" // leaf lock acquisitions:"); + for newer_rank in older_rank.acquisitions.keys() { + if !ranks[newer_rank].is_leaf() { + continue; + } + println!(" {},", ranks[newer_rank].const_name); + } + } + println!(" }};"); + println!(); + } + + for older_rank in ranks.values() { + if !older_rank.is_leaf() { + continue; + } + + println!( + " rank {} {:?} followed by {{ }};", + older_rank.const_name, older_rank.member_name + ); + } + + Ok(()) +} + +#[derive(Debug, serde::Deserialize)] +#[serde(deny_unknown_fields)] +enum Action { + /// A location that we will refer to in later actions. + Location { + address: LocationAddress, + file: String, + line: u32, + column: u32, + }, + + /// A lock rank that we will refer to in later actions. + Rank { + bit: u32, + member_name: String, + const_name: String, + }, + + /// An attempt to acquire a lock while holding another lock. + Acquisition { + /// The number of the already acquired lock's rank. + older_rank: u32, + + /// The source position at which we acquired it. Specifically, + /// its `Location`'s address, as an integer. + older_location: LocationAddress, + + /// The number of the rank of the lock we are acquiring. + newer_rank: u32, + + /// The source position at which we are acquiring it. 
+ /// Specifically, its `Location`'s address, as an integer. + newer_location: LocationAddress, + }, +} + +/// The memory address at which the `Location` was stored in the +/// observed process. +/// +/// This is not `usize` because it does not represent an address in +/// this `lock-analyzer` process. We might generate logs on a 64-bit +/// machine and analyze them on a 32-bit machine. The `u64` type is a +/// reasonable universal type for addresses on any machine. +type LocationAddress = u64; + +struct Rank { + member_name: String, + const_name: String, + acquisitions: BTreeMap, +} + +impl Rank { + fn is_leaf(&self) -> bool { + self.acquisitions.is_empty() + } +} + +type LocationSet = BTreeMap, BTreeSet>>; + +#[derive(Eq, Ord, PartialEq, PartialOrd)] +struct Location { + file: String, + line: u32, + column: u32, +} + +impl fmt::Display for Location { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}:{}", self.file, self.line) + } +} diff --git a/naga-cli/src/bin/naga.rs b/naga-cli/src/bin/naga.rs index d97d96de76..e28519cb06 100644 --- a/naga-cli/src/bin/naga.rs +++ b/naga-cli/src/bin/naga.rs @@ -465,6 +465,7 @@ fn run() -> anyhow::Result<()> { let Parsed { mut module, input_text, + language, } = parse_input(input_path, input, ¶ms)?; // Include debugging information if requested. 
@@ -477,6 +478,7 @@ fn run() -> anyhow::Result<()> { params.spv_out.debug_info = Some(naga::back::spv::DebugInfo { source_code: input_text, file_name: input_path, + language, }) } else { eprintln!( @@ -579,6 +581,7 @@ fn run() -> anyhow::Result<()> { struct Parsed { module: naga::Module, input_text: Option, + language: naga::back::spv::SourceLanguage, } fn parse_input(input_path: &Path, input: Vec, params: &Parameters) -> anyhow::Result { @@ -593,16 +596,26 @@ fn parse_input(input_path: &Path, input: Vec, params: &Parameters) -> anyhow .context("Unable to determine --input-kind from filename")?, }; - let (module, input_text) = match input_kind { - InputKind::Bincode => (bincode::deserialize(&input)?, None), - InputKind::SpirV => { - naga::front::spv::parse_u8_slice(&input, ¶ms.spv_in).map(|m| (m, None))? - } + Ok(match input_kind { + InputKind::Bincode => Parsed { + module: bincode::deserialize(&input)?, + input_text: None, + language: naga::back::spv::SourceLanguage::Unknown, + }, + InputKind::SpirV => Parsed { + module: naga::front::spv::parse_u8_slice(&input, ¶ms.spv_in)?, + input_text: None, + language: naga::back::spv::SourceLanguage::Unknown, + }, InputKind::Wgsl => { let input = String::from_utf8(input)?; let result = naga::front::wgsl::parse_str(&input); match result { - Ok(v) => (v, Some(input)), + Ok(v) => Parsed { + module: v, + input_text: Some(input), + language: naga::back::spv::SourceLanguage::WGSL, + }, Err(ref e) => { let message = anyhow!( "Could not parse WGSL:\n{}", @@ -631,8 +644,8 @@ fn parse_input(input_path: &Path, input: Vec, params: &Parameters) -> anyhow }; let input = String::from_utf8(input)?; let mut parser = naga::front::glsl::Frontend::default(); - ( - parser + Parsed { + module: parser .parse( &naga::front::glsl::Options { stage: shader_stage.0, @@ -649,12 +662,11 @@ fn parse_input(input_path: &Path, input: Vec, params: &Parameters) -> anyhow error.emit_to_writer_with_path(&mut writer, &input, filename); std::process::exit(1); }), - 
Some(input), - ) + input_text: Some(input), + language: naga::back::spv::SourceLanguage::GLSL, + } } - }; - - Ok(Parsed { module, input_text }) + }) } fn write_output( @@ -833,7 +845,11 @@ fn bulk_validate(args: Args, params: &Parameters) -> anyhow::Result<()> { let path = Path::new(&input_path); let input = fs::read(path)?; - let Parsed { module, input_text } = match parse_input(path, input, params) { + let Parsed { + module, + input_text, + language: _, + } = match parse_input(path, input, params) { Ok(parsed) => parsed, Err(error) => { invalid.push(input_path.clone()); diff --git a/naga/Cargo.toml b/naga/Cargo.toml index 3458f4d394..19912a36a8 100644 --- a/naga/Cargo.toml +++ b/naga/Cargo.toml @@ -77,11 +77,11 @@ indexmap.workspace = true log = "0.4" spirv = { version = "0.3", optional = true } thiserror.workspace = true -serde = { version = "1.0.208", features = ["derive"], optional = true } +serde = { version = "1.0.210", features = ["derive"], optional = true } petgraph = { version = "0.6", optional = true } pp-rs = { version = "0.2.1", optional = true } hexf-parse = { version = "0.2.1", optional = true } -unicode-xid = { version = "0.2.5", optional = true } +unicode-xid = { version = "0.2.6", optional = true } [build-dependencies] cfg_aliases.workspace = true @@ -89,6 +89,7 @@ cfg_aliases.workspace = true [dev-dependencies] diff = "0.1" env_logger.workspace = true +itertools.workspace = true # This _cannot_ have a version specified. If it does, crates.io will look # for a version of the package on crates when we publish naga. Path dependencies # are allowed through though. 
diff --git a/naga/src/back/glsl/features.rs b/naga/src/back/glsl/features.rs index a42b26785e..6780cfd990 100644 --- a/naga/src/back/glsl/features.rs +++ b/naga/src/back/glsl/features.rs @@ -399,7 +399,7 @@ impl<'a, W> Writer<'a, W> { | StorageFormat::Rg16Float | StorageFormat::Rgb10a2Uint | StorageFormat::Rgb10a2Unorm - | StorageFormat::Rg11b10UFloat + | StorageFormat::Rg11b10Ufloat | StorageFormat::R64Uint | StorageFormat::Rg32Uint | StorageFormat::Rg32Sint diff --git a/naga/src/back/glsl/mod.rs b/naga/src/back/glsl/mod.rs index 8f8ec41ef5..054e41e778 100644 --- a/naga/src/back/glsl/mod.rs +++ b/naga/src/back/glsl/mod.rs @@ -47,7 +47,7 @@ pub use features::Features; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, ShaderStage, TypeInner, }; use features::FeaturesManager; @@ -498,6 +498,9 @@ pub enum Error { Custom(String), #[error("overrides should not be present at this stage")] Override, + /// [`crate::Sampling::First`] is unsupported. + #[error("`{:?}` sampling is unsupported", crate::Sampling::First)] + FirstSamplingNotSupported, } /// Binary operation with a different logic on the GLSL side. @@ -1534,7 +1537,7 @@ impl<'a, W: Write> Writer<'a, W> { // here, regardless of the version. if let Some(sampling) = sampling { if emit_interpolation_and_auxiliary { - if let Some(qualifier) = glsl_sampling(sampling) { + if let Some(qualifier) = glsl_sampling(sampling)? { write!(self.out, "{qualifier} ")?; } } @@ -1584,6 +1587,7 @@ impl<'a, W: Write> Writer<'a, W> { info, expressions: &func.expressions, named_expressions: &func.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&func.expressions), }; self.named_expressions.clear(); @@ -4831,14 +4835,15 @@ const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str } /// Return the GLSL auxiliary qualifier for the given sampling value. 
-const fn glsl_sampling(sampling: crate::Sampling) -> Option<&'static str> { +const fn glsl_sampling(sampling: crate::Sampling) -> BackendResult> { use crate::Sampling as S; - match sampling { - S::Center => None, + Ok(match sampling { + S::First => return Err(Error::FirstSamplingNotSupported), + S::Center | S::Either => None, S::Centroid => Some("centroid"), S::Sample => Some("sample"), - } + }) } /// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension) @@ -4881,7 +4886,7 @@ fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Err Sf::Rgba8Sint => "rgba8i", Sf::Rgb10a2Uint => "rgb10_a2ui", Sf::Rgb10a2Unorm => "rgb10_a2", - Sf::Rg11b10UFloat => "r11f_g11f_b10f", + Sf::Rg11b10Ufloat => "r11f_g11f_b10f", Sf::R64Uint => "r64ui", Sf::Rg32Uint => "rg32ui", Sf::Rg32Sint => "rg32i", diff --git a/naga/src/back/hlsl/conv.rs b/naga/src/back/hlsl/conv.rs index 17fe0dbaad..9c3b82db5d 100644 --- a/naga/src/back/hlsl/conv.rs +++ b/naga/src/back/hlsl/conv.rs @@ -132,7 +132,7 @@ impl crate::StorageFormat { Self::Rg8Sint | Self::Rg16Sint | Self::Rg32Uint => "int2", Self::Rg8Uint | Self::Rg16Uint | Self::Rg32Sint => "uint2", - Self::Rg11b10UFloat => "float3", + Self::Rg11b10Ufloat => "float3", Self::Rgba16Float | Self::Rgba32Float => "float4", Self::Rgba8Unorm | Self::Bgra8Unorm | Self::Rgba16Unorm | Self::Rgb10a2Unorm => { @@ -202,7 +202,7 @@ impl crate::Sampling { /// Return the HLSL auxiliary qualifier for the given sampling value. 
pub(super) const fn to_hlsl_str(self) -> Option<&'static str> { match self { - Self::Center => None, + Self::Center | Self::First | Self::Either => None, Self::Centroid => Some("centroid"), Self::Sample => Some("sample"), } diff --git a/naga/src/back/hlsl/writer.rs b/naga/src/back/hlsl/writer.rs index 047a7d1ff1..bae797ca8a 100644 --- a/naga/src/back/hlsl/writer.rs +++ b/naga/src/back/hlsl/writer.rs @@ -8,7 +8,7 @@ use super::{ }; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, Module, Scalar, ScalarKind, ShaderStage, TypeInner, }; use std::{fmt, mem}; @@ -346,6 +346,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { info, expressions: &function.expressions, named_expressions: &function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&function.expressions), }; let name = self.names[&NameKey::Function(handle)].clone(); @@ -386,6 +387,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { info, expressions: &ep.function.expressions, named_expressions: &ep.function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&ep.function.expressions), }; self.write_wrapped_functions(module, &ctx)?; diff --git a/naga/src/back/mod.rs b/naga/src/back/mod.rs index 352adc37ec..58c7fa02cb 100644 --- a/naga/src/back/mod.rs +++ b/naga/src/back/mod.rs @@ -3,6 +3,8 @@ Backend functions that export shader [`Module`](super::Module)s into binary and */ #![allow(dead_code)] // can be dead if none of the enabled backends need it +use crate::proc::ExpressionKindTracker; + #[cfg(dot_out)] pub mod dot; #[cfg(glsl_out)] @@ -118,6 +120,8 @@ pub struct FunctionCtx<'a> { pub expressions: &'a crate::Arena, /// Map of expressions that have associated variable names pub named_expressions: &'a crate::NamedExpressions, + /// For constness checks + pub expr_kind_tracker: ExpressionKindTracker, } impl FunctionCtx<'_> { diff --git a/naga/src/back/msl/mod.rs b/naga/src/back/msl/mod.rs 
index 626475debc..96dd142a50 100644 --- a/naga/src/back/msl/mod.rs +++ b/naga/src/back/msl/mod.rs @@ -627,6 +627,7 @@ impl ResolvedInterpolation { (I::Linear, S::Centroid) => Self::CentroidNoPerspective, (I::Linear, S::Sample) => Self::SampleNoPerspective, (I::Flat, _) => Self::Flat, + _ => unreachable!(), } } diff --git a/naga/src/back/msl/writer.rs b/naga/src/back/msl/writer.rs index 656e398963..17b7e29d9c 100644 --- a/naga/src/back/msl/writer.rs +++ b/naga/src/back/msl/writer.rs @@ -376,6 +376,11 @@ pub struct Writer { /// Set of (struct type, struct field index) denoting which fields require /// padding inserted **before** them (i.e. between fields at index - 1 and index) struct_member_pads: FastHashSet<(Handle, u32)>, + + /// Name of the loop reachability macro. + /// + /// See `emit_loop_reachable_macro` for details. + loop_reachable_macro_name: String, } impl crate::Scalar { @@ -665,6 +670,7 @@ impl Writer { #[cfg(test)] put_block_stack_pointers: Default::default(), struct_member_pads: FastHashSet::default(), + loop_reachable_macro_name: String::default(), } } @@ -675,6 +681,128 @@ impl Writer { self.out } + /// Define a macro to invoke before loops, to defeat MSL infinite loop + /// reasoning. + /// + /// If we haven't done so already, emit the definition of a preprocessor + /// macro to be invoked before each loop in the generated MSL, to ensure + /// that the MSL compiler's optimizations do not remove bounds checks. + /// + /// Only the first call to this function for a given module actually causes + /// the macro definition to be written. Subsequent loops can simply use the + /// prior macro definition, since macros aren't block-scoped. + /// + /// # What is this trying to solve? + /// + /// In Metal Shading Language, an infinite loop has undefined behavior. + /// (This rule is inherited from C++14.) This means that, if the MSL + /// compiler determines that a given loop will never exit, it may assume + /// that it is never reached. 
It may thus assume that any conditions + /// sufficient to cause the loop to be reached must be false. Like many + /// optimizing compilers, MSL uses this kind of analysis to establish limits + /// on the range of values variables involved in those conditions might + /// hold. + /// + /// For example, suppose the MSL compiler sees the code: + /// + /// ```ignore + /// if (i >= 10) { + /// while (true) { } + /// } + /// ``` + /// + /// It will recognize that the `while` loop will never terminate, conclude + /// that it must be unreachable, and thus infer that, if this code is + /// reached, then `i < 10` at that point. + /// + /// Now suppose that, at some point where `i` has the same value as above, + /// the compiler sees the code: + /// + /// ```ignore + /// if (i < 10) { + /// a[i] = 1; + /// } + /// ``` + /// + /// Because the compiler is confident that `i < 10`, it will make the + /// assignment to `a[i]` unconditional, rewriting this code as, simply: + /// + /// ```ignore + /// a[i] = 1; + /// ``` + /// + /// If that `if` condition was injected by Naga to implement a bounds check, + /// the MSL compiler's optimizations could allow out-of-bounds array + /// accesses to occur. + /// + /// Naga cannot feasibly anticipate whether the MSL compiler will determine + /// that a loop is infinite, so an attacker could craft a Naga module + /// containing an infinite loop protected by conditions that cause the Metal + /// compiler to remove bounds checks that Naga injected elsewhere in the + /// function. + /// + /// This rewrite could occur even if the conditional assignment appears + /// *before* the `while` loop, as long as `i < 10` by the time the loop is + /// reached. This would allow the attacker to save the results of + /// unauthorized reads somewhere accessible before entering the infinite + /// loop. 
But even worse, the MSL compiler has been observed to simply + /// delete the infinite loop entirely, so that even code dominated by the + /// loop becomes reachable. This would make the attack even more flexible, + /// since shaders that would appear to never terminate would actually exit + /// nicely, after having stolen data from elsewhere in the GPU address + /// space. + /// + /// Ideally, Naga would prevent UB entirely via some means that persuades + /// the MSL compiler that no loop Naga generates is infinite. One approach + /// would be to add inline assembly to each loop that is annotated as + /// potentially branching out of the loop, but which in fact generates no + /// instructions. Unfortunately, inline assembly is not handled correctly by + /// some Metal device drivers. Further experimentation hasn't produced a + /// satisfactory approach. + /// + /// Instead, we accept that the MSL compiler may determine that some loops + /// are infinite, and focus instead on preventing the range analysis from + /// being affected. We transform *every* loop into something like this: + /// + /// ```ignore + /// if (volatile bool unpredictable = true; unpredictable) + /// while (true) { } + /// ``` + /// + /// Since the `volatile` qualifier prevents the compiler from assuming that + /// the `if` condition is true, it cannot be sure the infinite loop is + /// reached, and thus it cannot assume the entire structure is unreachable. + /// This prevents the range analysis impact described above. + /// + /// Unfortunately, what makes this a kludge, not a hack, is that this + /// solution leaves the GPU executing a pointless conditional branch, at + /// runtime, before each loop. There's no part of the system that has a + /// global enough view to be sure that `unpredictable` is true, and remove + /// it from the code. + /// + /// To make our output a bit more legible, we pull the condition out into a + /// preprocessor macro defined at the top of the module. 
+ /// + /// This approach is also used by Chromium WebGPU's Dawn shader compiler, as of + /// . + fn emit_loop_reachable_macro(&mut self) -> BackendResult { + if !self.loop_reachable_macro_name.is_empty() { + return Ok(()); + } + + self.loop_reachable_macro_name = self.namer.call("LOOP_IS_REACHABLE"); + let loop_reachable_volatile_name = self.namer.call("unpredictable_jump_over_loop"); + writeln!( + self.out, + "#define {} if (volatile bool {} = true; {})", + self.loop_reachable_macro_name, + loop_reachable_volatile_name, + loop_reachable_volatile_name, + )?; + + Ok(()) + } + fn put_call_parameters( &mut self, parameters: impl Iterator>, @@ -2949,10 +3077,15 @@ impl Writer { ref continuing, break_if, } => { + self.emit_loop_reachable_macro()?; if !continuing.is_empty() || break_if.is_some() { let gate_name = self.namer.call("loop_init"); writeln!(self.out, "{level}bool {gate_name} = true;")?; - writeln!(self.out, "{level}while(true) {{")?; + writeln!( + self.out, + "{level}{} while(true) {{", + self.loop_reachable_macro_name, + )?; let lif = level.next(); let lcontinuing = lif.next(); writeln!(self.out, "{lif}if (!{gate_name}) {{")?; @@ -2967,7 +3100,11 @@ impl Writer { writeln!(self.out, "{lif}}}")?; writeln!(self.out, "{lif}{gate_name} = false;")?; } else { - writeln!(self.out, "{level}while(true) {{")?; + writeln!( + self.out, + "{level}{} while(true) {{", + self.loop_reachable_macro_name, + )?; } self.put_block(level.next(), body, context)?; writeln!(self.out, "{level}}}")?; @@ -3419,6 +3556,7 @@ impl Writer { &[CLAMPED_LOD_LOAD_PREFIX], &mut self.names, ); + self.loop_reachable_macro_name.clear(); self.struct_member_pads.clear(); writeln!( diff --git a/naga/src/back/pipeline_constants.rs b/naga/src/back/pipeline_constants.rs index bb5fb6fb01..6c6cf4c8e5 100644 --- a/naga/src/back/pipeline_constants.rs +++ b/naga/src/back/pipeline_constants.rs @@ -289,6 +289,7 @@ fn process_function( &mut local_expression_kind_tracker, &mut emitter, &mut block, + false, ); for 
(old_h, mut expr, span) in expressions.drain() { diff --git a/naga/src/back/spv/instructions.rs b/naga/src/back/spv/instructions.rs index 517038a91b..279c9ab7d5 100644 --- a/naga/src/back/spv/instructions.rs +++ b/naga/src/back/spv/instructions.rs @@ -1203,7 +1203,7 @@ impl From for spirv::ImageFormat { Sf::Bgra8Unorm => Self::Unknown, Sf::Rgb10a2Uint => Self::Rgb10a2ui, Sf::Rgb10a2Unorm => Self::Rgb10A2, - Sf::Rg11b10UFloat => Self::R11fG11fB10f, + Sf::Rg11b10Ufloat => Self::R11fG11fB10f, Sf::R64Uint => Self::R64ui, Sf::Rg32Uint => Self::Rg32ui, Sf::Rg32Sint => Self::Rg32i, diff --git a/naga/src/back/spv/mod.rs b/naga/src/back/spv/mod.rs index 91407561ab..32bd1fcecf 100644 --- a/naga/src/back/spv/mod.rs +++ b/naga/src/back/spv/mod.rs @@ -16,7 +16,7 @@ mod selection; mod subgroup; mod writer; -pub use spirv::Capability; +pub use spirv::{Capability, SourceLanguage}; use crate::arena::{Handle, HandleVec}; use crate::proc::{BoundsCheckPolicies, TypeResolution}; @@ -89,6 +89,7 @@ impl IdGenerator { pub struct DebugInfo<'a> { pub source_code: &'a str, pub file_name: &'a std::path::Path, + pub language: SourceLanguage, } /// A SPIR-V block to which we are still adding instructions. diff --git a/naga/src/back/spv/writer.rs b/naga/src/back/spv/writer.rs index d1c1e82a20..678dcb4246 100644 --- a/naga/src/back/spv/writer.rs +++ b/naga/src/back/spv/writer.rs @@ -1511,7 +1511,12 @@ impl Writer { } match sampling { // Center sampling is the default in SPIR-V. 
- None | Some(crate::Sampling::Center) => (), + None + | Some( + crate::Sampling::Center + | crate::Sampling::First + | crate::Sampling::Either, + ) => (), Some(crate::Sampling::Centroid) => { self.decorate(id, Decoration::Centroid, &[]); } @@ -1962,7 +1967,7 @@ impl Writer { source_file_id, }); self.debugs.append(&mut Instruction::source_auto_continued( - spirv::SourceLanguage::Unknown, + debug_info.language, 0, &debug_info_inner, )); diff --git a/naga/src/back/wgsl/writer.rs b/naga/src/back/wgsl/writer.rs index f0e15e2159..a424ffc882 100644 --- a/naga/src/back/wgsl/writer.rs +++ b/naga/src/back/wgsl/writer.rs @@ -1,7 +1,7 @@ use super::Error; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, Module, ShaderStage, TypeInner, }; use std::fmt::Write; @@ -166,6 +166,7 @@ impl Writer { info: fun_info, expressions: &function.expressions, named_expressions: &function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&function.expressions), }; // Write the function @@ -193,6 +194,7 @@ impl Writer { info: info.get_entry_point(index), expressions: &ep.function.expressions, named_expressions: &ep.function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&ep.function.expressions), }; self.write_function(module, &ep.function, &func_ctx)?; @@ -1136,8 +1138,14 @@ impl Writer { func_ctx: &back::FunctionCtx, name: &str, ) -> BackendResult { + // Some functions are marked as const, but are not yet implemented as constant expression + let quantifier = if func_ctx.expr_kind_tracker.is_impl_const(handle) { + "const" + } else { + "let" + }; // Write variable name - write!(self.out, "let {name}")?; + write!(self.out, "{quantifier} {name}")?; if self.flags.contains(WriterFlags::EXPLICIT_TYPES) { write!(self.out, ": ")?; let ty = &func_ctx.info[handle].ty; @@ -2036,7 +2044,7 @@ const fn storage_format_str(format: crate::StorageFormat) -> &'static str { Sf::Bgra8Unorm => 
"bgra8unorm", Sf::Rgb10a2Uint => "rgb10a2uint", Sf::Rgb10a2Unorm => "rgb10a2unorm", - Sf::Rg11b10UFloat => "rg11b10float", + Sf::Rg11b10Ufloat => "rg11b10float", Sf::R64Uint => "r64uint", Sf::Rg32Uint => "rg32uint", Sf::Rg32Sint => "rg32sint", @@ -2075,6 +2083,8 @@ const fn sampling_str(sampling: crate::Sampling) -> &'static str { S::Center => "", S::Centroid => "centroid", S::Sample => "sample", + S::First => "first", + S::Either => "either", } } diff --git a/naga/src/front/atomic_upgrade.rs b/naga/src/front/atomic_upgrade.rs index c59969ace9..d2fbc12893 100644 --- a/naga/src/front/atomic_upgrade.rs +++ b/naga/src/front/atomic_upgrade.rs @@ -44,12 +44,8 @@ pub enum Error { MultiMemberStruct, #[error("encountered unsupported global initializer in an atomic variable")] GlobalInitUnsupported, -} - -impl From for crate::front::spv::Error { - fn from(source: Error) -> Self { - crate::front::spv::Error::AtomicUpgradeError(source) - } + #[error("expected to find a global variable")] + GlobalVariableMissing, } #[derive(Clone, Default)] diff --git a/naga/src/front/glsl/parser/types.rs b/naga/src/front/glsl/parser/types.rs index 422c5ff200..2f3c18d3e4 100644 --- a/naga/src/front/glsl/parser/types.rs +++ b/naga/src/front/glsl/parser/types.rs @@ -397,7 +397,7 @@ fn map_image_format(word: &str) -> Option { "rgba16f" => Sf::Rgba16Float, "rg32f" => Sf::Rg32Float, "rg16f" => Sf::Rg16Float, - "r11f_g11f_b10f" => Sf::Rg11b10UFloat, + "r11f_g11f_b10f" => Sf::Rg11b10Ufloat, "r32f" => Sf::R32Float, "r16f" => Sf::R16Float, "rgba16" => Sf::Rgba16Unorm, diff --git a/naga/src/front/spv/convert.rs b/naga/src/front/spv/convert.rs index 98552ccdb4..2619eb7f44 100644 --- a/naga/src/front/spv/convert.rs +++ b/naga/src/front/spv/convert.rs @@ -104,7 +104,7 @@ pub(super) fn map_image_format(word: spirv::Word) -> Result Ok(crate::StorageFormat::Rgba8Sint), Some(spirv::ImageFormat::Rgb10a2ui) => Ok(crate::StorageFormat::Rgb10a2Uint), Some(spirv::ImageFormat::Rgb10A2) => 
Ok(crate::StorageFormat::Rgb10a2Unorm), - Some(spirv::ImageFormat::R11fG11fB10f) => Ok(crate::StorageFormat::Rg11b10UFloat), + Some(spirv::ImageFormat::R11fG11fB10f) => Ok(crate::StorageFormat::Rg11b10Ufloat), Some(spirv::ImageFormat::R64ui) => Ok(crate::StorageFormat::R64Uint), Some(spirv::ImageFormat::Rg32ui) => Ok(crate::StorageFormat::Rg32Uint), Some(spirv::ImageFormat::Rg32i) => Ok(crate::StorageFormat::Rg32Sint), diff --git a/naga/src/front/spv/error.rs b/naga/src/front/spv/error.rs index 42df9d807b..219048e102 100644 --- a/naga/src/front/spv/error.rs +++ b/naga/src/front/spv/error.rs @@ -159,3 +159,9 @@ impl Error { String::from_utf8(writer.into_inner()).unwrap() } } + +impl From for Error { + fn from(source: atomic_upgrade::Error) -> Self { + Error::AtomicUpgradeError(source) + } +} diff --git a/naga/src/front/spv/mod.rs b/naga/src/front/spv/mod.rs index d27a0b9416..2e5c918d30 100644 --- a/naga/src/front/spv/mod.rs +++ b/naga/src/front/spv/mod.rs @@ -565,11 +565,15 @@ impl<'a> BlockContext<'a> { /// Descend into the expression with the given handle, locating a contained /// global variable. /// + /// If the expression doesn't actually refer to something in a global + /// variable, we can't upgrade its type in a way that Naga validation would + /// pass, so reject the input instead. + /// /// This is used to track atomic upgrades. fn get_contained_global_variable( &self, mut handle: Handle, - ) -> Option> { + ) -> Result, Error> { log::debug!("\t\tlocating global variable in {handle:?}"); loop { match self.expressions[handle] { @@ -583,14 +587,16 @@ impl<'a> BlockContext<'a> { } crate::Expression::GlobalVariable(h) => { log::debug!("\t\t found {h:?}"); - return Some(h); + return Ok(h); } _ => { break; } } } - None + Err(Error::AtomicUpgradeError( + crate::front::atomic_upgrade::Error::GlobalVariableMissing, + )) } } @@ -1323,6 +1329,109 @@ impl> Frontend { )) } + /// Return the Naga [`Expression`] for `pointer_id`, and its referent [`Type`]. 
+ /// + /// Return a [`Handle`] for a Naga [`Expression`] that holds the value of + /// the SPIR-V instruction `pointer_id`, along with the [`Type`] to which it + /// is a pointer. + /// + /// This may entail spilling `pointer_id`'s value to a temporary: + /// see [`get_expr_handle`]'s documentation. + /// + /// [`Expression`]: crate::Expression + /// [`Type`]: crate::Type + /// [`Handle`]: crate::Handle + /// [`get_expr_handle`]: Frontend::get_expr_handle + fn get_exp_and_base_ty_handles( + &self, + pointer_id: spirv::Word, + ctx: &mut BlockContext, + emitter: &mut crate::proc::Emitter, + block: &mut crate::Block, + body_idx: usize, + ) -> Result<(Handle, Handle), Error> { + log::trace!("\t\t\tlooking up pointer expr {:?}", pointer_id); + let p_lexp_handle; + let p_lexp_ty_id; + { + let lexp = self.lookup_expression.lookup(pointer_id)?; + p_lexp_handle = self.get_expr_handle(pointer_id, lexp, ctx, emitter, block, body_idx); + p_lexp_ty_id = lexp.type_id; + }; + + log::trace!("\t\t\tlooking up pointer type {pointer_id:?}"); + let p_ty = self.lookup_type.lookup(p_lexp_ty_id)?; + let p_ty_base_id = p_ty.base_id.ok_or(Error::InvalidAccessType(p_lexp_ty_id))?; + + log::trace!("\t\t\tlooking up pointer base type {p_ty_base_id:?} of {p_ty:?}"); + let p_base_ty = self.lookup_type.lookup(p_ty_base_id)?; + + Ok((p_lexp_handle, p_base_ty.handle)) + } + + #[allow(clippy::too_many_arguments)] + fn parse_atomic_expr_with_value( + &mut self, + inst: Instruction, + emitter: &mut crate::proc::Emitter, + ctx: &mut BlockContext, + block: &mut crate::Block, + block_id: spirv::Word, + body_idx: usize, + atomic_function: crate::AtomicFunction, + ) -> Result<(), Error> { + inst.expect(7)?; + let start = self.data_offset; + let result_type_id = self.next()?; + let result_id = self.next()?; + let pointer_id = self.next()?; + let _scope_id = self.next()?; + let _memory_semantics_id = self.next()?; + let value_id = self.next()?; + let span = self.span_from_with_op(start); + + let 
(p_lexp_handle, p_base_ty_handle) = + self.get_exp_and_base_ty_handles(pointer_id, ctx, emitter, block, body_idx)?; + + log::trace!("\t\t\tlooking up value expr {value_id:?}"); + let v_lexp_handle = self.lookup_expression.lookup(value_id)?.handle; + + block.extend(emitter.finish(ctx.expressions)); + // Create an expression for our result + let r_lexp_handle = { + let expr = crate::Expression::AtomicResult { + ty: p_base_ty_handle, + comparison: false, + }; + let handle = ctx.expressions.append(expr, span); + self.lookup_expression.insert( + result_id, + LookupExpression { + handle, + type_id: result_type_id, + block_id, + }, + ); + handle + }; + emitter.start(ctx.expressions); + + // Create a statement for the op itself + let stmt = crate::Statement::Atomic { + pointer: p_lexp_handle, + fun: atomic_function, + value: v_lexp_handle, + result: Some(r_lexp_handle), + }; + block.push(stmt, span); + + // Store any associated global variables so we can upgrade their types later + self.upgrade_atomics + .insert(ctx.get_contained_global_variable(p_lexp_handle)?); + + Ok(()) + } + /// Add the next SPIR-V block's contents to `block_ctx`. 
/// /// Except for the function's entry block, `block_id` should be the label of @@ -3985,35 +4094,91 @@ impl> Frontend { ); emitter.start(ctx.expressions); } - Op::AtomicIIncrement => { + Op::AtomicLoad => { inst.expect(6)?; let start = self.data_offset; - let span = self.span_from_with_op(start); let result_type_id = self.next()?; let result_id = self.next()?; let pointer_id = self.next()?; let _scope_id = self.next()?; let _memory_semantics_id = self.next()?; + let span = self.span_from_with_op(start); log::trace!("\t\t\tlooking up expr {:?}", pointer_id); - let (p_lexp_handle, p_lexp_ty_id) = { - let lexp = self.lookup_expression.lookup(pointer_id)?; - let handle = get_expr_handle!(pointer_id, &lexp); - (handle, lexp.type_id) + let p_lexp_handle = + get_expr_handle!(pointer_id, self.lookup_expression.lookup(pointer_id)?); + + // Create an expression for our result + let expr = crate::Expression::Load { + pointer: p_lexp_handle, + }; + let handle = ctx.expressions.append(expr, span); + self.lookup_expression.insert( + result_id, + LookupExpression { + handle, + type_id: result_type_id, + block_id, + }, + ); + + // Store any associated global variables so we can upgrade their types later + self.upgrade_atomics + .insert(ctx.get_contained_global_variable(p_lexp_handle)?); + } + Op::AtomicStore => { + inst.expect(5)?; + let start = self.data_offset; + let pointer_id = self.next()?; + let _scope_id = self.next()?; + let _memory_semantics_id = self.next()?; + let value_id = self.next()?; + let span = self.span_from_with_op(start); + + log::trace!("\t\t\tlooking up pointer expr {:?}", pointer_id); + let p_lexp_handle = + get_expr_handle!(pointer_id, self.lookup_expression.lookup(pointer_id)?); + + log::trace!("\t\t\tlooking up value expr {:?}", pointer_id); + let v_lexp_handle = + get_expr_handle!(value_id, self.lookup_expression.lookup(value_id)?); + + block.extend(emitter.finish(ctx.expressions)); + // Create a statement for the op itself + let stmt = 
crate::Statement::Store { + pointer: p_lexp_handle, + value: v_lexp_handle, }; + block.push(stmt, span); + emitter.start(ctx.expressions); - log::trace!("\t\t\tlooking up type {pointer_id:?}"); - let p_ty = self.lookup_type.lookup(p_lexp_ty_id)?; - let p_ty_base_id = - p_ty.base_id.ok_or(Error::InvalidAccessType(p_lexp_ty_id))?; + // Store any associated global variables so we can upgrade their types later + self.upgrade_atomics + .insert(ctx.get_contained_global_variable(p_lexp_handle)?); + } + Op::AtomicIIncrement | Op::AtomicIDecrement => { + inst.expect(6)?; + let start = self.data_offset; + let result_type_id = self.next()?; + let result_id = self.next()?; + let pointer_id = self.next()?; + let _scope_id = self.next()?; + let _memory_semantics_id = self.next()?; + let span = self.span_from_with_op(start); - log::trace!("\t\t\tlooking up base type {p_ty_base_id:?} of {p_ty:?}"); - let p_base_ty = self.lookup_type.lookup(p_ty_base_id)?; + let (p_exp_h, p_base_ty_h) = self.get_exp_and_base_ty_handles( + pointer_id, + ctx, + &mut emitter, + &mut block, + body_idx, + )?; + block.extend(emitter.finish(ctx.expressions)); // Create an expression for our result let r_lexp_handle = { let expr = crate::Expression::AtomicResult { - ty: p_base_ty.handle, + ty: p_base_ty_h, comparison: false, }; let handle = ctx.expressions.append(expr, span); @@ -4027,22 +4192,26 @@ impl> Frontend { ); handle }; + emitter.start(ctx.expressions); - // Create a literal "1" since WGSL lacks an increment operation + // Create a literal "1" to use as our value let one_lexp_handle = make_index_literal( ctx, 1, &mut block, &mut emitter, - p_base_ty.handle, - p_lexp_ty_id, + p_base_ty_h, + result_type_id, span, )?; // Create a statement for the op itself let stmt = crate::Statement::Atomic { - pointer: p_lexp_handle, - fun: crate::AtomicFunction::Add, + pointer: p_exp_h, + fun: match inst.op { + Op::AtomicIIncrement => crate::AtomicFunction::Add, + _ => crate::AtomicFunction::Subtract, + }, value: 
one_lexp_handle, result: Some(r_lexp_handle), }; @@ -4050,8 +4219,38 @@ impl> Frontend { // Store any associated global variables so we can upgrade their types later self.upgrade_atomics - .extend(ctx.get_contained_global_variable(p_lexp_handle)); + .insert(ctx.get_contained_global_variable(p_exp_h)?); } + Op::AtomicExchange + | Op::AtomicIAdd + | Op::AtomicISub + | Op::AtomicSMin + | Op::AtomicUMin + | Op::AtomicSMax + | Op::AtomicUMax + | Op::AtomicAnd + | Op::AtomicOr + | Op::AtomicXor => self.parse_atomic_expr_with_value( + inst, + &mut emitter, + ctx, + &mut block, + block_id, + body_idx, + match inst.op { + Op::AtomicExchange => crate::AtomicFunction::Exchange { compare: None }, + Op::AtomicIAdd => crate::AtomicFunction::Add, + Op::AtomicISub => crate::AtomicFunction::Subtract, + Op::AtomicSMin => crate::AtomicFunction::Min, + Op::AtomicUMin => crate::AtomicFunction::Min, + Op::AtomicSMax => crate::AtomicFunction::Max, + Op::AtomicUMax => crate::AtomicFunction::Max, + Op::AtomicAnd => crate::AtomicFunction::And, + Op::AtomicOr => crate::AtomicFunction::InclusiveOr, + _ => crate::AtomicFunction::ExclusiveOr, + }, + )?, + _ => { return Err(Error::UnsupportedInstruction(self.state, inst.op)); } @@ -5710,33 +5909,48 @@ mod test { ]; let _ = super::parse_u8_slice(&bin, &Default::default()).unwrap(); } +} - #[cfg(all(feature = "wgsl-in", wgsl_out))] - #[test] - fn atomic_i_inc() { +#[cfg(all(test, feature = "wgsl-in", wgsl_out))] +mod test_atomic { + fn atomic_test(bytes: &[u8]) { let _ = env_logger::builder().is_test(true).try_init(); - let bytes = include_bytes!("../../../tests/in/spv/atomic_i_increment.spv"); - let m = super::parse_u8_slice(bytes, &Default::default()).unwrap(); - let mut validator = crate::valid::Validator::new( + let m = crate::front::spv::parse_u8_slice(bytes, &Default::default()).unwrap(); + + let mut wgsl = String::new(); + let mut should_panic = false; + + for vflags in [ + crate::valid::ValidationFlags::all(), 
crate::valid::ValidationFlags::empty(), - Default::default(), - ); - let info = match validator.validate(&m) { - Err(e) => { - log::error!("{}", e.emit_to_string("")); - return; - } - Ok(i) => i, - }; - let wgsl = - crate::back::wgsl::write_string(&m, &info, crate::back::wgsl::WriterFlags::empty()) - .unwrap(); - log::info!("atomic_i_increment:\n{wgsl}"); + ] { + let mut validator = crate::valid::Validator::new(vflags, Default::default()); + match validator.validate(&m) { + Err(e) => { + log::error!("SPIR-V validation {}", e.emit_to_string("")); + should_panic = true; + } + Ok(i) => { + wgsl = crate::back::wgsl::write_string( + &m, + &i, + crate::back::wgsl::WriterFlags::empty(), + ) + .unwrap(); + log::info!("wgsl-out:\n{wgsl}"); + break; + } + }; + } + + if should_panic { + panic!("validation error"); + } let m = match crate::front::wgsl::parse_str(&wgsl) { Ok(m) => m, Err(e) => { - log::error!("{}", e.emit_to_string(&wgsl)); + log::error!("round trip WGSL validation {}", e.emit_to_string(&wgsl)); panic!("invalid module"); } }; @@ -5747,4 +5961,35 @@ mod test { panic!("invalid generated wgsl"); } } + + #[test] + fn atomic_i_inc() { + atomic_test(include_bytes!( + "../../../tests/in/spv/atomic_i_increment.spv" + )); + } + + #[test] + fn atomic_load_and_store() { + atomic_test(include_bytes!( + "../../../tests/in/spv/atomic_load_and_store.spv" + )); + } + + #[test] + fn atomic_exchange() { + atomic_test(include_bytes!("../../../tests/in/spv/atomic_exchange.spv")); + } + + #[test] + fn atomic_i_decrement() { + atomic_test(include_bytes!( + "../../../tests/in/spv/atomic_i_decrement.spv" + )); + } + + #[test] + fn atomic_i_add_and_sub() { + atomic_test(include_bytes!("../../../tests/in/spv/atomic_i_add_sub.spv")); + } } diff --git a/naga/src/front/wgsl/error.rs b/naga/src/front/wgsl/error.rs index bfaba48946..3d4ac62183 100644 --- a/naga/src/front/wgsl/error.rs +++ b/naga/src/front/wgsl/error.rs @@ -10,6 +10,9 @@ use std::ops::Range; use termcolor::{ColorChoice, 
NoColor, StandardStream}; use thiserror::Error; +#[cfg(test)] +use std::mem::size_of; + #[derive(Clone, Debug)] pub struct ParseError { message: String, @@ -144,7 +147,7 @@ pub enum InvalidAssignmentType { } #[derive(Clone, Debug)] -pub enum Error<'a> { +pub(crate) enum Error<'a> { Unexpected(Span, ExpectedToken<'a>), UnexpectedComponents(Span), UnexpectedOperationInConstContext(Span), @@ -154,8 +157,8 @@ pub enum Error<'a> { BadTexture(Span), BadTypeCast { span: Span, - from_type: String, - to_type: String, + from_type: Box, + to_type: Box, }, BadTextureSampleType { span: Span, @@ -188,8 +191,8 @@ pub enum Error<'a> { TypeNotInferable(Span), InitializationTypeMismatch { name: Span, - expected: String, - got: String, + expected: Box, + got: Box, }, DeclMissingTypeAndInit(Span), MissingAttribute(&'static str, Span), @@ -232,7 +235,7 @@ pub enum Error<'a> { /// name is `decl` has an identifier at `reference` whose definition is /// the next declaration in the cycle. The last pair's `reference` is /// the same identifier as `ident`, above. 
- path: Vec<(Span, Span)>, + path: Box<[(Span, Span)]>, }, InvalidSwitchValue { uint: bool, @@ -251,30 +254,41 @@ pub enum Error<'a> { ExpectedNonNegative(Span), ExpectedPositiveArrayLength(Span), MissingWorkgroupSize(Span), - ConstantEvaluatorError(ConstantEvaluatorError, Span), - AutoConversion { - dest_span: Span, - dest_type: String, - source_span: Span, - source_type: String, - }, - AutoConversionLeafScalar { - dest_span: Span, - dest_scalar: String, - source_span: Span, - source_type: String, - }, - ConcretizationFailed { - expr_span: Span, - expr_type: String, - scalar: String, - inner: ConstantEvaluatorError, - }, + ConstantEvaluatorError(Box, Span), + AutoConversion(Box), + AutoConversionLeafScalar(Box), + ConcretizationFailed(Box), ExceededLimitForNestedBraces { span: Span, limit: u8, }, PipelineConstantIDValue(Span), + NotBool(Span), + ConstAssertFailed(Span), +} + +#[derive(Clone, Debug)] +pub(crate) struct AutoConversionError { + pub dest_span: Span, + pub dest_type: Box, + pub source_span: Span, + pub source_type: Box, +} + +#[derive(Clone, Debug)] +pub(crate) struct AutoConversionLeafScalarError { + pub dest_span: Span, + pub dest_scalar: Box, + pub source_span: Span, + pub source_type: Box, +} + +#[derive(Clone, Debug)] +pub(crate) struct ConcretizationFailedError { + pub expr_span: Span, + pub expr_type: Box, + pub scalar: Box, + pub inner: ConstantEvaluatorError, } impl<'a> Error<'a> { @@ -738,45 +752,55 @@ impl<'a> Error<'a> { )], notes: vec![], }, - Error::AutoConversion { dest_span, ref dest_type, source_span, ref source_type } => ParseError { - message: format!("automatic conversions cannot convert `{source_type}` to `{dest_type}`"), - labels: vec![ - ( - dest_span, - format!("a value of type {dest_type} is required here").into(), - ), - ( - source_span, - format!("this expression has type {source_type}").into(), - ) - ], - notes: vec![], + Error::AutoConversion(ref error) => { + // destructuring ensures all fields are handled + let 
AutoConversionError { dest_span, ref dest_type, source_span, ref source_type } = **error; + ParseError { + message: format!("automatic conversions cannot convert `{source_type}` to `{dest_type}`"), + labels: vec![ + ( + dest_span, + format!("a value of type {dest_type} is required here").into(), + ), + ( + source_span, + format!("this expression has type {source_type}").into(), + ) + ], + notes: vec![], + } }, - Error::AutoConversionLeafScalar { dest_span, ref dest_scalar, source_span, ref source_type } => ParseError { - message: format!("automatic conversions cannot convert elements of `{source_type}` to `{dest_scalar}`"), - labels: vec![ - ( - dest_span, - format!("a value with elements of type {dest_scalar} is required here").into(), - ), - ( - source_span, - format!("this expression has type {source_type}").into(), - ) - ], - notes: vec![], + Error::AutoConversionLeafScalar(ref error) => { + let AutoConversionLeafScalarError { dest_span, ref dest_scalar, source_span, ref source_type } = **error; + ParseError { + message: format!("automatic conversions cannot convert elements of `{source_type}` to `{dest_scalar}`"), + labels: vec![ + ( + dest_span, + format!("a value with elements of type {dest_scalar} is required here").into(), + ), + ( + source_span, + format!("this expression has type {source_type}").into(), + ) + ], + notes: vec![], + } }, - Error::ConcretizationFailed { expr_span, ref expr_type, ref scalar, ref inner } => ParseError { - message: format!("failed to convert expression to a concrete type: {}", inner), - labels: vec![ - ( - expr_span, - format!("this expression has type {}", expr_type).into(), - ) - ], - notes: vec![ - format!("the expression should have been converted to have {} scalar type", scalar), - ] + Error::ConcretizationFailed(ref error) => { + let ConcretizationFailedError { expr_span, ref expr_type, ref scalar, ref inner } = **error; + ParseError { + message: format!("failed to convert expression to a concrete type: {}", inner), + 
labels: vec![ + ( + expr_span, + format!("this expression has type {}", expr_type).into(), + ) + ], + notes: vec![ + format!("the expression should have been converted to have {} scalar type", scalar), + ] + } }, Error::ExceededLimitForNestedBraces { span, limit } => ParseError { message: "brace nesting limit reached".into(), @@ -793,6 +817,27 @@ impl<'a> Error<'a> { )], notes: vec![], }, + Error::NotBool(span) => ParseError { + message: "must be a const-expression that resolves to a bool".to_string(), + labels: vec![( + span, + "must resolve to bool".into(), + )], + notes: vec![], + }, + Error::ConstAssertFailed(span) => ParseError { + message: "const_assert failure".to_string(), + labels: vec![( + span, + "evaluates to false".into(), + )], + notes: vec![], + }, } } } + +#[test] +fn test_error_size() { + assert!(size_of::>() <= 48); +} diff --git a/naga/src/front/wgsl/index.rs b/naga/src/front/wgsl/index.rs index 593405508f..bc0af670ff 100644 --- a/naga/src/front/wgsl/index.rs +++ b/naga/src/front/wgsl/index.rs @@ -20,13 +20,16 @@ impl<'a> Index<'a> { // While doing so, reject conflicting definitions. let mut globals = FastHashMap::with_capacity_and_hasher(tu.decls.len(), Default::default()); for (handle, decl) in tu.decls.iter() { - let ident = decl_ident(decl); - let name = ident.name; - if let Some(old) = globals.insert(name, handle) { - return Err(Error::Redefinition { - previous: decl_ident(&tu.decls[old]).span, - current: ident.span, - }); + if let Some(ident) = decl_ident(decl) { + let name = ident.name; + if let Some(old) = globals.insert(name, handle) { + return Err(Error::Redefinition { + previous: decl_ident(&tu.decls[old]) + .expect("decl should have ident for redefinition") + .span, + current: ident.span, + }); + } } } @@ -130,7 +133,7 @@ impl<'a> DependencySolver<'a, '_> { return if dep_id == id { // A declaration refers to itself directly. 
Err(Error::RecursiveDeclaration { - ident: decl_ident(decl).span, + ident: decl_ident(decl).expect("decl should have ident").span, usage: dep.usage, }) } else { @@ -146,14 +149,19 @@ impl<'a> DependencySolver<'a, '_> { .unwrap_or(0); Err(Error::CyclicDeclaration { - ident: decl_ident(&self.module.decls[dep_id]).span, + ident: decl_ident(&self.module.decls[dep_id]) + .expect("decl should have ident") + .span, path: self.path[start_at..] .iter() .map(|curr_dep| { let curr_id = curr_dep.decl; let curr_decl = &self.module.decls[curr_id]; - (decl_ident(curr_decl).span, curr_dep.usage) + ( + decl_ident(curr_decl).expect("decl should have ident").span, + curr_dep.usage, + ) }) .collect(), }) @@ -182,13 +190,14 @@ impl<'a> DependencySolver<'a, '_> { } } -const fn decl_ident<'a>(decl: &ast::GlobalDecl<'a>) -> ast::Ident<'a> { +const fn decl_ident<'a>(decl: &ast::GlobalDecl<'a>) -> Option> { match decl.kind { - ast::GlobalDeclKind::Fn(ref f) => f.name, - ast::GlobalDeclKind::Var(ref v) => v.name, - ast::GlobalDeclKind::Const(ref c) => c.name, - ast::GlobalDeclKind::Override(ref o) => o.name, - ast::GlobalDeclKind::Struct(ref s) => s.name, - ast::GlobalDeclKind::Type(ref t) => t.name, + ast::GlobalDeclKind::Fn(ref f) => Some(f.name), + ast::GlobalDeclKind::Var(ref v) => Some(v.name), + ast::GlobalDeclKind::Const(ref c) => Some(c.name), + ast::GlobalDeclKind::Override(ref o) => Some(o.name), + ast::GlobalDeclKind::Struct(ref s) => Some(s.name), + ast::GlobalDeclKind::Type(ref t) => Some(t.name), + ast::GlobalDeclKind::ConstAssert(_) => None, } } diff --git a/naga/src/front/wgsl/lower/construction.rs b/naga/src/front/wgsl/lower/construction.rs index de0d11d227..dff2e9d6e4 100644 --- a/naga/src/front/wgsl/lower/construction.rs +++ b/naga/src/front/wgsl/lower/construction.rs @@ -530,11 +530,11 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { // Bad conversion (type cast) (Components::One { span, ty_inner, .. 
}, constructor) => { - let from_type = ty_inner.to_wgsl(&ctx.module.to_ctx()); + let from_type = ty_inner.to_wgsl(&ctx.module.to_ctx()).into(); return Err(Error::BadTypeCast { span, from_type, - to_type: constructor.to_error_string(ctx), + to_type: constructor.to_error_string(ctx).into(), }); } @@ -578,8 +578,13 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { Constructor::Type(ty) } ast::ConstructorType::PartialVector { size } => Constructor::PartialVector { size }, - ast::ConstructorType::Vector { size, scalar } => { - let ty = ctx.ensure_type_exists(scalar.to_inner_vector(size)); + ast::ConstructorType::Vector { size, ty, ty_span } => { + let ty = self.resolve_ast_type(ty, &mut ctx.as_global())?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + let ty = ctx.ensure_type_exists(crate::TypeInner::Vector { size, scalar }); Constructor::Type(ty) } ast::ConstructorType::PartialMatrix { columns, rows } => { @@ -588,13 +593,22 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ast::ConstructorType::Matrix { rows, columns, - width, + ty, + ty_span, } => { - let ty = ctx.ensure_type_exists(crate::TypeInner::Matrix { - columns, - rows, - scalar: crate::Scalar::float(width), - }); + let ty = self.resolve_ast_type(ty, &mut ctx.as_global())?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + let ty = match scalar.kind { + crate::ScalarKind::Float => ctx.ensure_type_exists(crate::TypeInner::Matrix { + columns, + rows, + scalar, + }), + _ => return Err(Error::BadMatrixScalarKind(ty_span, scalar)), + }; Constructor::Type(ty) } ast::ConstructorType::PartialArray => Constructor::PartialArray, diff --git a/naga/src/front/wgsl/lower/conversion.rs b/naga/src/front/wgsl/lower/conversion.rs index 2a2690f096..0b2f884639 100644 --- a/naga/src/front/wgsl/lower/conversion.rs +++ 
b/naga/src/front/wgsl/lower/conversion.rs @@ -1,5 +1,8 @@ //! WGSL's automatic conversions for abstract types. +use crate::front::wgsl::error::{ + AutoConversionError, AutoConversionLeafScalarError, ConcretizationFailedError, +}; use crate::{Handle, Span}; impl<'source, 'temp, 'out> super::ExpressionContext<'source, 'temp, 'out> { @@ -39,15 +42,17 @@ impl<'source, 'temp, 'out> super::ExpressionContext<'source, 'temp, 'out> { Some(scalars) => scalars, None => { let gctx = &self.module.to_ctx(); - let source_type = expr_resolution.to_wgsl(gctx); - let dest_type = goal_ty.to_wgsl(gctx); - - return Err(super::Error::AutoConversion { - dest_span: goal_span, - dest_type, - source_span: expr_span, - source_type, - }); + let source_type = expr_resolution.to_wgsl(gctx).into(); + let dest_type = goal_ty.to_wgsl(gctx).into(); + + return Err(super::Error::AutoConversion(Box::new( + AutoConversionError { + dest_span: goal_span, + dest_type, + source_span: expr_span, + source_type, + }, + ))); } }; @@ -79,13 +84,13 @@ impl<'source, 'temp, 'out> super::ExpressionContext<'source, 'temp, 'out> { let make_error = || { let gctx = &self.module.to_ctx(); - let source_type = expr_resolution.to_wgsl(gctx); - super::Error::AutoConversionLeafScalar { + let source_type = expr_resolution.to_wgsl(gctx).into(); + super::Error::AutoConversionLeafScalar(Box::new(AutoConversionLeafScalarError { dest_span: goal_span, - dest_scalar: goal_scalar.to_wgsl(), + dest_scalar: goal_scalar.to_wgsl().into(), source_span: expr_span, source_type, - } + })) }; let expr_scalar = match expr_inner.scalar() { @@ -116,7 +121,7 @@ impl<'source, 'temp, 'out> super::ExpressionContext<'source, 'temp, 'out> { if let crate::TypeInner::Array { .. 
} = *expr_inner { self.as_const_evaluator() .cast_array(expr, goal_scalar, expr_span) - .map_err(|err| super::Error::ConstantEvaluatorError(err, expr_span)) + .map_err(|err| super::Error::ConstantEvaluatorError(err.into(), expr_span)) } else { let cast = crate::Expression::As { expr, @@ -254,12 +259,12 @@ impl<'source, 'temp, 'out> super::ExpressionContext<'source, 'temp, 'out> { // it has one. Also, avoid holding the borrow of `inner` // across the call to `cast_array`. let expr_type = &self.typifier()[expr]; - super::Error::ConcretizationFailed { + super::Error::ConcretizationFailed(Box::new(ConcretizationFailedError { expr_span, - expr_type: expr_type.to_wgsl(&self.module.to_ctx()), - scalar: concretized.to_wgsl(), + expr_type: expr_type.to_wgsl(&self.module.to_ctx()).into(), + scalar: concretized.to_wgsl().into(), inner: err, - } + })) })?; } } diff --git a/naga/src/front/wgsl/lower/mod.rs b/naga/src/front/wgsl/lower/mod.rs index aef4c4bf1e..80131d8a2a 100644 --- a/naga/src/front/wgsl/lower/mod.rs +++ b/naga/src/front/wgsl/lower/mod.rs @@ -98,7 +98,7 @@ impl<'source> GlobalContext<'source, '_, '_> { types: self.types, module: self.module, const_typifier: self.const_typifier, - expr_type: ExpressionContextType::Constant, + expr_type: ExpressionContextType::Constant(None), global_expression_kind_tracker: self.global_expression_kind_tracker, } } @@ -160,7 +160,8 @@ pub struct StatementContext<'source, 'temp, 'out> { /// /// [`LocalVariable`]: crate::Expression::LocalVariable /// [`FunctionArgument`]: crate::Expression::FunctionArgument - local_table: &'temp mut FastHashMap, Typed>>, + local_table: + &'temp mut FastHashMap, Declared>>>, const_typifier: &'temp mut Typifier, typifier: &'temp mut Typifier, @@ -184,6 +185,32 @@ pub struct StatementContext<'source, 'temp, 'out> { } impl<'a, 'temp> StatementContext<'a, 'temp, '_> { + fn as_const<'t>( + &'t mut self, + block: &'t mut crate::Block, + emitter: &'t mut Emitter, + ) -> ExpressionContext<'a, 't, '_> + where + 
'temp: 't, + { + ExpressionContext { + globals: self.globals, + types: self.types, + ast_expressions: self.ast_expressions, + const_typifier: self.const_typifier, + global_expression_kind_tracker: self.global_expression_kind_tracker, + module: self.module, + expr_type: ExpressionContextType::Constant(Some(LocalExpressionContext { + local_table: self.local_table, + function: self.function, + block, + emitter, + typifier: self.typifier, + local_expression_kind_tracker: self.local_expression_kind_tracker, + })), + } + } + fn as_expression<'t>( &'t mut self, block: &'t mut crate::Block, @@ -199,7 +226,7 @@ impl<'a, 'temp> StatementContext<'a, 'temp, '_> { const_typifier: self.const_typifier, global_expression_kind_tracker: self.global_expression_kind_tracker, module: self.module, - expr_type: ExpressionContextType::Runtime(RuntimeExpressionContext { + expr_type: ExpressionContextType::Runtime(LocalExpressionContext { local_table: self.local_table, function: self.function, block, @@ -235,12 +262,12 @@ impl<'a, 'temp> StatementContext<'a, 'temp, '_> { } } -pub struct RuntimeExpressionContext<'temp, 'out> { +pub struct LocalExpressionContext<'temp, 'out> { /// A map from [`ast::Local`] handles to the Naga expressions we've built for them. /// /// This is always [`StatementContext::local_table`] for the /// enclosing statement; see that documentation for details. - local_table: &'temp FastHashMap, Typed>>, + local_table: &'temp FastHashMap, Declared>>>, function: &'out mut crate::Function, block: &'temp mut crate::Block, @@ -259,18 +286,18 @@ pub enum ExpressionContextType<'temp, 'out> { /// We are lowering to an arbitrary runtime expression, to be /// included in a function's body. /// - /// The given [`RuntimeExpressionContext`] holds information about local + /// The given [`LocalExpressionContext`] holds information about local /// variables, arguments, and other definitions available only to runtime /// expressions, not constant or override expressions. 
-    Runtime(RuntimeExpressionContext<'temp, 'out>),
+    Runtime(LocalExpressionContext<'temp, 'out>),
 
     /// We are lowering to a constant expression, to be included in the module's
     /// constant expression arena.
     ///
-    /// Everything constant expressions are allowed to refer to is
-    /// available in the [`ExpressionContext`], so this variant
-    /// carries no further information.
-    Constant,
+    /// Everything global constant expressions are allowed to refer to is
+    /// available in the [`ExpressionContext`], but local constant expressions can
+    /// also refer to other `const` declarations local to the enclosing function.
+    Constant(Option<LocalExpressionContext<'temp, 'out>>),
 
     /// We are lowering to an override expression, to be included in the module's
     /// constant expression arena.
@@ -352,7 +379,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> {
             ast_expressions: self.ast_expressions,
             const_typifier: self.const_typifier,
             module: self.module,
-            expr_type: ExpressionContextType::Constant,
+            expr_type: ExpressionContextType::Constant(None),
             global_expression_kind_tracker: self.global_expression_kind_tracker,
         }
     }
@@ -376,8 +403,19 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> {
                 rctx.local_expression_kind_tracker,
                 rctx.emitter,
                 rctx.block,
+                false,
             ),
-            ExpressionContextType::Constant => ConstantEvaluator::for_wgsl_module(
+            ExpressionContextType::Constant(Some(ref mut rctx)) => {
+                ConstantEvaluator::for_wgsl_function(
+                    self.module,
+                    &mut rctx.function.expressions,
+                    rctx.local_expression_kind_tracker,
+                    rctx.emitter,
+                    rctx.block,
+                    true,
+                )
+            }
+            ExpressionContextType::Constant(None) => ConstantEvaluator::for_wgsl_module(
                 self.module,
                 self.global_expression_kind_tracker,
                 false,
@@ -397,7 +435,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> {
     ) -> Result<Handle<crate::Expression>, Error<'source>> {
         let mut eval = self.as_const_evaluator();
         eval.try_eval_and_append(expr, span)
-            .map_err(|e| Error::ConstantEvaluatorError(e, span))
+            .map_err(|e| Error::ConstantEvaluatorError(e.into(), span))
     }
 
     fn const_access(&self,
handle: Handle) -> Option { @@ -412,15 +450,27 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { .eval_expr_to_u32_from(handle, &ctx.function.expressions) .ok() } - ExpressionContextType::Constant => self.module.to_ctx().eval_expr_to_u32(handle).ok(), + ExpressionContextType::Constant(Some(ref ctx)) => { + assert!(ctx.local_expression_kind_tracker.is_const(handle)); + self.module + .to_ctx() + .eval_expr_to_u32_from(handle, &ctx.function.expressions) + .ok() + } + ExpressionContextType::Constant(None) => { + self.module.to_ctx().eval_expr_to_u32(handle).ok() + } ExpressionContextType::Override => None, } } fn get_expression_span(&self, handle: Handle) -> Span { match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.function.expressions.get_span(handle), - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => { + ctx.function.expressions.get_span(handle) + } + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { self.module.global_expressions.get_span(handle) } } @@ -428,20 +478,35 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { fn typifier(&self) -> &Typifier { match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.typifier, - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => ctx.typifier, + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { self.const_typifier } } } + fn local( + &mut self, + local: &Handle, + span: Span, + ) -> Result>, Error<'source>> { + match self.expr_type { + ExpressionContextType::Runtime(ref ctx) => Ok(ctx.local_table[local].runtime()), + ExpressionContextType::Constant(Some(ref ctx)) => ctx.local_table[local] + .const_time() + .ok_or(Error::UnexpectedOperationInConstContext(span)), + _ 
=> Err(Error::UnexpectedOperationInConstContext(span)), + } + } + fn runtime_expression_ctx( &mut self, span: Span, - ) -> Result<&mut RuntimeExpressionContext<'temp, 'out>, Error<'source>> { + ) -> Result<&mut LocalExpressionContext<'temp, 'out>, Error<'source>> { match self.expr_type { ExpressionContextType::Runtime(ref mut ctx) => Ok(ctx), - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(_) | ExpressionContextType::Override => { Err(Error::UnexpectedOperationInConstContext(span)) } } @@ -480,7 +545,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { } // This means a `gather` operation appeared in a constant expression. // This error refers to the `gather` itself, not its "component" argument. - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(_) | ExpressionContextType::Override => { Err(Error::UnexpectedOperationInConstContext(gather_span)) } } @@ -505,8 +570,9 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { // except that this lets the borrow checker see that it's okay // to also borrow self.module.types mutably below. 
let typifier = match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.typifier, - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => ctx.typifier, + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { &*self.const_typifier } }; @@ -542,7 +608,8 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { let typifier; let expressions; match self.expr_type { - ExpressionContextType::Runtime(ref mut ctx) => { + ExpressionContextType::Runtime(ref mut ctx) + | ExpressionContextType::Constant(Some(ref mut ctx)) => { resolve_ctx = ResolveContext::with_locals( self.module, &ctx.function.local_variables, @@ -551,7 +618,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { typifier = &mut *ctx.typifier; expressions = &ctx.function.expressions; } - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { resolve_ctx = ResolveContext::with_locals(self.module, &empty_arena, &[]); typifier = self.const_typifier; expressions = &self.module.global_expressions; @@ -643,18 +710,20 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { span: Span, ) -> Result, Error<'source>> { match self.expr_type { - ExpressionContextType::Runtime(ref mut rctx) => { + ExpressionContextType::Runtime(ref mut rctx) + | ExpressionContextType::Constant(Some(ref mut rctx)) => { rctx.block .extend(rctx.emitter.finish(&rctx.function.expressions)); } - ExpressionContextType::Constant | ExpressionContextType::Override => {} + ExpressionContextType::Constant(None) | ExpressionContextType::Override => {} } let result = self.append_expression(expression, span); match self.expr_type { - ExpressionContextType::Runtime(ref mut rctx) => { + ExpressionContextType::Runtime(ref mut rctx) + | 
ExpressionContextType::Constant(Some(ref mut rctx)) => { rctx.emitter.start(&rctx.function.expressions); } - ExpressionContextType::Constant | ExpressionContextType::Override => {} + ExpressionContextType::Constant(None) | ExpressionContextType::Override => {} } result } @@ -718,6 +787,30 @@ impl<'source> ArgumentContext<'_, 'source> { } } +#[derive(Debug, Copy, Clone)] +enum Declared { + /// Value declared as const + Const(T), + + /// Value declared as non-const + Runtime(T), +} + +impl Declared { + fn runtime(self) -> T { + match self { + Declared::Const(t) | Declared::Runtime(t) => t, + } + } + + fn const_time(self) -> Option { + match self { + Declared::Const(t) => Some(t), + Declared::Runtime(_) => None, + } + } +} + /// WGSL type annotations on expressions, types, values, etc. /// /// Naga and WGSL types are very close, but Naga lacks WGSL's `ref` types, which @@ -814,7 +907,13 @@ impl Components { *comp = Self::letter_component(ch).ok_or(Error::BadAccessor(name_span))?; } - Ok(Components::Swizzle { size, pattern }) + if name.chars().all(|c| matches!(c, 'x' | 'y' | 'z' | 'w')) + || name.chars().all(|c| matches!(c, 'r' | 'g' | 'b' | 'a')) + { + Ok(Components::Swizzle { size, pattern }) + } else { + Err(Error::BadAccessor(name_span)) + } } } @@ -935,31 +1034,41 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ctx.globals.insert(f.name.name, lowered_decl); } ast::GlobalDeclKind::Var(ref v) => { - let ty = self.resolve_ast_type(v.ty, &mut ctx)?; - - let init; - if let Some(init_ast) = v.init { - let mut ectx = ctx.as_override(); - let lowered = self.expression_for_abstract(init_ast, &mut ectx)?; - let ty_res = crate::proc::TypeResolution::Handle(ty); - let converted = ectx - .try_automatic_conversions(lowered, &ty_res, v.name.span) - .map_err(|error| match error { - Error::AutoConversion { - dest_span: _, - dest_type, - source_span: _, - source_type, - } => Error::InitializationTypeMismatch { + let explicit_ty = + v.ty.map(|ast| self.resolve_ast_type(ast, &mut 
ctx)) + .transpose()?; + + let mut ectx = ctx.as_override(); + + let ty; + let initializer; + match (v.init, explicit_ty) { + (Some(init), Some(explicit_ty)) => { + let init = self.expression_for_abstract(init, &mut ectx)?; + let ty_res = crate::proc::TypeResolution::Handle(explicit_ty); + let init = ectx + .try_automatic_conversions(init, &ty_res, v.name.span) + .map_err(|error| match error { + Error::AutoConversion(e) => Error::InitializationTypeMismatch { name: v.name.span, - expected: dest_type, - got: source_type, + expected: e.dest_type, + got: e.source_type, }, other => other, })?; - init = Some(converted); - } else { - init = None; + ty = explicit_ty; + initializer = Some(init); + } + (Some(init), None) => { + let concretized = self.expression(init, &mut ectx)?; + ty = ectx.register_type(concretized)?; + initializer = Some(concretized); + } + (None, Some(explicit_ty)) => { + ty = explicit_ty; + initializer = None; + } + (None, None) => return Err(Error::DeclMissingTypeAndInit(v.name.span)), } let binding = if let Some(ref binding) = v.binding { @@ -977,7 +1086,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { space: v.space, binding, ty, - init, + init: initializer, }, span, ); @@ -997,15 +1106,10 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { init = ectx .try_automatic_conversions(init, &explicit_ty_res, c.name.span) .map_err(|error| match error { - Error::AutoConversion { - dest_span: _, - dest_type, - source_span: _, - source_type, - } => Error::InitializationTypeMismatch { + Error::AutoConversion(e) => Error::InitializationTypeMismatch { name: c.name.span, - expected: dest_type, - got: source_type, + expected: e.dest_type, + got: e.source_type, }, other => other, })?; @@ -1061,8 +1165,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let gctx = ctx.module.to_ctx(); return Err(Error::InitializationTypeMismatch { name: o.name.span, - expected: explicit_ty.to_wgsl(&gctx), - got: inferred_type.to_wgsl(&gctx), + expected: 
explicit_ty.to_wgsl(&gctx).into(), + got: inferred_type.to_wgsl(&gctx).into(), }); } } @@ -1100,6 +1204,20 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ctx.globals .insert(alias.name.name, LoweredGlobalDecl::Type(ty)); } + ast::GlobalDeclKind::ConstAssert(condition) => { + let condition = self.expression(condition, &mut ctx.as_const())?; + + let span = ctx.module.global_expressions.get_span(condition); + match ctx + .module + .to_ctx() + .eval_expr_to_bool_from(condition, &ctx.module.global_expressions) + { + Some(true) => Ok(()), + Some(false) => Err(Error::ConstAssertFailed(span)), + _ => Err(Error::NotBool(span)), + }?; + } } } @@ -1130,7 +1248,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let ty = self.resolve_ast_type(arg.ty, ctx)?; let expr = expressions .append(crate::Expression::FunctionArgument(i as u32), arg.name.span); - local_table.insert(arg.handle, Typed::Plain(expr)); + local_table.insert(arg.handle, Declared::Runtime(Typed::Plain(expr))); named_expressions.insert(expr, (arg.name.name.to_string(), arg.name.span)); local_expression_kind_tracker.insert(expr, crate::proc::ExpressionKind::Runtime); @@ -1271,14 +1389,15 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let gctx = &ctx.module.to_ctx(); return Err(Error::InitializationTypeMismatch { name: l.name.span, - expected: ty.to_wgsl(gctx), - got: init_ty.to_wgsl(gctx), + expected: ty.to_wgsl(gctx).into(), + got: init_ty.to_wgsl(gctx).into(), }); } } block.extend(emitter.finish(&ctx.function.expressions)); - ctx.local_table.insert(l.handle, Typed::Plain(value)); + ctx.local_table + .insert(l.handle, Declared::Runtime(Typed::Plain(value))); ctx.named_expressions .insert(value, (l.name.name.to_string(), l.name.span)); @@ -1302,15 +1421,10 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let init = ectx .try_automatic_conversions(init, &ty_res, v.name.span) .map_err(|error| match error { - Error::AutoConversion { - dest_span: _, - dest_type, - source_span: _, - source_type, - } => 
Error::InitializationTypeMismatch { + Error::AutoConversion(e) => Error::InitializationTypeMismatch { name: v.name.span, - expected: dest_type, - got: source_type, + expected: e.dest_type, + got: e.source_type, }, other => other, })?; @@ -1365,7 +1479,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { Span::UNDEFINED, )?; block.extend(emitter.finish(&ctx.function.expressions)); - ctx.local_table.insert(v.handle, Typed::Reference(handle)); + ctx.local_table + .insert(v.handle, Declared::Runtime(Typed::Reference(handle))); match initializer { Some(initializer) => crate::Statement::Store { @@ -1375,6 +1490,41 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { None => return Ok(()), } } + ast::LocalDecl::Const(ref c) => { + let mut emitter = Emitter::default(); + emitter.start(&ctx.function.expressions); + + let ectx = &mut ctx.as_const(block, &mut emitter); + + let mut init = self.expression_for_abstract(c.init, ectx)?; + + if let Some(explicit_ty) = c.ty { + let explicit_ty = + self.resolve_ast_type(explicit_ty, &mut ectx.as_global())?; + let explicit_ty_res = crate::proc::TypeResolution::Handle(explicit_ty); + init = ectx + .try_automatic_conversions(init, &explicit_ty_res, c.name.span) + .map_err(|error| match error { + Error::AutoConversion(error) => Error::InitializationTypeMismatch { + name: c.name.span, + expected: error.dest_type, + got: error.source_type, + }, + other => other, + })?; + } else { + init = ectx.concretize(init)?; + ectx.register_type(init)?; + } + + block.extend(emitter.finish(&ctx.function.expressions)); + ctx.local_table + .insert(c.handle, Declared::Const(Typed::Plain(init))); + ctx.named_expressions + .insert(init, (c.name.name.to_string(), c.name.span)); + + return Ok(()); + } }, ast::StatementKind::If { condition, @@ -1606,6 +1756,28 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { value, } } + ast::StatementKind::ConstAssert(condition) => { + let mut emitter = Emitter::default(); + emitter.start(&ctx.function.expressions); + + let 
condition = + self.expression(condition, &mut ctx.as_const(block, &mut emitter))?; + + let span = ctx.function.expressions.get_span(condition); + match ctx + .module + .to_ctx() + .eval_expr_to_bool_from(condition, &ctx.function.expressions) + { + Some(true) => Ok(()), + Some(false) => Err(Error::ConstAssertFailed(span)), + _ => Err(Error::NotBool(span)), + }?; + + block.extend(emitter.finish(&ctx.function.expressions)); + + return Ok(()); + } ast::StatementKind::Ignore(expr) => { let mut emitter = Emitter::default(); emitter.start(&ctx.function.expressions); @@ -1673,8 +1845,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { return Ok(Typed::Plain(handle)); } ast::Expression::Ident(ast::IdentExpr::Local(local)) => { - let rctx = ctx.runtime_expression_ctx(span)?; - return Ok(rctx.local_table[&local]); + return ctx.local(&local, span); } ast::Expression::Ident(ast::IdentExpr::Unresolved(name)) => { let global = ctx @@ -1854,9 +2025,9 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let ty = resolve!(ctx, expr); let gctx = &ctx.module.to_ctx(); return Err(Error::BadTypeCast { - from_type: ty.to_wgsl(gctx), + from_type: ty.to_wgsl(gctx).into(), span: ty_span, - to_type: to_resolved.to_wgsl(gctx), + to_type: to_resolved.to_wgsl(gctx).into(), }); } }; @@ -2884,16 +3055,34 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ) -> Result, Error<'source>> { let inner = match ctx.types[handle] { ast::Type::Scalar(scalar) => scalar.to_inner_scalar(), - ast::Type::Vector { size, scalar } => scalar.to_inner_vector(size), + ast::Type::Vector { size, ty, ty_span } => { + let ty = self.resolve_ast_type(ty, ctx)?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + crate::TypeInner::Vector { size, scalar } + } ast::Type::Matrix { rows, columns, - width, - } => crate::TypeInner::Matrix { - columns, - rows, - scalar: crate::Scalar::float(width), - }, + ty, + ty_span, + } => { + let ty = 
self.resolve_ast_type(ty, ctx)?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + match scalar.kind { + crate::ScalarKind::Float => crate::TypeInner::Matrix { + columns, + rows, + scalar, + }, + _ => return Err(Error::BadMatrixScalarKind(ty_span, scalar)), + } + } ast::Type::Atomic(scalar) => scalar.to_inner_atomic(), ast::Type::Pointer { base, space } => { let base = self.resolve_ast_type(base, ctx)?; diff --git a/naga/src/front/wgsl/parse/ast.rs b/naga/src/front/wgsl/parse/ast.rs index 7df5c8a1c9..c4a7984115 100644 --- a/naga/src/front/wgsl/parse/ast.rs +++ b/naga/src/front/wgsl/parse/ast.rs @@ -85,6 +85,7 @@ pub enum GlobalDeclKind<'a> { Override(Override<'a>), Struct(Struct<'a>), Type(TypeAlias<'a>), + ConstAssert(Handle>), } #[derive(Debug)] @@ -109,7 +110,7 @@ pub struct EntryPoint<'a> { } #[cfg(doc)] -use crate::front::wgsl::lower::{RuntimeExpressionContext, StatementContext}; +use crate::front::wgsl::lower::{LocalExpressionContext, StatementContext}; #[derive(Debug)] pub struct Function<'a> { @@ -142,7 +143,7 @@ pub struct GlobalVariable<'a> { pub name: Ident<'a>, pub space: crate::AddressSpace, pub binding: Option>, - pub ty: Handle>, + pub ty: Option>>, pub init: Option>>, } @@ -198,12 +199,14 @@ pub enum Type<'a> { Scalar(Scalar), Vector { size: crate::VectorSize, - scalar: Scalar, + ty: Handle>, + ty_span: Span, }, Matrix { columns: crate::VectorSize, rows: crate::VectorSize, - width: crate::Bytes, + ty: Handle>, + ty_span: Span, }, Atomic(Scalar), Pointer { @@ -282,6 +285,7 @@ pub enum StatementKind<'a> { Increment(Handle>), Decrement(Handle>), Ignore(Handle>), + ConstAssert(Handle>), } #[derive(Debug)] @@ -330,7 +334,8 @@ pub enum ConstructorType<'a> { /// `vec3(1.0)`. 
Vector { size: crate::VectorSize, - scalar: Scalar, + ty: Handle>, + ty_span: Span, }, /// A matrix construction whose component type is inferred from the @@ -345,7 +350,8 @@ pub enum ConstructorType<'a> { Matrix { columns: crate::VectorSize, rows: crate::VectorSize, - width: crate::Bytes, + ty: Handle>, + ty_span: Span, }, /// An array whose component type and size are inferred from the arguments: @@ -460,14 +466,23 @@ pub struct Let<'a> { pub handle: Handle, } +#[derive(Debug)] +pub struct LocalConst<'a> { + pub name: Ident<'a>, + pub ty: Option>>, + pub init: Handle>, + pub handle: Handle, +} + #[derive(Debug)] pub enum LocalDecl<'a> { Var(LocalVariable<'a>), Let(Let<'a>), + Const(LocalConst<'a>), } #[derive(Debug)] /// A placeholder for a local variable declaration. /// -/// See [`Function::locals`] for more information. +/// See [`super::ExpressionContext::locals`] for more information. pub struct Local; diff --git a/naga/src/front/wgsl/parse/conv.rs b/naga/src/front/wgsl/parse/conv.rs index 71952d093f..ecbf84fd54 100644 --- a/naga/src/front/wgsl/parse/conv.rs +++ b/naga/src/front/wgsl/parse/conv.rs @@ -58,6 +58,8 @@ pub fn map_sampling(word: &str, span: Span) -> Result "center" => Ok(crate::Sampling::Center), "centroid" => Ok(crate::Sampling::Centroid), "sample" => Ok(crate::Sampling::Sample), + "first" => Ok(crate::Sampling::First), + "either" => Ok(crate::Sampling::Either), _ => Err(Error::UnknownAttribute(span)), } } @@ -92,7 +94,7 @@ pub fn map_storage_format(word: &str, span: Span) -> Result Sf::Rgba8Sint, "rgb10a2uint" => Sf::Rgb10a2Uint, "rgb10a2unorm" => Sf::Rgb10a2Unorm, - "rg11b10float" => Sf::Rg11b10UFloat, + "rg11b10float" => Sf::Rg11b10Ufloat, "r64uint" => Sf::R64Uint, "rg32uint" => Sf::Rg32Uint, "rg32sint" => Sf::Rg32Sint, diff --git a/naga/src/front/wgsl/parse/mod.rs b/naga/src/front/wgsl/parse/mod.rs index 8870a5f6bb..7292f9350c 100644 --- a/naga/src/front/wgsl/parse/mod.rs +++ b/naga/src/front/wgsl/parse/mod.rs @@ -31,20 +31,20 @@ struct 
ExpressionContext<'input, 'temp, 'out> { /// A map from identifiers in scope to the locals/arguments they represent. /// - /// The handles refer to the [`Function::locals`] area; see that field's + /// The handles refer to the [`locals`] arena; see that field's /// documentation for details. /// - /// [`Function::locals`]: ast::Function::locals + /// [`locals`]: ExpressionContext::locals local_table: &'temp mut SymbolTable<&'input str, Handle>, /// Local variable and function argument arena for the function we're building. /// - /// Note that the `Local` here is actually a zero-sized type. The AST keeps - /// all the detailed information about locals - names, types, etc. - in - /// [`LocalDecl`] statements. For arguments, that information is kept in - /// [`arguments`]. This `Arena`'s only role is to assign a unique `Handle` - /// to each of them, and track their definitions' spans for use in - /// diagnostics. + /// Note that the [`ast::Local`] here is actually a zero-sized type. This + /// `Arena`'s only role is to assign a unique `Handle` to each local + /// identifier, and track its definition's span for use in diagnostics. All + /// the detailed information about locals - names, types, etc. - is kept in + /// the [`LocalDecl`] statements we parsed from their declarations. For + /// arguments, that information is kept in [`arguments`]. /// /// In the AST, when an [`Ident`] expression refers to a local variable or /// argument, its [`IdentExpr`] holds the referent's `Handle` in this @@ -53,14 +53,15 @@ struct ExpressionContext<'input, 'temp, 'out> { /// During lowering, [`LocalDecl`] statements add entries to a per-function /// table that maps `Handle` values to their Naga representations, /// accessed via [`StatementContext::local_table`] and - /// [`RuntimeExpressionContext::local_table`]. This table is then consulted when + /// [`LocalExpressionContext::local_table`]. This table is then consulted when /// lowering subsequent [`Ident`] expressions. 
/// - /// [`LocalDecl`]: StatementKind::LocalDecl - /// [`arguments`]: Function::arguments - /// [`Ident`]: Expression::Ident - /// [`StatementContext::local_table`]: StatementContext::local_table - /// [`RuntimeExpressionContext::local_table`]: RuntimeExpressionContext::local_table + /// [`LocalDecl`]: ast::StatementKind::LocalDecl + /// [`arguments`]: ast::Function::arguments + /// [`Ident`]: ast::Expression::Ident + /// [`IdentExpr`]: ast::IdentExpr + /// [`StatementContext::local_table`]: super::lower::StatementContext::local_table + /// [`LocalExpressionContext::local_table`]: super::lower::LocalExpressionContext::local_table locals: &'out mut Arena, /// Identifiers used by the current global declaration that have no local definition. @@ -111,6 +112,11 @@ impl<'a> ExpressionContext<'a, '_, '_> { Ok(handle) } } + + fn new_scalar(&mut self, scalar: Scalar) -> Handle> { + self.types + .append(ast::Type::Scalar(scalar), Span::UNDEFINED) + } } /// Which grammar rule we are in the midst of parsing. 
@@ -310,25 +316,22 @@ impl Parser { "vec2i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec2u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec2f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "vec3" => ast::ConstructorType::PartialVector { @@ -337,19 +340,22 @@ impl Parser { "vec3i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::I32, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec3u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::U32, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec3f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "vec4" => ast::ConstructorType::PartialVector { @@ -358,19 +364,22 @@ impl Parser { "vec4i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::I32, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec4u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::U32, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec4f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x2" => ast::ConstructorType::PartialMatrix { @@ -381,7 +390,8 @@ impl 
Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x3" => ast::ConstructorType::PartialMatrix { @@ -392,7 +402,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x4" => ast::ConstructorType::PartialMatrix { @@ -403,7 +414,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x2" => ast::ConstructorType::PartialMatrix { @@ -414,7 +426,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x3" => ast::ConstructorType::PartialMatrix { @@ -425,7 +438,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x4" => ast::ConstructorType::PartialMatrix { @@ -436,7 +450,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x2" => ast::ConstructorType::PartialMatrix { @@ -447,7 +462,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x3" => ast::ConstructorType::PartialMatrix { @@ -458,7 +474,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: 
crate::VectorSize::Quad, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x4" => ast::ConstructorType::PartialMatrix { @@ -469,7 +486,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "array" => ast::ConstructorType::PartialArray, @@ -502,19 +520,17 @@ impl Parser { // parse component type if present match (lexer.peek().0, partial) { (Token::Paren('<'), ast::ConstructorType::PartialVector { size }) => { - let scalar = lexer.next_scalar_generic()?; - Ok(Some(ast::ConstructorType::Vector { size, scalar })) + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(Some(ast::ConstructorType::Vector { size, ty, ty_span })) } (Token::Paren('<'), ast::ConstructorType::PartialMatrix { columns, rows }) => { - let (scalar, span) = lexer.next_scalar_generic_with_span()?; - match scalar.kind { - crate::ScalarKind::Float => Ok(Some(ast::ConstructorType::Matrix { - columns, - rows, - width: scalar.width, - })), - _ => Err(Error::BadMatrixScalarKind(span, scalar)), - } + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(Some(ast::ConstructorType::Matrix { + columns, + rows, + ty, + ty_span, + })) } (Token::Paren('<'), ast::ConstructorType::PartialArray) => { lexer.expect_generic_paren('<')?; @@ -570,11 +586,7 @@ impl Parser { let expr = match name { // bitcast looks like a function call, but it's an operator and must be handled differently. 
"bitcast" => { - lexer.expect_generic_paren('<')?; - let start = lexer.start_byte_offset(); - let to = self.type_decl(lexer, ctx)?; - let span = lexer.span_from(start); - lexer.expect_generic_paren('>')?; + let (to, span) = self.singular_generic(lexer, ctx)?; lexer.open_arguments()?; let expr = self.general_expression(lexer, ctx)?; @@ -980,8 +992,12 @@ impl Parser { lexer.expect(Token::Paren('>'))?; } let name = lexer.next_ident()?; - lexer.expect(Token::Separator(':'))?; - let ty = self.type_decl(lexer, ctx)?; + + let ty = if lexer.skip(Token::Separator(':')) { + Some(self.type_decl(lexer, ctx)?) + } else { + None + }; let init = if lexer.skip(Token::Operation('=')) { let handle = self.general_expression(lexer, ctx)?; @@ -1058,21 +1074,34 @@ impl Parser { Ok(members) } - fn matrix_scalar_type<'a>( + /// Parses ``, returning T and span of T + fn singular_generic<'a>( + &mut self, + lexer: &mut Lexer<'a>, + ctx: &mut ExpressionContext<'a, '_, '_>, + ) -> Result<(Handle>, Span), Error<'a>> { + lexer.expect_generic_paren('<')?; + let start = lexer.start_byte_offset(); + let ty = self.type_decl(lexer, ctx)?; + let span = lexer.span_from(start); + lexer.expect_generic_paren('>')?; + Ok((ty, span)) + } + + fn matrix_with_type<'a>( &mut self, lexer: &mut Lexer<'a>, + ctx: &mut ExpressionContext<'a, '_, '_>, columns: crate::VectorSize, rows: crate::VectorSize, ) -> Result, Error<'a>> { - let (scalar, span) = lexer.next_scalar_generic_with_span()?; - match scalar.kind { - crate::ScalarKind::Float => Ok(ast::Type::Matrix { - columns, - rows, - width: scalar.width, - }), - _ => Err(Error::BadMatrixScalarKind(span, scalar)), - } + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(ast::Type::Matrix { + columns, + rows, + ty, + ty_span, + }) } fn type_decl_impl<'a>( @@ -1087,151 +1116,154 @@ impl Parser { Ok(Some(match word { "vec2" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: 
crate::VectorSize::Bi, - scalar, + ty, + ty_span, } } "vec2i" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec2u" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec2f" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "vec3" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: crate::VectorSize::Tri, - scalar, + ty, + ty_span, } } "vec3i" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec3u" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec3f" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "vec4" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: crate::VectorSize::Quad, - scalar, + ty, + ty_span, } } "vec4i" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec4u" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec4f" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: 
Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Bi)? } "mat2x2f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Tri)? } "mat2x3f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Quad)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Quad)? } "mat2x4f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Bi)? } "mat3x2f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Tri)? } "mat3x3f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Quad)? 
+ self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Quad)? } "mat3x4f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Bi)? } "mat4x2f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Tri)? } "mat4x3f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Quad)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Quad)? 
} "mat4x4f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "atomic" => { let scalar = lexer.next_scalar_generic()?; @@ -1692,6 +1724,28 @@ impl Parser { handle, })) } + "const" => { + let _ = lexer.next(); + let name = lexer.next_ident()?; + + let given_ty = if lexer.skip(Token::Separator(':')) { + let ty = self.type_decl(lexer, ctx)?; + Some(ty) + } else { + None + }; + lexer.expect(Token::Operation('='))?; + let expr_id = self.general_expression(lexer, ctx)?; + lexer.expect(Token::Separator(';'))?; + + let handle = ctx.declare_local(name)?; + ast::StatementKind::LocalDecl(ast::LocalDecl::Const(ast::LocalConst { + name, + ty: given_ty, + init: expr_id, + handle, + })) + } "var" => { let _ = lexer.next(); @@ -1967,6 +2021,20 @@ impl Parser { lexer.expect(Token::Separator(';'))?; ast::StatementKind::Kill } + // https://www.w3.org/TR/WGSL/#const-assert-statement + "const_assert" => { + let _ = lexer.next(); + // parentheses are optional + let paren = lexer.skip(Token::Paren('(')); + + let condition = self.general_expression(lexer, ctx)?; + + if paren { + lexer.expect(Token::Paren(')'))?; + } + lexer.expect(Token::Separator(';'))?; + ast::StatementKind::ConstAssert(condition) + } // assignment or a function call _ => { self.function_call_or_assignment_statement(lexer, ctx, block)?; @@ -2370,6 +2438,18 @@ impl Parser { ..function })) } + (Token::Word("const_assert"), _) => { + // parentheses are optional + let paren = lexer.skip(Token::Paren('(')); + + let condition = self.general_expression(lexer, &mut ctx)?; + + if paren { + lexer.expect(Token::Paren(')'))?; + } + lexer.expect(Token::Separator(';'))?; + Some(ast::GlobalDeclKind::ConstAssert(condition)) + } (Token::End, _) => return Ok(()), other => return Err(Error::Unexpected(other.1, ExpectedToken::GlobalItem)), }; diff --git a/naga/src/front/wgsl/to_wgsl.rs b/naga/src/front/wgsl/to_wgsl.rs index 
72436d42bd..189010e537 100644 --- a/naga/src/front/wgsl/to_wgsl.rs +++ b/naga/src/front/wgsl/to_wgsl.rs @@ -175,7 +175,7 @@ impl crate::StorageFormat { Sf::Bgra8Unorm => "bgra8unorm", Sf::Rgb10a2Uint => "rgb10a2uint", Sf::Rgb10a2Unorm => "rgb10a2unorm", - Sf::Rg11b10UFloat => "rg11b10float", + Sf::Rg11b10Ufloat => "rg11b10float", Sf::R64Uint => "r64uint", Sf::Rg32Uint => "rg32uint", Sf::Rg32Sint => "rg32sint", diff --git a/naga/src/lib.rs b/naga/src/lib.rs index 4dc211c997..7b71c36787 100644 --- a/naga/src/lib.rs +++ b/naga/src/lib.rs @@ -530,6 +530,13 @@ pub enum Sampling { /// Interpolate the value at each sample location. In multisampling, invoke /// the fragment shader once per sample. Sample, + + /// Use the value provided by the first vertex of the current primitive. + First, + + /// Use the value provided by the first or last vertex of the current primitive. The exact + /// choice is implementation-dependent. + Either, } /// Member of a user-defined structure. @@ -615,7 +622,7 @@ pub enum StorageFormat { // Packed 32-bit formats Rgb10a2Uint, Rgb10a2Unorm, - Rg11b10UFloat, + Rg11b10Ufloat, // 64-bit formats R64Uint, diff --git a/naga/src/proc/constant_evaluator.rs b/naga/src/proc/constant_evaluator.rs index deaa9c93c7..1b7f5cf910 100644 --- a/naga/src/proc/constant_evaluator.rs +++ b/naga/src/proc/constant_evaluator.rs @@ -317,7 +317,7 @@ pub struct ConstantEvaluator<'a> { #[derive(Debug)] enum WgslRestrictions<'a> { /// - const-expressions will be evaluated and inserted in the arena - Const, + Const(Option>), /// - const-expressions will be evaluated and inserted in the arena /// - override-expressions will be inserted in the arena Override, @@ -347,6 +347,8 @@ struct FunctionLocalData<'a> { #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] pub enum ExpressionKind { + /// If const is also implemented as const + ImplConst, Const, Override, Runtime, @@ -372,14 +374,23 @@ impl ExpressionKindTracker { pub fn insert(&mut self, value: Handle, 
expr_type: ExpressionKind) { self.inner.insert(value, expr_type); } + pub fn is_const(&self, h: Handle) -> bool { - matches!(self.type_of(h), ExpressionKind::Const) + matches!( + self.type_of(h), + ExpressionKind::Const | ExpressionKind::ImplConst + ) + } + + /// Returns `true` if naga can also evaluate expression as const + pub fn is_impl_const(&self, h: Handle) -> bool { + matches!(self.type_of(h), ExpressionKind::ImplConst) } pub fn is_const_or_override(&self, h: Handle) -> bool { matches!( self.type_of(h), - ExpressionKind::Const | ExpressionKind::Override + ExpressionKind::Const | ExpressionKind::Override | ExpressionKind::ImplConst ) } @@ -400,13 +411,14 @@ impl ExpressionKindTracker { } fn type_of_with_expr(&self, expr: &Expression) -> ExpressionKind { + use crate::MathFunction as Mf; match *expr { Expression::Literal(_) | Expression::ZeroValue(_) | Expression::Constant(_) => { - ExpressionKind::Const + ExpressionKind::ImplConst } Expression::Override(_) => ExpressionKind::Override, Expression::Compose { ref components, .. } => { - let mut expr_type = ExpressionKind::Const; + let mut expr_type = ExpressionKind::ImplConst; for component in components { expr_type = expr_type.max(self.type_of(*component)) } @@ -417,13 +429,16 @@ impl ExpressionKindTracker { Expression::Access { base, index } => self.type_of(base).max(self.type_of(index)), Expression::Swizzle { vector, .. } => self.type_of(vector), Expression::Unary { expr, .. } => self.type_of(expr), - Expression::Binary { left, right, .. } => self.type_of(left).max(self.type_of(right)), + Expression::Binary { left, right, .. } => self + .type_of(left) + .max(self.type_of(right)) + .max(ExpressionKind::Const), Expression::Math { + fun, arg, arg1, arg2, arg3, - .. 
} => self .type_of(arg) .max( @@ -437,8 +452,34 @@ impl ExpressionKindTracker { .max( arg3.map(|arg| self.type_of(arg)) .unwrap_or(ExpressionKind::Const), + ) + .max( + if matches!( + fun, + Mf::Dot + | Mf::Outer + | Mf::Cross + | Mf::Distance + | Mf::Length + | Mf::Normalize + | Mf::FaceForward + | Mf::Reflect + | Mf::Refract + | Mf::Ldexp + | Mf::Modf + | Mf::Mix + | Mf::Frexp + ) { + ExpressionKind::Const + } else { + ExpressionKind::ImplConst + }, ), - Expression::As { expr, .. } => self.type_of(expr), + Expression::As { convert, expr, .. } => self.type_of(expr).max(if convert.is_some() { + ExpressionKind::ImplConst + } else { + ExpressionKind::Const + }), Expression::Select { condition, accept, @@ -446,7 +487,8 @@ impl ExpressionKindTracker { } => self .type_of(condition) .max(self.type_of(accept)) - .max(self.type_of(reject)), + .max(self.type_of(reject)) + .max(ExpressionKind::Const), Expression::Relational { argument, .. } => self.type_of(argument), Expression::ArrayLength(expr) => self.type_of(expr), _ => ExpressionKind::Runtime, @@ -556,7 +598,7 @@ impl<'a> ConstantEvaluator<'a> { Behavior::Wgsl(if in_override_ctx { WgslRestrictions::Override } else { - WgslRestrictions::Const + WgslRestrictions::Const(None) }), module, global_expression_kind_tracker, @@ -603,13 +645,19 @@ impl<'a> ConstantEvaluator<'a> { local_expression_kind_tracker: &'a mut ExpressionKindTracker, emitter: &'a mut super::Emitter, block: &'a mut crate::Block, + is_const: bool, ) -> Self { + let local_data = FunctionLocalData { + global_expressions: &module.global_expressions, + emitter, + block, + }; Self { - behavior: Behavior::Wgsl(WgslRestrictions::Runtime(FunctionLocalData { - global_expressions: &module.global_expressions, - emitter, - block, - })), + behavior: Behavior::Wgsl(if is_const { + WgslRestrictions::Const(Some(local_data)) + } else { + WgslRestrictions::Runtime(local_data) + }), types: &mut module.types, constants: &module.constants, overrides: &module.overrides, @@ -718,6 
+766,7 @@ impl<'a> ConstantEvaluator<'a> { span: Span, ) -> Result, ConstantEvaluatorError> { match self.expression_kind_tracker.type_of_with_expr(&expr) { + ExpressionKind::ImplConst => self.try_eval_and_append_impl(&expr, span), ExpressionKind::Const => { let eval_result = self.try_eval_and_append_impl(&expr, span); // We should be able to evaluate `Const` expressions at this @@ -740,7 +789,7 @@ impl<'a> ConstantEvaluator<'a> { Behavior::Wgsl(WgslRestrictions::Override | WgslRestrictions::Runtime(_)) => { Ok(self.append_expr(expr, span, ExpressionKind::Override)) } - Behavior::Wgsl(WgslRestrictions::Const) => { + Behavior::Wgsl(WgslRestrictions::Const(_)) => { Err(ConstantEvaluatorError::OverrideExpr) } Behavior::Glsl(_) => { @@ -761,14 +810,17 @@ impl<'a> ConstantEvaluator<'a> { const fn is_global_arena(&self) -> bool { matches!( self.behavior, - Behavior::Wgsl(WgslRestrictions::Const | WgslRestrictions::Override) + Behavior::Wgsl(WgslRestrictions::Const(None) | WgslRestrictions::Override) | Behavior::Glsl(GlslRestrictions::Const) ) } const fn function_local_data(&self) -> Option<&FunctionLocalData<'a>> { match self.behavior { - Behavior::Wgsl(WgslRestrictions::Runtime(ref function_local_data)) + Behavior::Wgsl( + WgslRestrictions::Runtime(ref function_local_data) + | WgslRestrictions::Const(Some(ref function_local_data)), + ) | Behavior::Glsl(GlslRestrictions::Runtime(ref function_local_data)) => { Some(function_local_data) } @@ -1779,9 +1831,13 @@ impl<'a> ConstantEvaluator<'a> { _ => return Err(ConstantEvaluatorError::InvalidBinaryOpArgs), }), (Literal::I32(a), Literal::U32(b)) => Literal::I32(match op { - BinaryOperator::ShiftLeft => a - .checked_shl(b) - .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + BinaryOperator::ShiftLeft => { + if (if a.is_negative() { !a } else { a }).leading_zeros() <= b { + return Err(ConstantEvaluatorError::Overflow("<<".to_string())); + } + a.checked_shl(b) + .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)? 
+ } BinaryOperator::ShiftRight => a .checked_shr(b) .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, @@ -1807,8 +1863,11 @@ impl<'a> ConstantEvaluator<'a> { BinaryOperator::ExclusiveOr => a ^ b, BinaryOperator::InclusiveOr => a | b, BinaryOperator::ShiftLeft => a - .checked_shl(b) - .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + .checked_mul( + 1u32.checked_shl(b) + .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + ) + .ok_or(ConstantEvaluatorError::Overflow("<<".to_string()))?, BinaryOperator::ShiftRight => a .checked_shr(b) .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, @@ -2057,7 +2116,10 @@ impl<'a> ConstantEvaluator<'a> { expr_type: ExpressionKind, ) -> Handle { let h = match self.behavior { - Behavior::Wgsl(WgslRestrictions::Runtime(ref mut function_local_data)) + Behavior::Wgsl( + WgslRestrictions::Runtime(ref mut function_local_data) + | WgslRestrictions::Const(Some(ref mut function_local_data)), + ) | Behavior::Glsl(GlslRestrictions::Runtime(ref mut function_local_data)) => { let is_running = function_local_data.emitter.is_running(); let needs_pre_emit = expr.needs_pre_emit(); @@ -2480,7 +2542,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2566,7 +2628,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2684,7 +2746,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: 
Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2777,7 +2839,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2859,7 +2921,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, diff --git a/naga/src/proc/mod.rs b/naga/src/proc/mod.rs index 46d7bfce6e..4ae38f08e1 100644 --- a/naga/src/proc/mod.rs +++ b/naga/src/proc/mod.rs @@ -48,7 +48,7 @@ impl From for super::ScalarKind { Sf::Bgra8Unorm => Sk::Float, Sf::Rgb10a2Uint => Sk::Uint, Sf::Rgb10a2Unorm => Sk::Float, - Sf::Rg11b10UFloat => Sk::Float, + Sf::Rg11b10Ufloat => Sk::Float, Sf::R64Uint => Sk::Uint, Sf::Rg32Uint => Sk::Uint, Sf::Rg32Sint => Sk::Sint, @@ -675,6 +675,19 @@ impl GlobalCtx<'_> { } } + /// Try to evaluate the expression in the `arena` using its `handle` and return it as a `bool`. 
+ #[allow(dead_code)] + pub(super) fn eval_expr_to_bool_from( + &self, + handle: crate::Handle, + arena: &crate::Arena, + ) -> Option { + match self.eval_expr_to_literal_from(handle, arena) { + Some(crate::Literal::Bool(value)) => Some(value), + _ => None, + } + } + #[allow(dead_code)] pub(crate) fn eval_expr_to_literal( &self, diff --git a/naga/src/valid/analyzer.rs b/naga/src/valid/analyzer.rs index 569a17bf4b..3dd236e0ae 100644 --- a/naga/src/valid/analyzer.rs +++ b/naga/src/valid/analyzer.rs @@ -589,23 +589,16 @@ impl FunctionInfo { requirements: UniformityRequirements::empty(), } } - // depends on the builtin or interpolation + // depends on the builtin E::FunctionArgument(index) => { let arg = &resolve_context.arguments[index as usize]; let uniform = match arg.binding { Some(crate::Binding::BuiltIn( - // per-polygon built-ins are uniform - crate::BuiltIn::FrontFacing // per-work-group built-ins are uniform - | crate::BuiltIn::WorkGroupId + crate::BuiltIn::WorkGroupId | crate::BuiltIn::WorkGroupSize - | crate::BuiltIn::NumWorkGroups) - ) => true, - // only flat inputs are uniform - Some(crate::Binding::Location { - interpolation: Some(crate::Interpolation::Flat), - .. 
- }) => true, + | crate::BuiltIn::NumWorkGroups, + )) => true, _ => false, }; Uniformity { diff --git a/naga/src/valid/expression.rs b/naga/src/valid/expression.rs index 1d1420aef6..0b0d115c57 100644 --- a/naga/src/valid/expression.rs +++ b/naga/src/valid/expression.rs @@ -1161,7 +1161,7 @@ impl super::Validator { )); } } - Mf::Outer | Mf::Cross | Mf::Reflect => { + Mf::Outer | Mf::Reflect => { let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) { (Some(ty1), None, None) => ty1, _ => return Err(ExpressionError::WrongArgumentCount(fun)), @@ -1184,6 +1184,29 @@ impl super::Validator { )); } } + Mf::Cross => { + let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) { + (Some(ty1), None, None) => ty1, + _ => return Err(ExpressionError::WrongArgumentCount(fun)), + }; + match *arg_ty { + Ti::Vector { + scalar: + Sc { + kind: Sk::Float, .. + }, + size: crate::VectorSize::Tri, + } => {} + _ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)), + } + if arg1_ty != arg_ty { + return Err(ExpressionError::InvalidArgumentType( + fun, + 1, + arg1.unwrap(), + )); + } + } Mf::Refract => { let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) { (Some(ty1), Some(ty2), None) => (ty1, ty2), diff --git a/naga/src/valid/interface.rs b/naga/src/valid/interface.rs index 7fce9c8fd9..150a1f9df5 100644 --- a/naga/src/valid/interface.rs +++ b/naga/src/valid/interface.rs @@ -50,6 +50,11 @@ pub enum VaryingError { NotIOShareableType(Handle), #[error("Interpolation is not valid")] InvalidInterpolation, + #[error("Cannot combine {interpolation:?} interpolation with the {sampling:?} sample type")] + InvalidInterpolationSamplingCombination { + interpolation: crate::Interpolation, + sampling: crate::Sampling, + }, #[error("Interpolation must be specified on vertex shader outputs and fragment shader inputs")] MissingInterpolation, #[error("Built-in {0:?} is not available at this stage")] @@ -339,6 +344,31 @@ impl VaryingContext<'_> { } } + if let Some(interpolation) = interpolation { + let 
invalid_sampling = match (interpolation, sampling) { + (_, None) + | ( + crate::Interpolation::Perspective | crate::Interpolation::Linear, + Some( + crate::Sampling::Center + | crate::Sampling::Centroid + | crate::Sampling::Sample, + ), + ) + | ( + crate::Interpolation::Flat, + Some(crate::Sampling::First | crate::Sampling::Either), + ) => None, + (_, Some(invalid_sampling)) => Some(invalid_sampling), + }; + if let Some(sampling) = invalid_sampling { + return Err(VaryingError::InvalidInterpolationSamplingCombination { + interpolation, + sampling, + }); + } + } + let needs_interpolation = match self.stage { crate::ShaderStage::Vertex => self.output, crate::ShaderStage::Fragment => !self.output, diff --git a/naga/tests/in/abstract-types-var.wgsl b/naga/tests/in/abstract-types-var.wgsl index a733888530..c573f73d57 100644 --- a/naga/tests/in/abstract-types-var.wgsl +++ b/naga/tests/in/abstract-types-var.wgsl @@ -43,6 +43,20 @@ var xafpaiaf: array = array(1, 2.0); var xafpafai: array = array(1.0, 2); var xafpafaf: array = array(1.0, 2.0); +var ivispai = vec2(1); +var ivfspaf = vec2(1.0); +var ivis_ai = vec2(1); +var ivus_ai = vec2(1); +var ivfs_ai = vec2(1); +var ivfs_af = vec2(1.0); + +var iafafaf = array(1.0, 2.0); +var iafaiai = array(1, 2); + +var iafpafaf = array(1.0, 2.0); +var iafpaiaf = array(1, 2.0); +var iafpafai = array(1.0, 2); + fn all_constant_arguments() { var xvipaiai: vec2 = vec2(42, 43); var xvupaiai: vec2 = vec2(44, 45); diff --git a/naga/tests/in/const_assert.wgsl b/naga/tests/in/const_assert.wgsl new file mode 100644 index 0000000000..23f57fbb1a --- /dev/null +++ b/naga/tests/in/const_assert.wgsl @@ -0,0 +1,11 @@ +// Sourced from https://www.w3.org/TR/WGSL/#const-assert-statement +const x = 1; +const y = 2; +const_assert x < y; // valid at module-scope. +const_assert(y != 0); // parentheses are optional. + +fn foo() { + const z = x + y - 2; + const_assert z > 0; // valid in functions. 
+ const_assert(z > 0); +} \ No newline at end of file diff --git a/naga/tests/in/cross.wgsl b/naga/tests/in/cross.wgsl new file mode 100644 index 0000000000..cfdd7622c3 --- /dev/null +++ b/naga/tests/in/cross.wgsl @@ -0,0 +1,4 @@ +// NOTE: invalid combinations are tested in the `validation::bad_cross_builtin_args` test. +@compute @workgroup_size(1) fn main() { + let a = cross(vec3(0., 1., 2.), vec3(0., 1., 2.)); +} diff --git a/naga/tests/in/interpolate.wgsl b/naga/tests/in/interpolate.wgsl index 2f6967b3e7..11657f10b8 100644 --- a/naga/tests/in/interpolate.wgsl +++ b/naga/tests/in/interpolate.wgsl @@ -1,14 +1,20 @@ //TODO: merge with "interface"? +// NOTE: invalid combinations are tested in the +// `validation::incompatible_interpolation_and_sampling_types` test. struct FragmentInput { @builtin(position) position: vec4, @location(0) @interpolate(flat) _flat : u32, - @location(1) @interpolate(linear) _linear : f32, - @location(2) @interpolate(linear, centroid) linear_centroid : vec2, - @location(3) @interpolate(linear, sample) linear_sample : vec3, - @location(4) @interpolate(perspective) perspective : vec4, - @location(5) @interpolate(perspective, centroid) perspective_centroid : f32, - @location(6) @interpolate(perspective, sample) perspective_sample : f32, + @location(1) @interpolate(flat, first) flat_first : u32, + @location(2) @interpolate(flat, either) flat_either : u32, + @location(3) @interpolate(linear) _linear : f32, + @location(4) @interpolate(linear, centroid) linear_centroid : vec2, + @location(6) @interpolate(linear, sample) linear_sample : vec3, + @location(7) @interpolate(linear, center) linear_center : vec3, + @location(8) @interpolate(perspective) perspective : vec4, + @location(9) @interpolate(perspective, centroid) perspective_centroid : f32, + @location(10) @interpolate(perspective, sample) perspective_sample : f32, + @location(11) @interpolate(perspective, center) perspective_center : f32, } @vertex @@ -17,12 +23,16 @@ fn vert_main() -> 
FragmentInput { out.position = vec4(2.0, 4.0, 5.0, 6.0); out._flat = 8u; + out.flat_first = 9u; + out.flat_either = 10u; out._linear = 27.0; out.linear_centroid = vec2(64.0, 125.0); out.linear_sample = vec3(216.0, 343.0, 512.0); + out.linear_center = vec3(255.0, 511.0, 1024.0); out.perspective = vec4(729.0, 1000.0, 1331.0, 1728.0); out.perspective_centroid = 2197.0; out.perspective_sample = 2744.0; + out.perspective_center = 2812.0; return out; } diff --git a/naga/tests/in/interpolate_compat.param.ron b/naga/tests/in/interpolate_compat.param.ron new file mode 100644 index 0000000000..b6d629c4ea --- /dev/null +++ b/naga/tests/in/interpolate_compat.param.ron @@ -0,0 +1,15 @@ +( + spv: ( + version: (1, 0), + capabilities: [ Shader, SampleRateShading ], + debug: true, + force_point_size: true, + adjust_coordinate_space: true, + ), + glsl: ( + version: Desktop(400), + writer_flags: (""), + binding_map: {}, + zero_initialize_workgroup_memory: true, + ), +) diff --git a/naga/tests/in/interpolate_compat.wgsl b/naga/tests/in/interpolate_compat.wgsl new file mode 100644 index 0000000000..9ba6e7b818 --- /dev/null +++ b/naga/tests/in/interpolate_compat.wgsl @@ -0,0 +1,43 @@ +// NOTE: This is basically the same as `interpolate.wgsl`, except for the removal of +// `@interpolate(flat, first)`, which is unsupported in GLSL and `compat`. + +// NOTE: invalid combinations are tested in the +// `validation::incompatible_interpolation_and_sampling_types` test. 
+struct FragmentInput { + @builtin(position) position: vec4, + @location(0) @interpolate(flat) _flat : u32, + // NOTE: not supported in `compat` or GLSL + // // @location(1) @interpolate(flat, first) flat_first : u32, + @location(2) @interpolate(flat, either) flat_either : u32, + @location(3) @interpolate(linear) _linear : f32, + @location(4) @interpolate(linear, centroid) linear_centroid : vec2, + @location(6) @interpolate(linear, sample) linear_sample : vec3, + @location(7) @interpolate(linear, center) linear_center : vec3, + @location(8) @interpolate(perspective) perspective : vec4, + @location(9) @interpolate(perspective, centroid) perspective_centroid : f32, + @location(10) @interpolate(perspective, sample) perspective_sample : f32, + @location(11) @interpolate(perspective, center) perspective_center : f32, +} + +@vertex +fn vert_main() -> FragmentInput { + var out: FragmentInput; + + out.position = vec4(2.0, 4.0, 5.0, 6.0); + out._flat = 8u; + // out.flat_first = 9u; + out.flat_either = 10u; + out._linear = 27.0; + out.linear_centroid = vec2(64.0, 125.0); + out.linear_sample = vec3(216.0, 343.0, 512.0); + out.linear_center = vec3(255.0, 511.0, 1024.0); + out.perspective = vec4(729.0, 1000.0, 1331.0, 1728.0); + out.perspective_centroid = 2197.0; + out.perspective_sample = 2744.0; + out.perspective_center = 2812.0; + + return out; +} + +@fragment +fn frag_main(val : FragmentInput) { } diff --git a/naga/tests/in/local-const.param.ron b/naga/tests/in/local-const.param.ron new file mode 100644 index 0000000000..dd626a0f31 --- /dev/null +++ b/naga/tests/in/local-const.param.ron @@ -0,0 +1 @@ +() \ No newline at end of file diff --git a/naga/tests/in/local-const.wgsl b/naga/tests/in/local-const.wgsl new file mode 100644 index 0000000000..18c932e1e0 --- /dev/null +++ b/naga/tests/in/local-const.wgsl @@ -0,0 +1,26 @@ +const ga = 4; // AbstractInt with a value of 4. +const gb : i32 = 4; // i32 with a value of 4. +const gc : u32 = 4; // u32 with a value of 4. 
+const gd : f32 = 4; // f32 with a value of 4. +const ge = vec3(ga, ga, ga); // vec3 of AbstractInt with a value of (4, 4, 4). +const gf = 2.0; // AbstractFloat with a value of 2. + +fn const_in_fn() { + const a = 4; // AbstractInt with a value of 4. + const b: i32 = 4; // i32 with a value of 4. + const c: u32 = 4; // u32 with a value of 4. + const d: f32 = 4; // f32 with a value of 4. + const e = vec3(a, a, a); // vec3 of AbstractInt with a value of (4, 4, 4). + const f = 2.0; // AbstractFloat with a value of 2. + // TODO: Make it per spec, currently not possible + // because naga does not support automatic conversions + // of Abstract types + + // Check that we can access global constants + const ag = ga; + const bg = gb; + const cg = gc; + const dg = gd; + const eg = ge; + const fg = gf; +} diff --git a/naga/tests/in/spv/atomic_exchange.spv b/naga/tests/in/spv/atomic_exchange.spv new file mode 100644 index 0000000000..cc64ce9aa8 Binary files /dev/null and b/naga/tests/in/spv/atomic_exchange.spv differ diff --git a/naga/tests/in/spv/atomic_exchange.spvasm b/naga/tests/in/spv/atomic_exchange.spvasm new file mode 100644 index 0000000000..09258f0584 --- /dev/null +++ b/naga/tests/in/spv/atomic_exchange.spvasm @@ -0,0 +1,88 @@ +; SPIR-V +; Version: 1.5 +; Generator: Google rspirv; 0 +; Bound: 63 +; Schema: 0 + OpCapability Shader + OpCapability VulkanMemoryModel + OpMemoryModel Logical Vulkan + OpEntryPoint GLCompute %1 "stage::test_atomic_exchange" %2 %3 + OpExecutionMode %1 LocalSize 32 1 1 + OpMemberDecorate %_struct_11 0 Offset 0 + OpMemberDecorate %_struct_11 1 Offset 4 + OpDecorate %_struct_12 Block + OpMemberDecorate %_struct_12 0 Offset 0 + OpDecorate %2 Binding 0 + OpDecorate %2 DescriptorSet 0 + OpDecorate %3 NonWritable + OpDecorate %3 Binding 1 + OpDecorate %3 DescriptorSet 0 + %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %15 = OpTypeFunction %void + %bool = OpTypeBool + %uint_0 = OpConstant %uint 0 + %uint_2 = OpConstant %uint 2 + %false = 
OpConstantFalse %bool +%_ptr_StorageBuffer_uint = OpTypePointer StorageBuffer %uint + %uint_1 = OpConstant %uint 1 + %_struct_11 = OpTypeStruct %uint %uint + %22 = OpUndef %_struct_11 + %int = OpTypeInt 32 1 + %true = OpConstantTrue %bool + %_struct_12 = OpTypeStruct %uint +%_ptr_StorageBuffer__struct_12 = OpTypePointer StorageBuffer %_struct_12 + %2 = OpVariable %_ptr_StorageBuffer__struct_12 StorageBuffer + %3 = OpVariable %_ptr_StorageBuffer__struct_12 StorageBuffer + %26 = OpUndef %uint + %1 = OpFunction %void None %15 + %27 = OpLabel + %28 = OpAccessChain %_ptr_StorageBuffer_uint %2 %uint_0 + %29 = OpAccessChain %_ptr_StorageBuffer_uint %3 %uint_0 + %30 = OpLoad %uint %29 + %31 = OpCompositeConstruct %_struct_11 %uint_0 %30 + OpBranch %32 + %32 = OpLabel + %33 = OpPhi %_struct_11 %31 %27 %34 %35 + %36 = OpPhi %uint %uint_0 %27 %37 %35 + OpLoopMerge %38 %35 None + OpBranch %39 + %39 = OpLabel + %40 = OpCompositeExtract %uint %33 0 + %41 = OpCompositeExtract %uint %33 1 + %42 = OpULessThan %bool %40 %41 + OpSelectionMerge %43 None + OpBranchConditional %42 %44 %45 + %44 = OpLabel + %47 = OpIAdd %uint %40 %uint_1 + %49 = OpCompositeInsert %_struct_11 %47 %33 0 + %50 = OpCompositeConstruct %_struct_11 %uint_1 %40 + OpBranch %43 + %45 = OpLabel + %51 = OpCompositeInsert %_struct_11 %uint_0 %22 0 + OpBranch %43 + %43 = OpLabel + %52 = OpPhi %_struct_11 %49 %44 %33 %45 + %53 = OpPhi %_struct_11 %50 %44 %51 %45 + %54 = OpCompositeExtract %uint %53 0 + %55 = OpBitcast %int %54 + OpSelectionMerge %56 None + OpSwitch %55 %57 0 %58 1 %59 + %57 = OpLabel + OpBranch %56 + %58 = OpLabel + OpBranch %56 + %59 = OpLabel + %60 = OpAtomicExchange %uint %28 %uint_2 %uint_0 %36 + %61 = OpIAdd %uint %36 %60 + OpBranch %56 + %56 = OpLabel + %62 = OpPhi %bool %false %57 %false %58 %true %59 + %34 = OpPhi %_struct_11 %22 %57 %22 %58 %52 %59 + %37 = OpPhi %uint %26 %57 %26 %58 %61 %59 + OpBranch %35 + %35 = OpLabel + OpBranchConditional %62 %32 %38 + %38 = OpLabel + OpReturn + 
OpFunctionEnd diff --git a/naga/tests/in/spv/atomic_i_add_sub.spv b/naga/tests/in/spv/atomic_i_add_sub.spv new file mode 100644 index 0000000000..8c26850400 Binary files /dev/null and b/naga/tests/in/spv/atomic_i_add_sub.spv differ diff --git a/naga/tests/in/spv/atomic_i_add_sub.spvasm b/naga/tests/in/spv/atomic_i_add_sub.spvasm new file mode 100644 index 0000000000..b23af99582 --- /dev/null +++ b/naga/tests/in/spv/atomic_i_add_sub.spvasm @@ -0,0 +1,51 @@ +; SPIR-V +; Version: 1.5 +; Generator: Google rspirv; 0 +; Bound: 30 +; Schema: 0 + OpCapability Shader + OpCapability VulkanMemoryModel + OpMemoryModel Logical Vulkan + OpEntryPoint GLCompute %1 "stage::test_atomic_i_add_sub" %2 %3 + OpExecutionMode %1 LocalSize 32 1 1 + OpDecorate %_runtimearr_uint ArrayStride 4 + OpDecorate %_struct_7 Block + OpMemberDecorate %_struct_7 0 Offset 0 + OpDecorate %_struct_8 Block + OpMemberDecorate %_struct_8 0 Offset 0 + OpDecorate %2 Binding 0 + OpDecorate %2 DescriptorSet 0 + OpDecorate %3 Binding 1 + OpDecorate %3 DescriptorSet 0 + %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %11 = OpTypeFunction %void + %bool = OpTypeBool +%_runtimearr_uint = OpTypeRuntimeArray %uint + %_struct_7 = OpTypeStruct %_runtimearr_uint +%_ptr_StorageBuffer__struct_7 = OpTypePointer StorageBuffer %_struct_7 + %uint_0 = OpConstant %uint 0 + %uint_2 = OpConstant %uint 2 +%_ptr_StorageBuffer_uint = OpTypePointer StorageBuffer %uint + %_struct_8 = OpTypeStruct %uint +%_ptr_StorageBuffer__struct_8 = OpTypePointer StorageBuffer %_struct_8 + %2 = OpVariable %_ptr_StorageBuffer__struct_8 StorageBuffer + %3 = OpVariable %_ptr_StorageBuffer__struct_7 StorageBuffer + %1 = OpFunction %void None %11 + %19 = OpLabel + %20 = OpAccessChain %_ptr_StorageBuffer_uint %2 %uint_0 + %22 = OpArrayLength %uint %3 0 + %23 = OpAtomicIAdd %uint %20 %uint_2 %uint_0 %uint_2 + %24 = OpAtomicISub %uint %20 %uint_2 %uint_0 %23 + %25 = OpULessThan %bool %23 %22 + OpSelectionMerge %26 None + OpBranchConditional %25 %27 %28 + %27 = 
OpLabel + %29 = OpAccessChain %_ptr_StorageBuffer_uint %3 %uint_0 %23 + OpStore %29 %24 + OpBranch %26 + %28 = OpLabel + OpBranch %26 + %26 = OpLabel + OpReturn + OpFunctionEnd diff --git a/naga/tests/in/spv/atomic_i_decrement.spv b/naga/tests/in/spv/atomic_i_decrement.spv new file mode 100644 index 0000000000..fda602ab55 Binary files /dev/null and b/naga/tests/in/spv/atomic_i_decrement.spv differ diff --git a/naga/tests/in/spv/atomic_i_decrement.spvasm b/naga/tests/in/spv/atomic_i_decrement.spvasm new file mode 100644 index 0000000000..cc125beec4 --- /dev/null +++ b/naga/tests/in/spv/atomic_i_decrement.spvasm @@ -0,0 +1,64 @@ +; SPIR-V +; Version: 1.5 +; Generator: Google rspirv; 0 +; Bound: 42 +; Schema: 0 + OpCapability Shader + OpCapability VulkanMemoryModel + OpMemoryModel Logical Vulkan + OpEntryPoint GLCompute %1 "stage::test_atomic_i_decrement" %2 %3 + OpExecutionMode %1 LocalSize 32 1 1 + OpDecorate %_runtimearr_uint ArrayStride 4 + OpDecorate %_struct_7 Block + OpMemberDecorate %_struct_7 0 Offset 0 + OpDecorate %_struct_8 Block + OpMemberDecorate %_struct_8 0 Offset 0 + OpDecorate %2 Binding 0 + OpDecorate %2 DescriptorSet 0 + OpDecorate %3 Binding 1 + OpDecorate %3 DescriptorSet 0 + %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %11 = OpTypeFunction %void + %bool = OpTypeBool +%_runtimearr_uint = OpTypeRuntimeArray %uint + %_struct_7 = OpTypeStruct %_runtimearr_uint +%_ptr_StorageBuffer__struct_7 = OpTypePointer StorageBuffer %_struct_7 + %uint_0 = OpConstant %uint 0 + %uint_2 = OpConstant %uint 2 + %false = OpConstantFalse %bool +%_ptr_StorageBuffer_uint = OpTypePointer StorageBuffer %uint + %true = OpConstantTrue %bool + %_struct_8 = OpTypeStruct %uint +%_ptr_StorageBuffer__struct_8 = OpTypePointer StorageBuffer %_struct_8 + %2 = OpVariable %_ptr_StorageBuffer__struct_8 StorageBuffer + %3 = OpVariable %_ptr_StorageBuffer__struct_7 StorageBuffer + %1 = OpFunction %void None %11 + %21 = OpLabel + %22 = OpAccessChain %_ptr_StorageBuffer_uint %2 %uint_0 + 
%24 = OpArrayLength %uint %3 0 + OpBranch %25 + %25 = OpLabel + OpLoopMerge %26 %27 None + OpBranch %28 + %28 = OpLabel + %29 = OpAtomicIDecrement %uint %22 %uint_2 %uint_0 + %30 = OpULessThan %bool %29 %24 + OpSelectionMerge %31 None + OpBranchConditional %30 %32 %33 + %32 = OpLabel + %34 = OpAccessChain %_ptr_StorageBuffer_uint %3 %uint_0 %29 + OpStore %34 %29 + %35 = OpIEqual %bool %29 %uint_0 + %41 = OpSelect %bool %35 %false %true + OpBranch %31 + %33 = OpLabel + OpBranch %31 + %31 = OpLabel + %40 = OpPhi %bool %41 %32 %false %33 + OpBranch %27 + %27 = OpLabel + OpBranchConditional %40 %25 %26 + %26 = OpLabel + OpReturn + OpFunctionEnd diff --git a/naga/tests/in/spv/atomic_i_increment.spvasm b/naga/tests/in/spv/atomic_i_increment.spvasm index 4586102d23..c072ca2986 100644 --- a/naga/tests/in/spv/atomic_i_increment.spvasm +++ b/naga/tests/in/spv/atomic_i_increment.spvasm @@ -59,4 +59,3 @@ %26 = OpLabel OpReturn OpFunctionEnd - diff --git a/naga/tests/in/spv/atomic_load_and_store.spv b/naga/tests/in/spv/atomic_load_and_store.spv new file mode 100644 index 0000000000..e2e9ddfd0b Binary files /dev/null and b/naga/tests/in/spv/atomic_load_and_store.spv differ diff --git a/naga/tests/in/spv/atomic_load_and_store.spvasm b/naga/tests/in/spv/atomic_load_and_store.spvasm new file mode 100644 index 0000000000..f65600c437 --- /dev/null +++ b/naga/tests/in/spv/atomic_load_and_store.spvasm @@ -0,0 +1,86 @@ +; SPIR-V +; Version: 1.5 +; Generator: Google rspirv; 0 +; Bound: 60 +; Schema: 0 + OpCapability Shader + OpCapability VulkanMemoryModel + OpMemoryModel Logical Vulkan + OpEntryPoint GLCompute %1 "stage::test_atomic_load_and_store" %2 %3 + OpExecutionMode %1 LocalSize 32 1 1 + OpMemberDecorate %_struct_11 0 Offset 0 + OpMemberDecorate %_struct_11 1 Offset 4 + OpDecorate %_struct_12 Block + OpMemberDecorate %_struct_12 0 Offset 0 + OpDecorate %2 Binding 0 + OpDecorate %2 DescriptorSet 0 + OpDecorate %3 NonWritable + OpDecorate %3 Binding 1 + OpDecorate %3 DescriptorSet 0 
+ %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %15 = OpTypeFunction %void + %bool = OpTypeBool + %uint_0 = OpConstant %uint 0 + %uint_2 = OpConstant %uint 2 + %false = OpConstantFalse %bool +%_ptr_StorageBuffer_uint = OpTypePointer StorageBuffer %uint + %uint_1 = OpConstant %uint 1 + %_struct_11 = OpTypeStruct %uint %uint + %22 = OpUndef %_struct_11 + %int = OpTypeInt 32 1 + %true = OpConstantTrue %bool + %_struct_12 = OpTypeStruct %uint +%_ptr_StorageBuffer__struct_12 = OpTypePointer StorageBuffer %_struct_12 + %2 = OpVariable %_ptr_StorageBuffer__struct_12 StorageBuffer + %3 = OpVariable %_ptr_StorageBuffer__struct_12 StorageBuffer + %1 = OpFunction %void None %15 + %26 = OpLabel + %27 = OpAccessChain %_ptr_StorageBuffer_uint %2 %uint_0 + %28 = OpAccessChain %_ptr_StorageBuffer_uint %3 %uint_0 + %29 = OpLoad %uint %28 + %30 = OpCompositeConstruct %_struct_11 %uint_0 %29 + OpBranch %31 + %31 = OpLabel + %32 = OpPhi %_struct_11 %30 %26 %33 %34 + OpLoopMerge %35 %34 None + OpBranch %36 + %36 = OpLabel + %37 = OpCompositeExtract %uint %32 0 + %38 = OpCompositeExtract %uint %32 1 + %39 = OpULessThan %bool %37 %38 + OpSelectionMerge %40 None + OpBranchConditional %39 %41 %42 + %41 = OpLabel + %44 = OpIAdd %uint %37 %uint_1 + %46 = OpCompositeInsert %_struct_11 %44 %32 0 + %47 = OpCompositeConstruct %_struct_11 %uint_1 %37 + OpBranch %40 + %42 = OpLabel + %48 = OpCompositeInsert %_struct_11 %uint_0 %22 0 + OpBranch %40 + %40 = OpLabel + %49 = OpPhi %_struct_11 %46 %41 %32 %42 + %50 = OpPhi %_struct_11 %47 %41 %48 %42 + %51 = OpCompositeExtract %uint %50 0 + %52 = OpBitcast %int %51 + OpSelectionMerge %53 None + OpSwitch %52 %54 0 %55 1 %56 + %54 = OpLabel + OpBranch %53 + %55 = OpLabel + OpBranch %53 + %56 = OpLabel + %57 = OpAtomicLoad %uint %27 %uint_2 %uint_0 + %58 = OpIAdd %uint %57 %uint_2 + OpAtomicStore %27 %uint_2 %uint_0 %58 + OpBranch %53 + %53 = OpLabel + %59 = OpPhi %bool %false %54 %false %55 %true %56 + %33 = OpPhi %_struct_11 %22 %54 %22 %55 %49 %56 + 
OpBranch %34 + %34 = OpLabel + OpBranchConditional %59 %31 %35 + %35 = OpLabel + OpReturn + OpFunctionEnd diff --git a/naga/tests/in/type-alias.wgsl b/naga/tests/in/type-alias.wgsl index 69c1eae4ef..cdc78451be 100644 --- a/naga/tests/in/type-alias.wgsl +++ b/naga/tests/in/type-alias.wgsl @@ -2,6 +2,8 @@ alias FVec3 = vec3; alias IVec3 = vec3i; alias Mat2 = mat2x2; alias Mat3 = mat3x3f; +alias I32 = i32; +alias F32 = f32; fn main() { let a = FVec3(0.0, 0.0, 0.0); @@ -12,4 +14,7 @@ fn main() { let f = Mat2(1.0, 2.0, 3.0, 4.0); let g = Mat3(a, a, a); + + let h = vec2(); + let i = mat2x2(); } diff --git a/naga/tests/out/analysis/access.info.ron b/naga/tests/out/analysis/access.info.ron index 52b0c020eb..308bb1a8b6 100644 --- a/naga/tests/out/analysis/access.info.ron +++ b/naga/tests/out/analysis/access.info.ron @@ -5,6 +5,7 @@ ("DATA | SIZED | COPY | IO_SHAREABLE | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | IO_SHAREABLE | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | IO_SHAREABLE | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), + ("DATA | SIZED | COPY | IO_SHAREABLE | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), @@ -20,7 +21,6 @@ ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), - ("DATA | SIZED | COPY | IO_SHAREABLE | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("SIZED | COPY | ARGUMENT"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), ("DATA | SIZED | COPY | HOST_SHAREABLE | ARGUMENT | CONSTRUCTIBLE"), @@ -110,7 +110,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -122,7 
+122,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -133,7 +133,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(14), + ty: Handle(15), ), ( uniformity: ( @@ -143,7 +143,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -155,7 +155,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -198,7 +198,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -210,7 +210,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -262,7 +262,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -274,7 +274,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -330,7 +330,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -342,7 +342,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -407,7 +407,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -419,7 +419,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -484,7 +484,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 15, + base: 16, space: Uniform, )), ), @@ -496,7 +496,7 @@ ref_count: 1, assignable_global: Some(2), ty: Value(Pointer( - base: 14, + base: 15, space: Uniform, )), ), @@ -650,7 +650,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(14), + ty: Handle(15), ), ( uniformity: ( @@ -659,7 +659,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(15), + ty: Handle(16), ), ( 
uniformity: ( @@ -669,7 +669,7 @@ ref_count: 7, assignable_global: None, ty: Value(Pointer( - base: 15, + base: 16, space: Function, )), ), @@ -711,7 +711,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -803,7 +803,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(14), + ty: Handle(15), ), ( uniformity: ( @@ -813,7 +813,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -868,7 +868,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -932,7 +932,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -988,7 +988,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -1053,7 +1053,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -1118,7 +1118,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 14, + base: 15, space: Function, )), ), @@ -1267,7 +1267,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1279,7 +1279,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1290,7 +1290,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(18), + ty: Handle(19), ), ( uniformity: ( @@ -1300,7 +1300,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1312,7 +1312,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1324,7 +1324,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1335,7 +1335,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(17), + ty: 
Handle(18), ), ( uniformity: ( @@ -1345,7 +1345,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1357,7 +1357,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1369,7 +1369,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1412,7 +1412,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1424,7 +1424,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1436,7 +1436,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1488,7 +1488,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1500,7 +1500,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1512,7 +1512,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1568,7 +1568,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1580,7 +1580,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1592,7 +1592,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1657,7 +1657,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1669,7 +1669,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1681,7 +1681,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1746,7 +1746,7 
@@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 19, + base: 20, space: Uniform, )), ), @@ -1758,7 +1758,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 18, + base: 19, space: Uniform, )), ), @@ -1770,7 +1770,7 @@ ref_count: 1, assignable_global: Some(4), ty: Value(Pointer( - base: 17, + base: 18, space: Uniform, )), ), @@ -1843,7 +1843,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(18), + ty: Handle(19), ), ( uniformity: ( @@ -1852,7 +1852,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(19), + ty: Handle(20), ), ( uniformity: ( @@ -1862,7 +1862,7 @@ ref_count: 8, assignable_global: None, ty: Value(Pointer( - base: 19, + base: 20, space: Function, )), ), @@ -1904,7 +1904,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -1915,7 +1915,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(18), + ty: Handle(19), ), ( uniformity: ( @@ -1925,7 +1925,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -1937,7 +1937,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2056,7 +2056,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(17), + ty: Handle(18), ), ( uniformity: ( @@ -2066,7 +2066,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -2078,7 +2078,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2133,7 +2133,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -2145,7 +2145,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2209,7 +2209,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), 
@@ -2221,7 +2221,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2277,7 +2277,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -2289,7 +2289,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2354,7 +2354,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -2366,7 +2366,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2431,7 +2431,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 18, + base: 19, space: Function, )), ), @@ -2443,7 +2443,7 @@ ref_count: 1, assignable_global: None, ty: Value(Pointer( - base: 17, + base: 18, space: Function, )), ), @@ -2546,7 +2546,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(20), + ty: Handle(5), ), ], sampling: [], @@ -2594,7 +2594,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(20), + ty: Handle(5), ), ], sampling: [], @@ -2783,7 +2783,7 @@ ref_count: 3, assignable_global: None, ty: Value(Pointer( - base: 20, + base: 5, space: Function, )), ), @@ -2794,7 +2794,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(20), + ty: Handle(5), ), ( uniformity: ( @@ -2816,7 +2816,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -2830,7 +2830,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 5, + base: 6, space: Storage( access: ("LOAD | STORE"), ), @@ -2843,7 +2843,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(5), + ty: Handle(6), ), ( uniformity: ( @@ -2853,7 +2853,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -2867,7 +2867,7 @@ ref_count: 1, 
assignable_global: Some(1), ty: Value(Pointer( - base: 11, + base: 12, space: Storage( access: ("LOAD | STORE"), ), @@ -2880,7 +2880,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(11), + ty: Handle(12), ), ( uniformity: ( @@ -2902,7 +2902,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -2916,7 +2916,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 5, + base: 6, space: Storage( access: ("LOAD | STORE"), ), @@ -2978,7 +2978,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -2992,7 +2992,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 12, + base: 13, space: Storage( access: ("LOAD | STORE"), ), @@ -3006,7 +3006,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3020,7 +3020,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 12, + base: 13, space: Storage( access: ("LOAD | STORE"), ), @@ -3107,7 +3107,7 @@ ref_count: 1, assignable_global: Some(3), ty: Value(Pointer( - base: 16, + base: 17, space: Storage( access: ("LOAD | STORE"), ), @@ -3120,7 +3120,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(16), + ty: Handle(17), ), ( uniformity: ( @@ -3130,7 +3130,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3144,7 +3144,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 12, + base: 13, space: Storage( access: ("LOAD | STORE"), ), @@ -3185,7 +3185,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(20), + ty: Handle(5), ), ( uniformity: ( @@ -3338,7 +3338,7 @@ ), ref_count: 0, assignable_global: None, - ty: Handle(20), + ty: Handle(5), ), ( uniformity: ( @@ -3435,7 +3435,7 @@ ref_count: 1, 
assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3449,7 +3449,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 5, + base: 6, space: Storage( access: ("LOAD | STORE"), ), @@ -3511,7 +3511,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3525,7 +3525,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 5, + base: 6, space: Storage( access: ("LOAD | STORE"), ), @@ -3646,7 +3646,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(5), + ty: Handle(6), ), ( uniformity: ( @@ -3656,7 +3656,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3670,7 +3670,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 11, + base: 12, space: Storage( access: ("LOAD | STORE"), ), @@ -3737,7 +3737,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(11), + ty: Handle(12), ), ( uniformity: ( @@ -3747,7 +3747,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 13, + base: 14, space: Storage( access: ("LOAD | STORE"), ), @@ -3761,7 +3761,7 @@ ref_count: 1, assignable_global: Some(1), ty: Value(Pointer( - base: 12, + base: 13, space: Storage( access: ("LOAD | STORE"), ), @@ -3815,7 +3815,7 @@ ref_count: 1, assignable_global: Some(3), ty: Value(Pointer( - base: 16, + base: 17, space: Storage( access: ("LOAD | STORE"), ), @@ -3828,7 +3828,7 @@ ), ref_count: 1, assignable_global: None, - ty: Handle(16), + ty: Handle(17), ), ( uniformity: ( diff --git a/naga/tests/out/glsl/access.foo_frag.Fragment.glsl b/naga/tests/out/glsl/access.foo_frag.Fragment.glsl index 3d52fa56b0..aacdda0130 100644 --- a/naga/tests/out/glsl/access.foo_frag.Fragment.glsl +++ b/naga/tests/out/glsl/access.foo_frag.Fragment.glsl @@ -26,7 +26,7 @@ layout(std430) buffer 
Bar_block_0Fragment { AlignedWrapper data[]; } _group_0_binding_0_fs; -layout(std430) buffer type_12_block_1Fragment { ivec2 _group_0_binding_2_fs; }; +layout(std430) buffer type_13_block_1Fragment { ivec2 _group_0_binding_2_fs; }; layout(location = 0) out vec4 _fs2p_location0; diff --git a/naga/tests/out/glsl/access.foo_vert.Vertex.glsl b/naga/tests/out/glsl/access.foo_vert.Vertex.glsl index edc7ce1e6b..d4a9b92945 100644 --- a/naga/tests/out/glsl/access.foo_vert.Vertex.glsl +++ b/naga/tests/out/glsl/access.foo_vert.Vertex.glsl @@ -28,7 +28,7 @@ layout(std430) buffer Bar_block_0Vertex { uniform Baz_block_1Vertex { Baz _group_0_binding_1_vs; }; -layout(std430) buffer type_12_block_2Vertex { ivec2 _group_0_binding_2_vs; }; +layout(std430) buffer type_13_block_2Vertex { ivec2 _group_0_binding_2_vs; }; uniform MatCx2InArray_block_3Vertex { MatCx2InArray _group_0_binding_3_vs; }; diff --git a/naga/tests/out/glsl/cross.main.Compute.glsl b/naga/tests/out/glsl/cross.main.Compute.glsl new file mode 100644 index 0000000000..a4950274e3 --- /dev/null +++ b/naga/tests/out/glsl/cross.main.Compute.glsl @@ -0,0 +1,12 @@ +#version 310 es + +precision highp float; +precision highp int; + +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + + +void main() { + vec3 a = cross(vec3(0.0, 1.0, 2.0), vec3(0.0, 1.0, 2.0)); +} + diff --git a/naga/tests/out/glsl/interpolate.frag_main.Fragment.glsl b/naga/tests/out/glsl/interpolate.frag_main.Fragment.glsl index d1662da493..3f23f487f4 100644 --- a/naga/tests/out/glsl/interpolate.frag_main.Fragment.glsl +++ b/naga/tests/out/glsl/interpolate.frag_main.Fragment.glsl @@ -2,23 +2,31 @@ struct FragmentInput { vec4 position; uint _flat; + uint flat_first; + uint flat_either; float _linear; vec2 linear_centroid; vec3 linear_sample; + vec3 linear_center; vec4 perspective; float perspective_centroid; float perspective_sample; + float perspective_center; }; flat in uint _vs2fs_location0; -noperspective in float _vs2fs_location1; 
-noperspective centroid in vec2 _vs2fs_location2; -noperspective sample in vec3 _vs2fs_location3; -smooth in vec4 _vs2fs_location4; -smooth centroid in float _vs2fs_location5; -smooth sample in float _vs2fs_location6; +flat in uint _vs2fs_location1; +flat in uint _vs2fs_location2; +noperspective in float _vs2fs_location3; +noperspective centroid in vec2 _vs2fs_location4; +noperspective sample in vec3 _vs2fs_location6; +noperspective in vec3 _vs2fs_location7; +smooth in vec4 _vs2fs_location8; +smooth centroid in float _vs2fs_location9; +smooth sample in float _vs2fs_location10; +smooth in float _vs2fs_location11; void main() { - FragmentInput val = FragmentInput(gl_FragCoord, _vs2fs_location0, _vs2fs_location1, _vs2fs_location2, _vs2fs_location3, _vs2fs_location4, _vs2fs_location5, _vs2fs_location6); + FragmentInput val = FragmentInput(gl_FragCoord, _vs2fs_location0, _vs2fs_location1, _vs2fs_location2, _vs2fs_location3, _vs2fs_location4, _vs2fs_location6, _vs2fs_location7, _vs2fs_location8, _vs2fs_location9, _vs2fs_location10, _vs2fs_location11); return; } diff --git a/naga/tests/out/glsl/interpolate.vert_main.Vertex.glsl b/naga/tests/out/glsl/interpolate.vert_main.Vertex.glsl index f423a3dc18..1afe43a478 100644 --- a/naga/tests/out/glsl/interpolate.vert_main.Vertex.glsl +++ b/naga/tests/out/glsl/interpolate.vert_main.Vertex.glsl @@ -2,40 +2,56 @@ struct FragmentInput { vec4 position; uint _flat; + uint flat_first; + uint flat_either; float _linear; vec2 linear_centroid; vec3 linear_sample; + vec3 linear_center; vec4 perspective; float perspective_centroid; float perspective_sample; + float perspective_center; }; flat out uint _vs2fs_location0; -noperspective out float _vs2fs_location1; -noperspective centroid out vec2 _vs2fs_location2; -noperspective sample out vec3 _vs2fs_location3; -smooth out vec4 _vs2fs_location4; -smooth centroid out float _vs2fs_location5; -smooth sample out float _vs2fs_location6; +flat out uint _vs2fs_location1; +flat out uint 
_vs2fs_location2; +noperspective out float _vs2fs_location3; +noperspective centroid out vec2 _vs2fs_location4; +noperspective sample out vec3 _vs2fs_location6; +noperspective out vec3 _vs2fs_location7; +smooth out vec4 _vs2fs_location8; +smooth centroid out float _vs2fs_location9; +smooth sample out float _vs2fs_location10; +smooth out float _vs2fs_location11; void main() { - FragmentInput out_ = FragmentInput(vec4(0.0), 0u, 0.0, vec2(0.0), vec3(0.0), vec4(0.0), 0.0, 0.0); + FragmentInput out_ = FragmentInput(vec4(0.0), 0u, 0u, 0u, 0.0, vec2(0.0), vec3(0.0), vec3(0.0), vec4(0.0), 0.0, 0.0, 0.0); out_.position = vec4(2.0, 4.0, 5.0, 6.0); out_._flat = 8u; + out_.flat_first = 9u; + out_.flat_either = 10u; out_._linear = 27.0; out_.linear_centroid = vec2(64.0, 125.0); out_.linear_sample = vec3(216.0, 343.0, 512.0); + out_.linear_center = vec3(255.0, 511.0, 1024.0); out_.perspective = vec4(729.0, 1000.0, 1331.0, 1728.0); out_.perspective_centroid = 2197.0; out_.perspective_sample = 2744.0; - FragmentInput _e30 = out_; - gl_Position = _e30.position; - _vs2fs_location0 = _e30._flat; - _vs2fs_location1 = _e30._linear; - _vs2fs_location2 = _e30.linear_centroid; - _vs2fs_location3 = _e30.linear_sample; - _vs2fs_location4 = _e30.perspective; - _vs2fs_location5 = _e30.perspective_centroid; - _vs2fs_location6 = _e30.perspective_sample; + out_.perspective_center = 2812.0; + FragmentInput _e41 = out_; + gl_Position = _e41.position; + _vs2fs_location0 = _e41._flat; + _vs2fs_location1 = _e41.flat_first; + _vs2fs_location2 = _e41.flat_either; + _vs2fs_location3 = _e41._linear; + _vs2fs_location4 = _e41.linear_centroid; + _vs2fs_location6 = _e41.linear_sample; + _vs2fs_location7 = _e41.linear_center; + _vs2fs_location8 = _e41.perspective; + _vs2fs_location9 = _e41.perspective_centroid; + _vs2fs_location10 = _e41.perspective_sample; + _vs2fs_location11 = _e41.perspective_center; return; } diff --git a/naga/tests/out/glsl/interpolate_compat.frag_main.Fragment.glsl 
b/naga/tests/out/glsl/interpolate_compat.frag_main.Fragment.glsl new file mode 100644 index 0000000000..ac7fad324f --- /dev/null +++ b/naga/tests/out/glsl/interpolate_compat.frag_main.Fragment.glsl @@ -0,0 +1,30 @@ +#version 400 core +struct FragmentInput { + vec4 position; + uint _flat; + uint flat_either; + float _linear; + vec2 linear_centroid; + vec3 linear_sample; + vec3 linear_center; + vec4 perspective; + float perspective_centroid; + float perspective_sample; + float perspective_center; +}; +flat in uint _vs2fs_location0; +flat in uint _vs2fs_location2; +noperspective in float _vs2fs_location3; +noperspective centroid in vec2 _vs2fs_location4; +noperspective sample in vec3 _vs2fs_location6; +noperspective in vec3 _vs2fs_location7; +smooth in vec4 _vs2fs_location8; +smooth centroid in float _vs2fs_location9; +smooth sample in float _vs2fs_location10; +smooth in float _vs2fs_location11; + +void main() { + FragmentInput val = FragmentInput(gl_FragCoord, _vs2fs_location0, _vs2fs_location2, _vs2fs_location3, _vs2fs_location4, _vs2fs_location6, _vs2fs_location7, _vs2fs_location8, _vs2fs_location9, _vs2fs_location10, _vs2fs_location11); + return; +} + diff --git a/naga/tests/out/glsl/interpolate_compat.vert_main.Vertex.glsl b/naga/tests/out/glsl/interpolate_compat.vert_main.Vertex.glsl new file mode 100644 index 0000000000..5b85026d79 --- /dev/null +++ b/naga/tests/out/glsl/interpolate_compat.vert_main.Vertex.glsl @@ -0,0 +1,53 @@ +#version 400 core +struct FragmentInput { + vec4 position; + uint _flat; + uint flat_either; + float _linear; + vec2 linear_centroid; + vec3 linear_sample; + vec3 linear_center; + vec4 perspective; + float perspective_centroid; + float perspective_sample; + float perspective_center; +}; +flat out uint _vs2fs_location0; +flat out uint _vs2fs_location2; +noperspective out float _vs2fs_location3; +noperspective centroid out vec2 _vs2fs_location4; +noperspective sample out vec3 _vs2fs_location6; +noperspective out vec3 _vs2fs_location7; 
+smooth out vec4 _vs2fs_location8; +smooth centroid out float _vs2fs_location9; +smooth sample out float _vs2fs_location10; +smooth out float _vs2fs_location11; + +void main() { + FragmentInput out_ = FragmentInput(vec4(0.0), 0u, 0u, 0.0, vec2(0.0), vec3(0.0), vec3(0.0), vec4(0.0), 0.0, 0.0, 0.0); + out_.position = vec4(2.0, 4.0, 5.0, 6.0); + out_._flat = 8u; + out_.flat_either = 10u; + out_._linear = 27.0; + out_.linear_centroid = vec2(64.0, 125.0); + out_.linear_sample = vec3(216.0, 343.0, 512.0); + out_.linear_center = vec3(255.0, 511.0, 1024.0); + out_.perspective = vec4(729.0, 1000.0, 1331.0, 1728.0); + out_.perspective_centroid = 2197.0; + out_.perspective_sample = 2744.0; + out_.perspective_center = 2812.0; + FragmentInput _e39 = out_; + gl_Position = _e39.position; + _vs2fs_location0 = _e39._flat; + _vs2fs_location2 = _e39.flat_either; + _vs2fs_location3 = _e39._linear; + _vs2fs_location4 = _e39.linear_centroid; + _vs2fs_location6 = _e39.linear_sample; + _vs2fs_location7 = _e39.linear_center; + _vs2fs_location8 = _e39.perspective; + _vs2fs_location9 = _e39.perspective_centroid; + _vs2fs_location10 = _e39.perspective_sample; + _vs2fs_location11 = _e39.perspective_center; + return; +} + diff --git a/naga/tests/out/glsl/shadow.fs_main.Fragment.glsl b/naga/tests/out/glsl/shadow.fs_main.Fragment.glsl index 61c14561d5..ab7214380f 100644 --- a/naga/tests/out/glsl/shadow.fs_main.Fragment.glsl +++ b/naga/tests/out/glsl/shadow.fs_main.Fragment.glsl @@ -28,7 +28,7 @@ uniform Globals_block_0Fragment { Globals _group_0_binding_0_fs; }; uniform Entity_block_1Fragment { Entity _group_1_binding_0_fs; }; -layout(std430) readonly buffer type_6_block_2Fragment { Light _group_0_binding_1_fs[]; }; +layout(std430) readonly buffer type_8_block_2Fragment { Light _group_0_binding_1_fs[]; }; uniform highp sampler2DArrayShadow _group_0_binding_2_fs; diff --git a/naga/tests/out/glsl/shadow.fs_main_without_storage.Fragment.glsl 
b/naga/tests/out/glsl/shadow.fs_main_without_storage.Fragment.glsl index 57677c91a6..a9fcf31d56 100644 --- a/naga/tests/out/glsl/shadow.fs_main_without_storage.Fragment.glsl +++ b/naga/tests/out/glsl/shadow.fs_main_without_storage.Fragment.glsl @@ -28,7 +28,7 @@ uniform Globals_block_0Fragment { Globals _group_0_binding_0_fs; }; uniform Entity_block_1Fragment { Entity _group_1_binding_0_fs; }; -uniform type_7_block_2Fragment { Light _group_0_binding_1_fs[10]; }; +uniform type_9_block_2Fragment { Light _group_0_binding_1_fs[10]; }; uniform highp sampler2DArrayShadow _group_0_binding_2_fs; diff --git a/naga/tests/out/hlsl/cross.hlsl b/naga/tests/out/hlsl/cross.hlsl new file mode 100644 index 0000000000..96696c5066 --- /dev/null +++ b/naga/tests/out/hlsl/cross.hlsl @@ -0,0 +1,5 @@ +[numthreads(1, 1, 1)] +void main() +{ + float3 a = cross(float3(0.0, 1.0, 2.0), float3(0.0, 1.0, 2.0)); +} diff --git a/naga/tests/out/hlsl/cross.ron b/naga/tests/out/hlsl/cross.ron new file mode 100644 index 0000000000..a07b03300b --- /dev/null +++ b/naga/tests/out/hlsl/cross.ron @@ -0,0 +1,12 @@ +( + vertex:[ + ], + fragment:[ + ], + compute:[ + ( + entry_point:"main", + target_profile:"cs_5_1", + ), + ], +) diff --git a/naga/tests/out/hlsl/interpolate.hlsl b/naga/tests/out/hlsl/interpolate.hlsl index 29fd45e0ff..ebb6e83477 100644 --- a/naga/tests/out/hlsl/interpolate.hlsl +++ b/naga/tests/out/hlsl/interpolate.hlsl @@ -1,33 +1,45 @@ struct FragmentInput { float4 position : SV_Position; nointerpolation uint _flat : LOC0; - noperspective float _linear : LOC1; - noperspective centroid float2 linear_centroid : LOC2; - noperspective sample float3 linear_sample : LOC3; - float4 perspective : LOC4; - centroid float perspective_centroid : LOC5; - sample float perspective_sample : LOC6; + nointerpolation uint flat_first : LOC1; + nointerpolation uint flat_either : LOC2; + noperspective float _linear : LOC3; + noperspective centroid float2 linear_centroid : LOC4; + noperspective sample float3 
linear_sample : LOC6; + noperspective float3 linear_center : LOC7; + float4 perspective : LOC8; + centroid float perspective_centroid : LOC9; + sample float perspective_sample : LOC10; + float perspective_center : LOC11; }; struct VertexOutput_vert_main { nointerpolation uint _flat : LOC0; - noperspective float _linear : LOC1; - noperspective centroid float2 linear_centroid : LOC2; - noperspective sample float3 linear_sample : LOC3; - float4 perspective : LOC4; - centroid float perspective_centroid : LOC5; - sample float perspective_sample : LOC6; + nointerpolation uint flat_first : LOC1; + nointerpolation uint flat_either : LOC2; + noperspective float _linear : LOC3; + noperspective centroid float2 linear_centroid : LOC4; + noperspective sample float3 linear_sample : LOC6; + noperspective float3 linear_center : LOC7; + float4 perspective : LOC8; + centroid float perspective_centroid : LOC9; + sample float perspective_sample : LOC10; + float perspective_center : LOC11; float4 position : SV_Position; }; struct FragmentInput_frag_main { nointerpolation uint _flat_1 : LOC0; - noperspective float _linear_1 : LOC1; - noperspective centroid float2 linear_centroid_1 : LOC2; - noperspective sample float3 linear_sample_1 : LOC3; - float4 perspective_1 : LOC4; - centroid float perspective_centroid_1 : LOC5; - sample float perspective_sample_1 : LOC6; + nointerpolation uint flat_first_1 : LOC1; + nointerpolation uint flat_either_1 : LOC2; + noperspective float _linear_1 : LOC3; + noperspective centroid float2 linear_centroid_1 : LOC4; + noperspective sample float3 linear_sample_1 : LOC6; + noperspective float3 linear_center_1 : LOC7; + float4 perspective_1 : LOC8; + centroid float perspective_centroid_1 : LOC9; + sample float perspective_sample_1 : LOC10; + float perspective_center_1 : LOC11; float4 position_1 : SV_Position; }; @@ -37,20 +49,24 @@ VertexOutput_vert_main vert_main() out_.position = float4(2.0, 4.0, 5.0, 6.0); out_._flat = 8u; + out_.flat_first = 9u; + 
out_.flat_either = 10u; out_._linear = 27.0; out_.linear_centroid = float2(64.0, 125.0); out_.linear_sample = float3(216.0, 343.0, 512.0); + out_.linear_center = float3(255.0, 511.0, 1024.0); out_.perspective = float4(729.0, 1000.0, 1331.0, 1728.0); out_.perspective_centroid = 2197.0; out_.perspective_sample = 2744.0; - FragmentInput _e30 = out_; - const FragmentInput fragmentinput = _e30; - const VertexOutput_vert_main fragmentinput_1 = { fragmentinput._flat, fragmentinput._linear, fragmentinput.linear_centroid, fragmentinput.linear_sample, fragmentinput.perspective, fragmentinput.perspective_centroid, fragmentinput.perspective_sample, fragmentinput.position }; + out_.perspective_center = 2812.0; + FragmentInput _e41 = out_; + const FragmentInput fragmentinput = _e41; + const VertexOutput_vert_main fragmentinput_1 = { fragmentinput._flat, fragmentinput.flat_first, fragmentinput.flat_either, fragmentinput._linear, fragmentinput.linear_centroid, fragmentinput.linear_sample, fragmentinput.linear_center, fragmentinput.perspective, fragmentinput.perspective_centroid, fragmentinput.perspective_sample, fragmentinput.perspective_center, fragmentinput.position }; return fragmentinput_1; } void frag_main(FragmentInput_frag_main fragmentinput_frag_main) { - FragmentInput val = { fragmentinput_frag_main.position_1, fragmentinput_frag_main._flat_1, fragmentinput_frag_main._linear_1, fragmentinput_frag_main.linear_centroid_1, fragmentinput_frag_main.linear_sample_1, fragmentinput_frag_main.perspective_1, fragmentinput_frag_main.perspective_centroid_1, fragmentinput_frag_main.perspective_sample_1 }; + FragmentInput val = { fragmentinput_frag_main.position_1, fragmentinput_frag_main._flat_1, fragmentinput_frag_main.flat_first_1, fragmentinput_frag_main.flat_either_1, fragmentinput_frag_main._linear_1, fragmentinput_frag_main.linear_centroid_1, fragmentinput_frag_main.linear_sample_1, fragmentinput_frag_main.linear_center_1, fragmentinput_frag_main.perspective_1, 
fragmentinput_frag_main.perspective_centroid_1, fragmentinput_frag_main.perspective_sample_1, fragmentinput_frag_main.perspective_center_1 }; return; } diff --git a/naga/tests/out/hlsl/interpolate_compat.hlsl b/naga/tests/out/hlsl/interpolate_compat.hlsl new file mode 100644 index 0000000000..85f1bb001c --- /dev/null +++ b/naga/tests/out/hlsl/interpolate_compat.hlsl @@ -0,0 +1,68 @@ +struct FragmentInput { + float4 position : SV_Position; + nointerpolation uint _flat : LOC0; + nointerpolation uint flat_either : LOC2; + noperspective float _linear : LOC3; + noperspective centroid float2 linear_centroid : LOC4; + noperspective sample float3 linear_sample : LOC6; + noperspective float3 linear_center : LOC7; + float4 perspective : LOC8; + centroid float perspective_centroid : LOC9; + sample float perspective_sample : LOC10; + float perspective_center : LOC11; +}; + +struct VertexOutput_vert_main { + nointerpolation uint _flat : LOC0; + nointerpolation uint flat_either : LOC2; + noperspective float _linear : LOC3; + noperspective centroid float2 linear_centroid : LOC4; + noperspective sample float3 linear_sample : LOC6; + noperspective float3 linear_center : LOC7; + float4 perspective : LOC8; + centroid float perspective_centroid : LOC9; + sample float perspective_sample : LOC10; + float perspective_center : LOC11; + float4 position : SV_Position; +}; + +struct FragmentInput_frag_main { + nointerpolation uint _flat_1 : LOC0; + nointerpolation uint flat_either_1 : LOC2; + noperspective float _linear_1 : LOC3; + noperspective centroid float2 linear_centroid_1 : LOC4; + noperspective sample float3 linear_sample_1 : LOC6; + noperspective float3 linear_center_1 : LOC7; + float4 perspective_1 : LOC8; + centroid float perspective_centroid_1 : LOC9; + sample float perspective_sample_1 : LOC10; + float perspective_center_1 : LOC11; + float4 position_1 : SV_Position; +}; + +VertexOutput_vert_main vert_main() +{ + FragmentInput out_ = (FragmentInput)0; + + out_.position = 
float4(2.0, 4.0, 5.0, 6.0); + out_._flat = 8u; + out_.flat_either = 10u; + out_._linear = 27.0; + out_.linear_centroid = float2(64.0, 125.0); + out_.linear_sample = float3(216.0, 343.0, 512.0); + out_.linear_center = float3(255.0, 511.0, 1024.0); + out_.perspective = float4(729.0, 1000.0, 1331.0, 1728.0); + out_.perspective_centroid = 2197.0; + out_.perspective_sample = 2744.0; + out_.perspective_center = 2812.0; + FragmentInput _e39 = out_; + const FragmentInput fragmentinput = _e39; + const VertexOutput_vert_main fragmentinput_1 = { fragmentinput._flat, fragmentinput.flat_either, fragmentinput._linear, fragmentinput.linear_centroid, fragmentinput.linear_sample, fragmentinput.linear_center, fragmentinput.perspective, fragmentinput.perspective_centroid, fragmentinput.perspective_sample, fragmentinput.perspective_center, fragmentinput.position }; + return fragmentinput_1; +} + +void frag_main(FragmentInput_frag_main fragmentinput_frag_main) +{ + FragmentInput val = { fragmentinput_frag_main.position_1, fragmentinput_frag_main._flat_1, fragmentinput_frag_main.flat_either_1, fragmentinput_frag_main._linear_1, fragmentinput_frag_main.linear_centroid_1, fragmentinput_frag_main.linear_sample_1, fragmentinput_frag_main.linear_center_1, fragmentinput_frag_main.perspective_1, fragmentinput_frag_main.perspective_centroid_1, fragmentinput_frag_main.perspective_sample_1, fragmentinput_frag_main.perspective_center_1 }; + return; +} diff --git a/naga/tests/out/hlsl/interpolate_compat.ron b/naga/tests/out/hlsl/interpolate_compat.ron new file mode 100644 index 0000000000..d0046b04dd --- /dev/null +++ b/naga/tests/out/hlsl/interpolate_compat.ron @@ -0,0 +1,16 @@ +( + vertex:[ + ( + entry_point:"vert_main", + target_profile:"vs_5_1", + ), + ], + fragment:[ + ( + entry_point:"frag_main", + target_profile:"ps_5_1", + ), + ], + compute:[ + ], +) diff --git a/naga/tests/out/ir/access.compact.ron b/naga/tests/out/ir/access.compact.ron index fd9405f2d0..1b95742ff2 100644 --- 
a/naga/tests/out/ir/access.compact.ron +++ b/naga/tests/out/ir/access.compact.ron @@ -64,6 +64,13 @@ span: 8, ), ), + ( + name: None, + inner: Scalar(( + kind: Float, + width: 4, + )), + ), ( name: None, inner: Matrix( @@ -89,7 +96,7 @@ ( name: None, inner: Array( - base: 6, + base: 7, size: Constant(2), stride: 16, ), @@ -104,7 +111,7 @@ ( name: None, inner: Array( - base: 8, + base: 9, size: Constant(10), stride: 4, ), @@ -122,7 +129,7 @@ ( name: None, inner: Array( - base: 10, + base: 11, size: Constant(2), stride: 8, ), @@ -141,37 +148,37 @@ members: [ ( name: Some("_matrix"), - ty: 5, + ty: 6, binding: None, offset: 0, ), ( name: Some("matrix_array"), - ty: 7, + ty: 8, binding: None, offset: 64, ), ( name: Some("atom"), - ty: 8, + ty: 9, binding: None, offset: 96, ), ( name: Some("atom_arr"), - ty: 9, + ty: 10, binding: None, offset: 100, ), ( name: Some("arr"), - ty: 11, + ty: 12, binding: None, offset: 144, ), ( name: Some("data"), - ty: 12, + ty: 13, binding: None, offset: 160, ), @@ -196,7 +203,7 @@ members: [ ( name: Some("m"), - ty: 14, + ty: 15, binding: None, offset: 0, ), @@ -228,7 +235,7 @@ ( name: None, inner: Array( - base: 17, + base: 18, size: Constant(2), stride: 32, ), @@ -239,7 +246,7 @@ members: [ ( name: Some("am"), - ty: 18, + ty: 19, binding: None, offset: 0, ), @@ -247,24 +254,17 @@ span: 64, ), ), - ( - name: None, - inner: Scalar(( - kind: Float, - width: 4, - )), - ), ( name: None, inner: Pointer( - base: 20, + base: 5, space: Function, ), ), ( name: None, inner: Array( - base: 20, + base: 5, size: Constant(10), stride: 4, ), @@ -342,7 +342,7 @@ group: 0, binding: 0, )), - ty: 13, + ty: 14, init: None, ), ( @@ -352,7 +352,7 @@ group: 0, binding: 1, )), - ty: 15, + ty: 16, init: None, ), ( @@ -364,7 +364,7 @@ group: 0, binding: 2, )), - ty: 16, + ty: 17, init: None, ), ( @@ -374,7 +374,7 @@ group: 0, binding: 3, )), - ty: 19, + ty: 20, init: None, ), ], @@ -414,7 +414,7 @@ ), ( name: Some("t"), - ty: 15, + ty: 16, init: Some(48), ), ], 
@@ -557,7 +557,7 @@ value: 45, ), Compose( - ty: 14, + ty: 15, components: [ 42, 44, @@ -565,7 +565,7 @@ ], ), Compose( - ty: 15, + ty: 16, components: [ 47, ], @@ -600,7 +600,7 @@ value: 58, ), Compose( - ty: 14, + ty: 15, components: [ 55, 57, @@ -900,7 +900,7 @@ ), ( name: Some("t"), - ty: 19, + ty: 20, init: Some(52), ), ], @@ -1063,9 +1063,9 @@ Load( pointer: 49, ), - ZeroValue(18), + ZeroValue(19), Compose( - ty: 19, + ty: 20, components: [ 51, ], @@ -1084,7 +1084,7 @@ base: 53, index: 0, ), - ZeroValue(18), + ZeroValue(19), AccessIndex( base: 53, index: 0, @@ -1114,7 +1114,7 @@ value: 67, ), Compose( - ty: 17, + ty: 18, components: [ 62, 64, @@ -1502,7 +1502,7 @@ ), ], result: Some(( - ty: 20, + ty: 5, binding: None, )), local_variables: [], @@ -1535,7 +1535,7 @@ ), ], result: Some(( - ty: 20, + ty: 5, binding: None, )), local_variables: [], @@ -1680,7 +1680,7 @@ local_variables: [ ( name: Some("foo"), - ty: 20, + ty: 5, init: Some(1), ), ( @@ -2017,7 +2017,7 @@ value: 13, ), Compose( - ty: 5, + ty: 6, components: [ 8, 10, @@ -2041,7 +2041,7 @@ value: 20, ), Compose( - ty: 11, + ty: 12, components: [ 19, 21, @@ -2062,7 +2062,7 @@ ), Literal(I32(1)), GlobalVariable(3), - ZeroValue(16), + ZeroValue(17), Literal(F32(0.0)), Splat( size: Quad, diff --git a/naga/tests/out/ir/access.ron b/naga/tests/out/ir/access.ron index fd9405f2d0..1b95742ff2 100644 --- a/naga/tests/out/ir/access.ron +++ b/naga/tests/out/ir/access.ron @@ -64,6 +64,13 @@ span: 8, ), ), + ( + name: None, + inner: Scalar(( + kind: Float, + width: 4, + )), + ), ( name: None, inner: Matrix( @@ -89,7 +96,7 @@ ( name: None, inner: Array( - base: 6, + base: 7, size: Constant(2), stride: 16, ), @@ -104,7 +111,7 @@ ( name: None, inner: Array( - base: 8, + base: 9, size: Constant(10), stride: 4, ), @@ -122,7 +129,7 @@ ( name: None, inner: Array( - base: 10, + base: 11, size: Constant(2), stride: 8, ), @@ -141,37 +148,37 @@ members: [ ( name: Some("_matrix"), - ty: 5, + ty: 6, binding: None, offset: 0, ), ( 
name: Some("matrix_array"), - ty: 7, + ty: 8, binding: None, offset: 64, ), ( name: Some("atom"), - ty: 8, + ty: 9, binding: None, offset: 96, ), ( name: Some("atom_arr"), - ty: 9, + ty: 10, binding: None, offset: 100, ), ( name: Some("arr"), - ty: 11, + ty: 12, binding: None, offset: 144, ), ( name: Some("data"), - ty: 12, + ty: 13, binding: None, offset: 160, ), @@ -196,7 +203,7 @@ members: [ ( name: Some("m"), - ty: 14, + ty: 15, binding: None, offset: 0, ), @@ -228,7 +235,7 @@ ( name: None, inner: Array( - base: 17, + base: 18, size: Constant(2), stride: 32, ), @@ -239,7 +246,7 @@ members: [ ( name: Some("am"), - ty: 18, + ty: 19, binding: None, offset: 0, ), @@ -247,24 +254,17 @@ span: 64, ), ), - ( - name: None, - inner: Scalar(( - kind: Float, - width: 4, - )), - ), ( name: None, inner: Pointer( - base: 20, + base: 5, space: Function, ), ), ( name: None, inner: Array( - base: 20, + base: 5, size: Constant(10), stride: 4, ), @@ -342,7 +342,7 @@ group: 0, binding: 0, )), - ty: 13, + ty: 14, init: None, ), ( @@ -352,7 +352,7 @@ group: 0, binding: 1, )), - ty: 15, + ty: 16, init: None, ), ( @@ -364,7 +364,7 @@ group: 0, binding: 2, )), - ty: 16, + ty: 17, init: None, ), ( @@ -374,7 +374,7 @@ group: 0, binding: 3, )), - ty: 19, + ty: 20, init: None, ), ], @@ -414,7 +414,7 @@ ), ( name: Some("t"), - ty: 15, + ty: 16, init: Some(48), ), ], @@ -557,7 +557,7 @@ value: 45, ), Compose( - ty: 14, + ty: 15, components: [ 42, 44, @@ -565,7 +565,7 @@ ], ), Compose( - ty: 15, + ty: 16, components: [ 47, ], @@ -600,7 +600,7 @@ value: 58, ), Compose( - ty: 14, + ty: 15, components: [ 55, 57, @@ -900,7 +900,7 @@ ), ( name: Some("t"), - ty: 19, + ty: 20, init: Some(52), ), ], @@ -1063,9 +1063,9 @@ Load( pointer: 49, ), - ZeroValue(18), + ZeroValue(19), Compose( - ty: 19, + ty: 20, components: [ 51, ], @@ -1084,7 +1084,7 @@ base: 53, index: 0, ), - ZeroValue(18), + ZeroValue(19), AccessIndex( base: 53, index: 0, @@ -1114,7 +1114,7 @@ value: 67, ), Compose( - ty: 17, + ty: 18, 
components: [ 62, 64, @@ -1502,7 +1502,7 @@ ), ], result: Some(( - ty: 20, + ty: 5, binding: None, )), local_variables: [], @@ -1535,7 +1535,7 @@ ), ], result: Some(( - ty: 20, + ty: 5, binding: None, )), local_variables: [], @@ -1680,7 +1680,7 @@ local_variables: [ ( name: Some("foo"), - ty: 20, + ty: 5, init: Some(1), ), ( @@ -2017,7 +2017,7 @@ value: 13, ), Compose( - ty: 5, + ty: 6, components: [ 8, 10, @@ -2041,7 +2041,7 @@ value: 20, ), Compose( - ty: 11, + ty: 12, components: [ 19, 21, @@ -2062,7 +2062,7 @@ ), Literal(I32(1)), GlobalVariable(3), - ZeroValue(16), + ZeroValue(17), Literal(F32(0.0)), Splat( size: Quad, diff --git a/naga/tests/out/ir/atomic_i_increment.compact.ron b/naga/tests/out/ir/atomic_i_increment.compact.ron index 58b01f5870..12a4692a3e 100644 --- a/naga/tests/out/ir/atomic_i_increment.compact.ron +++ b/naga/tests/out/ir/atomic_i_increment.compact.ron @@ -216,10 +216,6 @@ ), ], reject: [ - Emit(( - start: 13, - end: 14, - )), Atomic( pointer: 7, fun: Add, diff --git a/naga/tests/out/ir/atomic_i_increment.ron b/naga/tests/out/ir/atomic_i_increment.ron index 2c55289218..82fa975024 100644 --- a/naga/tests/out/ir/atomic_i_increment.ron +++ b/naga/tests/out/ir/atomic_i_increment.ron @@ -241,10 +241,6 @@ ), ], reject: [ - Emit(( - start: 14, - end: 15, - )), Atomic( pointer: 8, fun: Add, diff --git a/naga/tests/out/ir/const_assert.compact.ron b/naga/tests/out/ir/const_assert.compact.ron new file mode 100644 index 0000000000..9dce67b5f9 --- /dev/null +++ b/naga/tests/out/ir/const_assert.compact.ron @@ -0,0 +1,54 @@ +( + types: [ + ( + name: None, + inner: Scalar(( + kind: Sint, + width: 4, + )), + ), + ], + special_types: ( + ray_desc: None, + ray_intersection: None, + predeclared_types: {}, + ), + constants: [ + ( + name: Some("x"), + ty: 0, + init: 0, + ), + ( + name: Some("y"), + ty: 0, + init: 1, + ), + ], + overrides: [], + global_variables: [], + global_expressions: [ + Literal(I32(1)), + Literal(I32(2)), + ], + functions: [ + ( + name: 
Some("foo"), + arguments: [], + result: None, + local_variables: [], + expressions: [ + Literal(I32(1)), + ], + named_expressions: { + 0: "z", + }, + body: [ + Return( + value: None, + ), + ], + ), + ], + entry_points: [], +) \ No newline at end of file diff --git a/naga/tests/out/ir/const_assert.ron b/naga/tests/out/ir/const_assert.ron new file mode 100644 index 0000000000..9dce67b5f9 --- /dev/null +++ b/naga/tests/out/ir/const_assert.ron @@ -0,0 +1,54 @@ +( + types: [ + ( + name: None, + inner: Scalar(( + kind: Sint, + width: 4, + )), + ), + ], + special_types: ( + ray_desc: None, + ray_intersection: None, + predeclared_types: {}, + ), + constants: [ + ( + name: Some("x"), + ty: 0, + init: 0, + ), + ( + name: Some("y"), + ty: 0, + init: 1, + ), + ], + overrides: [], + global_variables: [], + global_expressions: [ + Literal(I32(1)), + Literal(I32(2)), + ], + functions: [ + ( + name: Some("foo"), + arguments: [], + result: None, + local_variables: [], + expressions: [ + Literal(I32(1)), + ], + named_expressions: { + 0: "z", + }, + body: [ + Return( + value: None, + ), + ], + ), + ], + entry_points: [], +) \ No newline at end of file diff --git a/naga/tests/out/ir/local-const.compact.ron b/naga/tests/out/ir/local-const.compact.ron new file mode 100644 index 0000000000..a9b9f32af8 --- /dev/null +++ b/naga/tests/out/ir/local-const.compact.ron @@ -0,0 +1,139 @@ +( + types: [ + ( + name: None, + inner: Scalar(( + kind: Sint, + width: 4, + )), + ), + ( + name: None, + inner: Scalar(( + kind: Uint, + width: 4, + )), + ), + ( + name: None, + inner: Scalar(( + kind: Float, + width: 4, + )), + ), + ( + name: None, + inner: Vector( + size: Tri, + scalar: ( + kind: Sint, + width: 4, + ), + ), + ), + ], + special_types: ( + ray_desc: None, + ray_intersection: None, + predeclared_types: {}, + ), + constants: [ + ( + name: Some("ga"), + ty: 0, + init: 0, + ), + ( + name: Some("gb"), + ty: 0, + init: 1, + ), + ( + name: Some("gc"), + ty: 1, + init: 2, + ), + ( + name: Some("gd"), 
+ ty: 2, + init: 3, + ), + ( + name: Some("ge"), + ty: 3, + init: 4, + ), + ( + name: Some("gf"), + ty: 2, + init: 5, + ), + ], + overrides: [], + global_variables: [], + global_expressions: [ + Literal(I32(4)), + Literal(I32(4)), + Literal(U32(4)), + Literal(F32(4.0)), + Compose( + ty: 3, + components: [ + 0, + 0, + 0, + ], + ), + Literal(F32(2.0)), + ], + functions: [ + ( + name: Some("const_in_fn"), + arguments: [], + result: None, + local_variables: [], + expressions: [ + Literal(I32(4)), + Literal(I32(4)), + Literal(U32(4)), + Literal(F32(4.0)), + Compose( + ty: 3, + components: [ + 0, + 0, + 0, + ], + ), + Literal(F32(2.0)), + Constant(0), + Constant(1), + Constant(2), + Constant(3), + Constant(4), + Constant(5), + ], + named_expressions: { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "ag", + 7: "bg", + 8: "cg", + 9: "dg", + 10: "eg", + 11: "fg", + }, + body: [ + Emit(( + start: 4, + end: 5, + )), + ], + ), + ], + entry_points: [], +) \ No newline at end of file diff --git a/naga/tests/out/ir/local-const.ron b/naga/tests/out/ir/local-const.ron new file mode 100644 index 0000000000..a9b9f32af8 --- /dev/null +++ b/naga/tests/out/ir/local-const.ron @@ -0,0 +1,139 @@ +( + types: [ + ( + name: None, + inner: Scalar(( + kind: Sint, + width: 4, + )), + ), + ( + name: None, + inner: Scalar(( + kind: Uint, + width: 4, + )), + ), + ( + name: None, + inner: Scalar(( + kind: Float, + width: 4, + )), + ), + ( + name: None, + inner: Vector( + size: Tri, + scalar: ( + kind: Sint, + width: 4, + ), + ), + ), + ], + special_types: ( + ray_desc: None, + ray_intersection: None, + predeclared_types: {}, + ), + constants: [ + ( + name: Some("ga"), + ty: 0, + init: 0, + ), + ( + name: Some("gb"), + ty: 0, + init: 1, + ), + ( + name: Some("gc"), + ty: 1, + init: 2, + ), + ( + name: Some("gd"), + ty: 2, + init: 3, + ), + ( + name: Some("ge"), + ty: 3, + init: 4, + ), + ( + name: Some("gf"), + ty: 2, + init: 5, + ), + ], + overrides: [], + global_variables: [], + 
global_expressions: [ + Literal(I32(4)), + Literal(I32(4)), + Literal(U32(4)), + Literal(F32(4.0)), + Compose( + ty: 3, + components: [ + 0, + 0, + 0, + ], + ), + Literal(F32(2.0)), + ], + functions: [ + ( + name: Some("const_in_fn"), + arguments: [], + result: None, + local_variables: [], + expressions: [ + Literal(I32(4)), + Literal(I32(4)), + Literal(U32(4)), + Literal(F32(4.0)), + Compose( + ty: 3, + components: [ + 0, + 0, + 0, + ], + ), + Literal(F32(2.0)), + Constant(0), + Constant(1), + Constant(2), + Constant(3), + Constant(4), + Constant(5), + ], + named_expressions: { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "ag", + 7: "bg", + 8: "cg", + 9: "dg", + 10: "eg", + 11: "fg", + }, + body: [ + Emit(( + start: 4, + end: 5, + )), + ], + ), + ], + entry_points: [], +) \ No newline at end of file diff --git a/naga/tests/out/msl/abstract-types-const.msl b/naga/tests/out/msl/abstract-types-const.msl index af4de25cc6..16a3e35b9a 100644 --- a/naga/tests/out/msl/abstract-types-const.msl +++ b/naga/tests/out/msl/abstract-types-const.msl @@ -4,7 +4,7 @@ using metal::uint; -struct type_5 { +struct type_7 { float inner[2]; }; struct S { @@ -32,12 +32,12 @@ constant metal::int2 ivis_ai = metal::int2(1); constant metal::uint2 ivus_ai = metal::uint2(1u); constant metal::float2 ivfs_ai = metal::float2(1.0); constant metal::float2 ivfs_af = metal::float2(1.0); -constant type_5 iafafaf = type_5 {1.0, 2.0}; -constant type_5 iafaiai = type_5 {1.0, 2.0}; -constant type_5 iafpafaf = type_5 {1.0, 2.0}; -constant type_5 iafpaiaf = type_5 {1.0, 2.0}; -constant type_5 iafpafai = type_5 {1.0, 2.0}; -constant type_5 xafpafaf = type_5 {1.0, 2.0}; +constant type_7 iafafaf = type_7 {1.0, 2.0}; +constant type_7 iafaiai = type_7 {1.0, 2.0}; +constant type_7 iafpafaf = type_7 {1.0, 2.0}; +constant type_7 iafpaiaf = type_7 {1.0, 2.0}; +constant type_7 iafpafai = type_7 {1.0, 2.0}; +constant type_7 xafpafaf = type_7 {1.0, 2.0}; constant S s_f_i_u = S {1.0, 1, 1u}; constant 
S s_f_iai = S {1.0, 1, 1u}; constant S s_fai_u = S {1.0, 1, 1u}; diff --git a/naga/tests/out/msl/abstract-types-var.msl b/naga/tests/out/msl/abstract-types-var.msl index 45096f8672..7d5623469b 100644 --- a/naga/tests/out/msl/abstract-types-var.msl +++ b/naga/tests/out/msl/abstract-types-var.msl @@ -4,10 +4,10 @@ using metal::uint; -struct type_5 { +struct type_7 { float inner[2]; }; -struct type_7 { +struct type_8 { int inner[2]; }; @@ -35,17 +35,17 @@ void all_constant_arguments( metal::uint2 xvus_ai = metal::uint2(1u); metal::float2 xvfs_ai = metal::float2(1.0); metal::float2 xvfs_af = metal::float2(1.0); - type_5 xafafaf = type_5 {1.0, 2.0}; - type_5 xaf_faf = type_5 {1.0, 2.0}; - type_5 xafaf_f = type_5 {1.0, 2.0}; - type_5 xafaiai = type_5 {1.0, 2.0}; - type_7 xai_iai = type_7 {1, 2}; - type_7 xaiai_i = type_7 {1, 2}; - type_7 xaipaiai = type_7 {1, 2}; - type_5 xafpaiai = type_5 {1.0, 2.0}; - type_5 xafpaiaf = type_5 {1.0, 2.0}; - type_5 xafpafai = type_5 {1.0, 2.0}; - type_5 xafpafaf = type_5 {1.0, 2.0}; + type_7 xafafaf = type_7 {1.0, 2.0}; + type_7 xaf_faf = type_7 {1.0, 2.0}; + type_7 xafaf_f = type_7 {1.0, 2.0}; + type_7 xafaiai = type_7 {1.0, 2.0}; + type_8 xai_iai = type_8 {1, 2}; + type_8 xaiai_i = type_8 {1, 2}; + type_8 xaipaiai = type_8 {1, 2}; + type_7 xafpaiai = type_7 {1.0, 2.0}; + type_7 xafpaiaf = type_7 {1.0, 2.0}; + type_7 xafpafai = type_7 {1.0, 2.0}; + type_7 xafpafaf = type_7 {1.0, 2.0}; } void mixed_constant_and_runtime_arguments( @@ -61,18 +61,18 @@ void mixed_constant_and_runtime_arguments( metal::float2x2 xmfpai_faiai_1 = {}; metal::float2x2 xmfpaiai_fai_1 = {}; metal::float2x2 xmfpaiaiai_f_1 = {}; - type_5 xaf_faf_1 = {}; - type_5 xafaf_f_1 = {}; - type_5 xaf_fai = {}; - type_5 xafai_f = {}; - type_7 xai_iai_1 = {}; - type_7 xaiai_i_1 = {}; - type_5 xafp_faf = {}; - type_5 xafpaf_f = {}; - type_5 xafp_fai = {}; - type_5 xafpai_f = {}; - type_7 xaip_iai = {}; - type_7 xaipai_i = {}; + type_7 xaf_faf_1 = {}; + type_7 xafaf_f_1 = {}; + 
type_7 xaf_fai = {}; + type_7 xafai_f = {}; + type_8 xai_iai_1 = {}; + type_8 xaiai_i_1 = {}; + type_7 xafp_faf = {}; + type_7 xafpaf_f = {}; + type_7 xafp_fai = {}; + type_7 xafpai_f = {}; + type_8 xaip_iai = {}; + type_8 xaipai_i = {}; uint _e3 = u; xvupuai_1 = metal::uint2(_e3, 43u); uint _e7 = u; @@ -90,28 +90,28 @@ void mixed_constant_and_runtime_arguments( float _e43 = f; xmfpaiaiai_f_1 = metal::float2x2(metal::float2(1.0, 2.0), metal::float2(3.0, _e43)); float _e51 = f; - xaf_faf_1 = type_5 {_e51, 2.0}; + xaf_faf_1 = type_7 {_e51, 2.0}; float _e55 = f; - xafaf_f_1 = type_5 {1.0, _e55}; + xafaf_f_1 = type_7 {1.0, _e55}; float _e59 = f; - xaf_fai = type_5 {_e59, 2.0}; + xaf_fai = type_7 {_e59, 2.0}; float _e63 = f; - xafai_f = type_5 {1.0, _e63}; + xafai_f = type_7 {1.0, _e63}; int _e67 = i; - xai_iai_1 = type_7 {_e67, 2}; + xai_iai_1 = type_8 {_e67, 2}; int _e71 = i; - xaiai_i_1 = type_7 {1, _e71}; + xaiai_i_1 = type_8 {1, _e71}; float _e75 = f; - xafp_faf = type_5 {_e75, 2.0}; + xafp_faf = type_7 {_e75, 2.0}; float _e79 = f; - xafpaf_f = type_5 {1.0, _e79}; + xafpaf_f = type_7 {1.0, _e79}; float _e83 = f; - xafp_fai = type_5 {_e83, 2.0}; + xafp_fai = type_7 {_e83, 2.0}; float _e87 = f; - xafpai_f = type_5 {1.0, _e87}; + xafpai_f = type_7 {1.0, _e87}; int _e91 = i; - xaip_iai = type_7 {_e91, 2}; + xaip_iai = type_8 {_e91, 2}; int _e95 = i; - xaipai_i = type_7 {1, _e95}; + xaipai_i = type_8 {1, _e95}; return; } diff --git a/naga/tests/out/msl/access.msl b/naga/tests/out/msl/access.msl index 908535ea31..65dba4910e 100644 --- a/naga/tests/out/msl/access.msl +++ b/naga/tests/out/msl/access.msl @@ -17,33 +17,33 @@ struct GlobalConst { struct AlignedWrapper { int value; }; -struct type_5 { +struct type_6 { metal::float2x2 inner[2]; }; -struct type_7 { +struct type_8 { metal::atomic_int inner[10]; }; -struct type_9 { +struct type_10 { metal::uint2 inner[2]; }; -typedef AlignedWrapper type_10[1]; +typedef AlignedWrapper type_11[1]; struct Bar { metal::float4x3 
_matrix; - type_5 matrix_array; + type_6 matrix_array; metal::atomic_int atom; - type_7 atom_arr; + type_8 atom_arr; char _pad4[4]; - type_9 arr; - type_10 data; + type_10 arr; + type_11 data; }; struct Baz { metal::float3x2 m; }; -struct type_14 { +struct type_15 { metal::float4x2 inner[2]; }; struct MatCx2InArray { - type_14 am; + type_15 am; }; struct type_17 { float inner[10]; @@ -98,10 +98,10 @@ void test_matrix_within_array_within_struct_accesses( constant MatCx2InArray& nested_mat_cx2_ ) { int idx_1 = 1; - MatCx2InArray t_1 = MatCx2InArray {type_14 {}}; + MatCx2InArray t_1 = MatCx2InArray {type_15 {}}; int _e3 = idx_1; idx_1 = _e3 - 1; - type_14 l0_1 = nested_mat_cx2_.am; + type_15 l0_1 = nested_mat_cx2_.am; metal::float4x2 l1_1 = nested_mat_cx2_.am.inner[0]; metal::float2 l2_1 = nested_mat_cx2_.am.inner[0][0]; int _e20 = idx_1; @@ -116,7 +116,7 @@ void test_matrix_within_array_within_struct_accesses( float l7_ = nested_mat_cx2_.am.inner[0][_e46][_e48]; int _e55 = idx_1; idx_1 = _e55 + 1; - t_1.am = type_14 {}; + t_1.am = type_15 {}; t_1.am.inner[0] = metal::float4x2(metal::float2(8.0), metal::float2(7.0), metal::float2(6.0), metal::float2(5.0)); t_1.am.inner[0][0] = metal::float2(9.0); int _e77 = idx_1; @@ -179,7 +179,7 @@ vertex foo_vertOutput foo_vert( test_matrix_within_struct_accesses(baz); test_matrix_within_array_within_struct_accesses(nested_mat_cx2_); metal::float4x3 _matrix = bar._matrix; - type_9 arr_1 = bar.arr; + type_10 arr_1 = bar.arr; float b = bar._matrix[3u].x; int a_1 = bar.data[(1 + (_buffer_sizes.size1 - 160 - 8) / 8) - 2u].value; metal::int2 c = qux; @@ -202,7 +202,7 @@ fragment foo_fragOutput foo_frag( ) { bar._matrix[1].z = 1.0; bar._matrix = metal::float4x3(metal::float3(0.0), metal::float3(1.0), metal::float3(2.0), metal::float3(3.0)); - bar.arr = type_9 {metal::uint2(0u), metal::uint2(1u)}; + bar.arr = type_10 {metal::uint2(0u), metal::uint2(1u)}; bar.data[1].value = 1; qux = metal::int2 {}; return foo_fragOutput { 
metal::float4(0.0) }; diff --git a/naga/tests/out/msl/boids.msl b/naga/tests/out/msl/boids.msl index ce1ccc7cc2..0dd520ac74 100644 --- a/naga/tests/out/msl/boids.msl +++ b/naga/tests/out/msl/boids.msl @@ -55,8 +55,9 @@ kernel void main_( vPos = _e8; metal::float2 _e14 = particlesSrc.particles[index].vel; vVel = _e14; +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) bool loop_init = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init) { uint _e91 = i; i = _e91 + 1u; diff --git a/naga/tests/out/msl/break-if.msl b/naga/tests/out/msl/break-if.msl index 8c0d9343b9..3684f7222c 100644 --- a/naga/tests/out/msl/break-if.msl +++ b/naga/tests/out/msl/break-if.msl @@ -7,8 +7,9 @@ using metal::uint; void breakIfEmpty( ) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) bool loop_init = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init) { if (true) { break; @@ -25,7 +26,7 @@ void breakIfEmptyBody( bool b = {}; bool c = {}; bool loop_init_1 = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init_1) { b = a; bool _e2 = b; @@ -46,7 +47,7 @@ void breakIf( bool d = {}; bool e = {}; bool loop_init_2 = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init_2) { bool _e5 = e; if (a_1 == e) { @@ -65,7 +66,7 @@ void breakIfSeparateVariable( ) { uint counter = 0u; bool loop_init_3 = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init_3) { uint _e5 = counter; if (counter == 5u) { diff --git a/naga/tests/out/msl/collatz.msl b/naga/tests/out/msl/collatz.msl index e283741459..1ae910de6f 100644 --- a/naga/tests/out/msl/collatz.msl +++ b/naga/tests/out/msl/collatz.msl @@ -19,7 +19,8 @@ uint collatz_iterations( uint n = {}; uint i = 0u; n = n_base; - while(true) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) + 
LOOP_IS_REACHABLE while(true) { uint _e4 = n; if (_e4 > 1u) { } else { diff --git a/naga/tests/out/msl/constructors.msl b/naga/tests/out/msl/constructors.msl index 6733568a92..d4dc5c5292 100644 --- a/naga/tests/out/msl/constructors.msl +++ b/naga/tests/out/msl/constructors.msl @@ -8,7 +8,7 @@ struct Foo { metal::float4 a; int b; }; -struct type_5 { +struct type_6 { metal::float2x2 inner[1]; }; struct type_10 { @@ -19,7 +19,7 @@ struct type_11 { }; constant metal::float3 const2_ = metal::float3(0.0, 1.0, 2.0); constant metal::float2x2 const3_ = metal::float2x2(metal::float2(0.0, 1.0), metal::float2(2.0, 3.0)); -constant type_5 const4_ = type_5 {metal::float2x2(metal::float2(0.0, 1.0), metal::float2(2.0, 3.0))}; +constant type_6 const4_ = type_6 {metal::float2x2(metal::float2(0.0, 1.0), metal::float2(2.0, 3.0))}; constant bool cz0_ = bool {}; constant int cz1_ = int {}; constant uint cz2_ = uint {}; diff --git a/naga/tests/out/msl/control-flow.msl b/naga/tests/out/msl/control-flow.msl index 11771693aa..dbf75163aa 100644 --- a/naga/tests/out/msl/control-flow.msl +++ b/naga/tests/out/msl/control-flow.msl @@ -31,7 +31,8 @@ void switch_case_break( void loop_switch_continue( int x ) { - while(true) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) + LOOP_IS_REACHABLE while(true) { switch(x) { case 1: { continue; @@ -49,7 +50,7 @@ void loop_switch_continue_nesting( int y, int z ) { - while(true) { + LOOP_IS_REACHABLE while(true) { switch(x_1) { case 1: { continue; @@ -60,7 +61,7 @@ void loop_switch_continue_nesting( continue; } default: { - while(true) { + LOOP_IS_REACHABLE while(true) { switch(z) { case 1: { continue; @@ -85,7 +86,7 @@ void loop_switch_continue_nesting( } } } - while(true) { + LOOP_IS_REACHABLE while(true) { switch(y) { case 1: default: { @@ -108,7 +109,7 @@ void loop_switch_omit_continue_variable_checks( int w ) { int pos_1 = 0; - while(true) { + LOOP_IS_REACHABLE while(true) { switch(x_2) { 
case 1: { pos_1 = 1; @@ -119,7 +120,7 @@ void loop_switch_omit_continue_variable_checks( } } } - while(true) { + LOOP_IS_REACHABLE while(true) { switch(x_2) { case 1: { break; diff --git a/naga/tests/out/msl/cross.msl b/naga/tests/out/msl/cross.msl new file mode 100644 index 0000000000..70095cd6e9 --- /dev/null +++ b/naga/tests/out/msl/cross.msl @@ -0,0 +1,11 @@ +// language: metal1.0 +#include +#include + +using metal::uint; + + +kernel void main_( +) { + metal::float3 a = metal::cross(metal::float3(0.0, 1.0, 2.0), metal::float3(0.0, 1.0, 2.0)); +} diff --git a/naga/tests/out/msl/do-while.msl b/naga/tests/out/msl/do-while.msl index c1b4d08b0e..b093da1dc5 100644 --- a/naga/tests/out/msl/do-while.msl +++ b/naga/tests/out/msl/do-while.msl @@ -8,8 +8,9 @@ using metal::uint; void fb1_( thread bool& cond ) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) bool loop_init = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init) { bool _e1 = cond; if (!(cond)) { diff --git a/naga/tests/out/msl/interpolate.msl b/naga/tests/out/msl/interpolate.msl index 616291253f..c19005753f 100644 --- a/naga/tests/out/msl/interpolate.msl +++ b/naga/tests/out/msl/interpolate.msl @@ -7,54 +7,71 @@ using metal::uint; struct FragmentInput { metal::float4 position; uint _flat; + uint flat_first; + uint flat_either; float _linear; metal::float2 linear_centroid; + char _pad6[8]; metal::float3 linear_sample; + metal::float3 linear_center; metal::float4 perspective; float perspective_centroid; float perspective_sample; + float perspective_center; }; struct vert_mainOutput { metal::float4 position [[position]]; uint _flat [[user(loc0), flat]]; - float _linear [[user(loc1), center_no_perspective]]; - metal::float2 linear_centroid [[user(loc2), centroid_no_perspective]]; - metal::float3 linear_sample [[user(loc3), sample_no_perspective]]; - metal::float4 perspective [[user(loc4), center_perspective]]; - float 
perspective_centroid [[user(loc5), centroid_perspective]]; - float perspective_sample [[user(loc6), sample_perspective]]; + uint flat_first [[user(loc1), flat]]; + uint flat_either [[user(loc2), flat]]; + float _linear [[user(loc3), center_no_perspective]]; + metal::float2 linear_centroid [[user(loc4), centroid_no_perspective]]; + metal::float3 linear_sample [[user(loc6), sample_no_perspective]]; + metal::float3 linear_center [[user(loc7), center_no_perspective]]; + metal::float4 perspective [[user(loc8), center_perspective]]; + float perspective_centroid [[user(loc9), centroid_perspective]]; + float perspective_sample [[user(loc10), sample_perspective]]; + float perspective_center [[user(loc11), center_perspective]]; }; vertex vert_mainOutput vert_main( ) { FragmentInput out = {}; out.position = metal::float4(2.0, 4.0, 5.0, 6.0); out._flat = 8u; + out.flat_first = 9u; + out.flat_either = 10u; out._linear = 27.0; out.linear_centroid = metal::float2(64.0, 125.0); out.linear_sample = metal::float3(216.0, 343.0, 512.0); + out.linear_center = metal::float3(255.0, 511.0, 1024.0); out.perspective = metal::float4(729.0, 1000.0, 1331.0, 1728.0); out.perspective_centroid = 2197.0; out.perspective_sample = 2744.0; - FragmentInput _e30 = out; - const auto _tmp = _e30; - return vert_mainOutput { _tmp.position, _tmp._flat, _tmp._linear, _tmp.linear_centroid, _tmp.linear_sample, _tmp.perspective, _tmp.perspective_centroid, _tmp.perspective_sample }; + out.perspective_center = 2812.0; + FragmentInput _e41 = out; + const auto _tmp = _e41; + return vert_mainOutput { _tmp.position, _tmp._flat, _tmp.flat_first, _tmp.flat_either, _tmp._linear, _tmp.linear_centroid, _tmp.linear_sample, _tmp.linear_center, _tmp.perspective, _tmp.perspective_centroid, _tmp.perspective_sample, _tmp.perspective_center }; } struct frag_mainInput { uint _flat [[user(loc0), flat]]; - float _linear [[user(loc1), center_no_perspective]]; - metal::float2 linear_centroid [[user(loc2), centroid_no_perspective]]; - 
metal::float3 linear_sample [[user(loc3), sample_no_perspective]]; - metal::float4 perspective [[user(loc4), center_perspective]]; - float perspective_centroid [[user(loc5), centroid_perspective]]; - float perspective_sample [[user(loc6), sample_perspective]]; + uint flat_first [[user(loc1), flat]]; + uint flat_either [[user(loc2), flat]]; + float _linear [[user(loc3), center_no_perspective]]; + metal::float2 linear_centroid [[user(loc4), centroid_no_perspective]]; + metal::float3 linear_sample [[user(loc6), sample_no_perspective]]; + metal::float3 linear_center [[user(loc7), center_no_perspective]]; + metal::float4 perspective [[user(loc8), center_perspective]]; + float perspective_centroid [[user(loc9), centroid_perspective]]; + float perspective_sample [[user(loc10), sample_perspective]]; + float perspective_center [[user(loc11), center_perspective]]; }; fragment void frag_main( frag_mainInput varyings_1 [[stage_in]] , metal::float4 position [[position]] ) { - const FragmentInput val = { position, varyings_1._flat, varyings_1._linear, varyings_1.linear_centroid, varyings_1.linear_sample, varyings_1.perspective, varyings_1.perspective_centroid, varyings_1.perspective_sample }; + const FragmentInput val = { position, varyings_1._flat, varyings_1.flat_first, varyings_1.flat_either, varyings_1._linear, varyings_1.linear_centroid, {}, varyings_1.linear_sample, varyings_1.linear_center, varyings_1.perspective, varyings_1.perspective_centroid, varyings_1.perspective_sample, varyings_1.perspective_center }; return; } diff --git a/naga/tests/out/msl/interpolate_compat.msl b/naga/tests/out/msl/interpolate_compat.msl new file mode 100644 index 0000000000..e386c07db0 --- /dev/null +++ b/naga/tests/out/msl/interpolate_compat.msl @@ -0,0 +1,74 @@ +// language: metal1.0 +#include +#include + +using metal::uint; + +struct FragmentInput { + metal::float4 position; + uint _flat; + uint flat_either; + float _linear; + char _pad4[4]; + metal::float2 linear_centroid; + char 
_pad5[8]; + metal::float3 linear_sample; + metal::float3 linear_center; + metal::float4 perspective; + float perspective_centroid; + float perspective_sample; + float perspective_center; +}; + +struct vert_mainOutput { + metal::float4 position [[position]]; + uint _flat [[user(loc0), flat]]; + uint flat_either [[user(loc2), flat]]; + float _linear [[user(loc3), center_no_perspective]]; + metal::float2 linear_centroid [[user(loc4), centroid_no_perspective]]; + metal::float3 linear_sample [[user(loc6), sample_no_perspective]]; + metal::float3 linear_center [[user(loc7), center_no_perspective]]; + metal::float4 perspective [[user(loc8), center_perspective]]; + float perspective_centroid [[user(loc9), centroid_perspective]]; + float perspective_sample [[user(loc10), sample_perspective]]; + float perspective_center [[user(loc11), center_perspective]]; +}; +vertex vert_mainOutput vert_main( +) { + FragmentInput out = {}; + out.position = metal::float4(2.0, 4.0, 5.0, 6.0); + out._flat = 8u; + out.flat_either = 10u; + out._linear = 27.0; + out.linear_centroid = metal::float2(64.0, 125.0); + out.linear_sample = metal::float3(216.0, 343.0, 512.0); + out.linear_center = metal::float3(255.0, 511.0, 1024.0); + out.perspective = metal::float4(729.0, 1000.0, 1331.0, 1728.0); + out.perspective_centroid = 2197.0; + out.perspective_sample = 2744.0; + out.perspective_center = 2812.0; + FragmentInput _e39 = out; + const auto _tmp = _e39; + return vert_mainOutput { _tmp.position, _tmp._flat, _tmp.flat_either, _tmp._linear, _tmp.linear_centroid, _tmp.linear_sample, _tmp.linear_center, _tmp.perspective, _tmp.perspective_centroid, _tmp.perspective_sample, _tmp.perspective_center }; +} + + +struct frag_mainInput { + uint _flat [[user(loc0), flat]]; + uint flat_either [[user(loc2), flat]]; + float _linear [[user(loc3), center_no_perspective]]; + metal::float2 linear_centroid [[user(loc4), centroid_no_perspective]]; + metal::float3 linear_sample [[user(loc6), sample_no_perspective]]; + 
metal::float3 linear_center [[user(loc7), center_no_perspective]]; + metal::float4 perspective [[user(loc8), center_perspective]]; + float perspective_centroid [[user(loc9), centroid_perspective]]; + float perspective_sample [[user(loc10), sample_perspective]]; + float perspective_center [[user(loc11), center_perspective]]; +}; +fragment void frag_main( + frag_mainInput varyings_1 [[stage_in]] +, metal::float4 position [[position]] +) { + const FragmentInput val = { position, varyings_1._flat, varyings_1.flat_either, varyings_1._linear, {}, varyings_1.linear_centroid, {}, varyings_1.linear_sample, varyings_1.linear_center, varyings_1.perspective, varyings_1.perspective_centroid, varyings_1.perspective_sample, varyings_1.perspective_center }; + return; +} diff --git a/naga/tests/out/msl/overrides-ray-query.msl b/naga/tests/out/msl/overrides-ray-query.msl index 3a508b6f61..f2ad45c985 100644 --- a/naga/tests/out/msl/overrides-ray-query.msl +++ b/naga/tests/out/msl/overrides-ray-query.msl @@ -33,7 +33,8 @@ kernel void main_( rq.intersector.force_opacity((desc.flags & 1) != 0 ? metal::raytracing::forced_opacity::opaque : (desc.flags & 2) != 0 ? 
metal::raytracing::forced_opacity::non_opaque : metal::raytracing::forced_opacity::none); rq.intersector.accept_any_intersection((desc.flags & 4) != 0); rq.intersection = rq.intersector.intersect(metal::raytracing::ray(desc.origin, desc.dir, desc.tmin, desc.tmax), acc_struct, desc.cull_mask); rq.ready = true; - while(true) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) + LOOP_IS_REACHABLE while(true) { bool _e31 = rq.ready; rq.ready = false; if (_e31) { diff --git a/naga/tests/out/msl/policy-mix.msl b/naga/tests/out/msl/policy-mix.msl index 24a40179a8..468b6f507a 100644 --- a/naga/tests/out/msl/policy-mix.msl +++ b/naga/tests/out/msl/policy-mix.msl @@ -10,17 +10,17 @@ struct DefaultConstructible { } }; -struct type_1 { +struct type_2 { metal::float4 inner[10]; }; struct InStorage { - type_1 a; + type_2 a; }; -struct type_2 { +struct type_3 { metal::float4 inner[20]; }; struct InUniform { - type_2 a; + type_3 a; }; struct type_5 { float inner[30]; diff --git a/naga/tests/out/msl/ray-query.msl b/naga/tests/out/msl/ray-query.msl index fbdaef5484..129ad108a9 100644 --- a/naga/tests/out/msl/ray-query.msl +++ b/naga/tests/out/msl/ray-query.msl @@ -53,7 +53,8 @@ RayIntersection query_loop( rq.intersector.force_opacity((_e8.flags & 1) != 0 ? metal::raytracing::forced_opacity::opaque : (_e8.flags & 2) != 0 ? 
metal::raytracing::forced_opacity::non_opaque : metal::raytracing::forced_opacity::none); rq.intersector.accept_any_intersection((_e8.flags & 4) != 0); rq.intersection = rq.intersector.intersect(metal::raytracing::ray(_e8.origin, _e8.dir, _e8.tmin, _e8.tmax), acs, _e8.cull_mask); rq.ready = true; - while(true) { +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) + LOOP_IS_REACHABLE while(true) { bool _e9 = rq.ready; rq.ready = false; if (_e9) { diff --git a/naga/tests/out/msl/shadow.msl b/naga/tests/out/msl/shadow.msl index 2443f002f2..f8aeef9d45 100644 --- a/naga/tests/out/msl/shadow.msl +++ b/naga/tests/out/msl/shadow.msl @@ -26,8 +26,8 @@ struct Light { metal::float4 pos; metal::float4 color; }; -typedef Light type_6[1]; -struct type_7 { +typedef Light type_8[1]; +struct type_9 { Light inner[10]; }; constant metal::float3 c_ambient = metal::float3(0.05, 0.05, 0.05); @@ -91,7 +91,7 @@ fragment fs_mainOutput fs_main( , metal::float4 proj_position [[position]] , constant Globals& u_globals [[user(fake0)]] , constant Entity& u_entity [[user(fake0)]] -, device type_6 const& s_lights [[user(fake0)]] +, device type_8 const& s_lights [[user(fake0)]] , metal::depth2d_array t_shadow [[user(fake0)]] , metal::sampler sampler_shadow [[user(fake0)]] , constant _mslBufferSizes& _buffer_sizes [[user(fake0)]] @@ -100,8 +100,9 @@ fragment fs_mainOutput fs_main( metal::float3 color = c_ambient; uint i = 0u; metal::float3 normal_1 = metal::normalize(in.world_normal); +#define LOOP_IS_REACHABLE if (volatile bool unpredictable_jump_over_loop = true; unpredictable_jump_over_loop) bool loop_init = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init) { uint _e40 = i; i = _e40 + 1u; @@ -142,7 +143,7 @@ fragment fs_main_without_storageOutput fs_main_without_storage( , metal::float4 proj_position_1 [[position]] , constant Globals& u_globals [[user(fake0)]] , constant Entity& u_entity [[user(fake0)]] -, constant 
type_7& u_lights [[user(fake0)]] +, constant type_9& u_lights [[user(fake0)]] , metal::depth2d_array t_shadow [[user(fake0)]] , metal::sampler sampler_shadow [[user(fake0)]] ) { @@ -151,7 +152,7 @@ fragment fs_main_without_storageOutput fs_main_without_storage( uint i_1 = 0u; metal::float3 normal_2 = metal::normalize(in_1.world_normal); bool loop_init_1 = true; - while(true) { + LOOP_IS_REACHABLE while(true) { if (!loop_init_1) { uint _e40 = i_1; i_1 = _e40 + 1u; diff --git a/naga/tests/out/spv/abstract-types-const.spvasm b/naga/tests/out/spv/abstract-types-const.spvasm index 207a04f564..edebb600ac 100644 --- a/naga/tests/out/spv/abstract-types-const.spvasm +++ b/naga/tests/out/spv/abstract-types-const.spvasm @@ -11,36 +11,36 @@ OpMemberDecorate %12 0 Offset 0 OpMemberDecorate %12 1 Offset 4 OpMemberDecorate %12 2 Offset 8 %2 = OpTypeVoid -%4 = OpTypeInt 32 0 -%3 = OpTypeVector %4 2 -%6 = OpTypeFloat 32 -%5 = OpTypeVector %6 2 -%7 = OpTypeMatrix %5 2 +%3 = OpTypeInt 32 0 +%4 = OpTypeVector %3 2 +%5 = OpTypeFloat 32 +%6 = OpTypeVector %5 2 +%7 = OpTypeMatrix %6 2 %9 = OpTypeInt 32 1 %8 = OpTypeVector %9 2 -%11 = OpConstant %4 2 -%10 = OpTypeArray %6 %11 -%12 = OpTypeStruct %6 %9 %4 -%13 = OpTypeVector %6 3 -%14 = OpConstant %4 42 -%15 = OpConstant %4 43 -%16 = OpConstantComposite %3 %14 %15 -%17 = OpConstant %6 44.0 -%18 = OpConstant %6 45.0 -%19 = OpConstantComposite %5 %17 %18 -%20 = OpConstant %6 1.0 -%21 = OpConstant %6 2.0 -%22 = OpConstantComposite %5 %20 %21 -%23 = OpConstant %6 3.0 -%24 = OpConstant %6 4.0 -%25 = OpConstantComposite %5 %23 %24 +%11 = OpConstant %3 2 +%10 = OpTypeArray %5 %11 +%12 = OpTypeStruct %5 %9 %3 +%13 = OpTypeVector %5 3 +%14 = OpConstant %3 42 +%15 = OpConstant %3 43 +%16 = OpConstantComposite %4 %14 %15 +%17 = OpConstant %5 44.0 +%18 = OpConstant %5 45.0 +%19 = OpConstantComposite %6 %17 %18 +%20 = OpConstant %5 1.0 +%21 = OpConstant %5 2.0 +%22 = OpConstantComposite %6 %20 %21 +%23 = OpConstant %5 3.0 +%24 = OpConstant %5 4.0 +%25 
= OpConstantComposite %6 %23 %24 %26 = OpConstantComposite %7 %22 %25 %27 = OpConstant %9 1 %28 = OpConstantComposite %8 %27 %27 -%29 = OpConstantComposite %5 %20 %20 -%30 = OpConstant %4 1 -%31 = OpConstantComposite %3 %30 %30 +%29 = OpConstantComposite %6 %20 %20 +%30 = OpConstant %3 1 +%31 = OpConstantComposite %4 %30 %30 %32 = OpConstantComposite %10 %20 %21 %33 = OpConstantComposite %12 %20 %27 %30 %34 = OpConstantComposite %13 %20 %21 %23 -%35 = OpConstantComposite %5 %21 %23 \ No newline at end of file +%35 = OpConstantComposite %6 %21 %23 \ No newline at end of file diff --git a/naga/tests/out/spv/abstract-types-var.spvasm b/naga/tests/out/spv/abstract-types-var.spvasm index 1b4b0664b4..59dba569cd 100644 --- a/naga/tests/out/spv/abstract-types-var.spvasm +++ b/naga/tests/out/spv/abstract-types-var.spvasm @@ -1,7 +1,7 @@ ; SPIR-V ; Version: 1.1 ; Generator: rspirv -; Bound: 209 +; Bound: 220 OpCapability Shader OpCapability Linkage %1 = OpExtInstImport "GLSL.std.450" @@ -9,48 +9,48 @@ OpMemoryModel Logical GLSL450 OpDecorate %10 ArrayStride 4 OpDecorate %12 ArrayStride 4 %2 = OpTypeVoid -%4 = OpTypeInt 32 1 -%3 = OpTypeVector %4 2 -%6 = OpTypeInt 32 0 -%5 = OpTypeVector %6 2 -%8 = OpTypeFloat 32 -%7 = OpTypeVector %8 2 -%9 = OpTypeMatrix %7 2 -%11 = OpConstant %6 2 -%10 = OpTypeArray %8 %11 -%12 = OpTypeArray %4 %11 -%13 = OpConstant %4 42 -%14 = OpConstant %4 43 -%15 = OpConstantComposite %3 %13 %14 -%16 = OpConstant %6 44 -%17 = OpConstant %6 45 -%18 = OpConstantComposite %5 %16 %17 -%19 = OpConstant %8 46.0 -%20 = OpConstant %8 47.0 -%21 = OpConstantComposite %7 %19 %20 -%22 = OpConstant %6 42 -%23 = OpConstant %6 43 -%24 = OpConstantComposite %5 %22 %23 -%25 = OpConstant %8 1.0 -%26 = OpConstant %8 2.0 -%27 = OpConstantComposite %7 %25 %26 -%28 = OpConstant %8 3.0 -%29 = OpConstant %8 4.0 -%30 = OpConstantComposite %7 %28 %29 +%3 = OpTypeInt 32 1 +%4 = OpTypeVector %3 2 +%5 = OpTypeInt 32 0 +%6 = OpTypeVector %5 2 +%7 = OpTypeFloat 32 +%8 = OpTypeVector 
%7 2 +%9 = OpTypeMatrix %8 2 +%11 = OpConstant %5 2 +%10 = OpTypeArray %7 %11 +%12 = OpTypeArray %3 %11 +%13 = OpConstant %3 42 +%14 = OpConstant %3 43 +%15 = OpConstantComposite %4 %13 %14 +%16 = OpConstant %5 44 +%17 = OpConstant %5 45 +%18 = OpConstantComposite %6 %16 %17 +%19 = OpConstant %7 46.0 +%20 = OpConstant %7 47.0 +%21 = OpConstantComposite %8 %19 %20 +%22 = OpConstant %5 42 +%23 = OpConstant %5 43 +%24 = OpConstantComposite %6 %22 %23 +%25 = OpConstant %7 1.0 +%26 = OpConstant %7 2.0 +%27 = OpConstantComposite %8 %25 %26 +%28 = OpConstant %7 3.0 +%29 = OpConstant %7 4.0 +%30 = OpConstantComposite %8 %28 %29 %31 = OpConstantComposite %9 %27 %30 -%32 = OpConstant %4 1 -%33 = OpConstantComposite %3 %32 %32 -%34 = OpConstantComposite %7 %25 %25 -%35 = OpConstant %6 1 -%36 = OpConstantComposite %5 %35 %35 +%32 = OpConstant %3 1 +%33 = OpConstantComposite %4 %32 %32 +%34 = OpConstantComposite %8 %25 %25 +%35 = OpConstant %5 1 +%36 = OpConstantComposite %6 %35 %35 %37 = OpConstantComposite %10 %25 %26 -%38 = OpConstant %4 2 +%38 = OpConstant %3 2 %39 = OpConstantComposite %12 %32 %38 -%41 = OpTypePointer Private %3 +%41 = OpTypePointer Private %4 %40 = OpVariable %41 Private %15 -%43 = OpTypePointer Private %5 +%43 = OpTypePointer Private %6 %42 = OpVariable %43 Private %18 -%45 = OpTypePointer Private %7 +%45 = OpTypePointer Private %8 %44 = OpVariable %45 Private %21 %46 = OpVariable %43 Private %24 %47 = OpVariable %43 Private %24 @@ -76,168 +76,179 @@ OpDecorate %12 ArrayStride 4 %67 = OpVariable %63 Private %37 %68 = OpVariable %63 Private %37 %69 = OpVariable %63 Private %37 -%72 = OpTypeFunction %2 -%74 = OpTypePointer Function %3 -%76 = OpTypePointer Function %5 -%78 = OpTypePointer Function %7 -%84 = OpTypePointer Function %9 -%100 = OpTypePointer Function %10 -%105 = OpTypePointer Function %12 -%116 = OpTypePointer Function %6 -%117 = OpConstantNull %6 -%119 = OpTypePointer Function %4 -%120 = OpConstantNull %4 -%122 = OpTypePointer Function %8 
-%123 = OpConstantNull %8 -%125 = OpConstantNull %5 -%127 = OpConstantNull %5 -%129 = OpConstantNull %5 -%131 = OpConstantNull %5 -%133 = OpConstantNull %9 -%135 = OpConstantNull %9 -%137 = OpConstantNull %9 -%139 = OpConstantNull %9 -%141 = OpConstantNull %10 -%143 = OpConstantNull %10 -%145 = OpConstantNull %10 -%147 = OpConstantNull %10 -%149 = OpConstantNull %12 -%151 = OpConstantNull %12 -%153 = OpConstantNull %10 -%155 = OpConstantNull %10 -%157 = OpConstantNull %10 -%159 = OpConstantNull %10 -%161 = OpConstantNull %12 -%163 = OpConstantNull %12 -%71 = OpFunction %2 None %72 -%70 = OpLabel -%109 = OpVariable %100 Function %37 -%106 = OpVariable %105 Function %39 -%102 = OpVariable %100 Function %37 -%98 = OpVariable %78 Function %34 -%95 = OpVariable %74 Function %33 -%92 = OpVariable %84 Function %31 -%89 = OpVariable %84 Function %31 -%86 = OpVariable %84 Function %31 -%82 = OpVariable %76 Function %24 -%79 = OpVariable %76 Function %24 -%73 = OpVariable %74 Function %15 -%110 = OpVariable %100 Function %37 -%107 = OpVariable %105 Function %39 -%103 = OpVariable %100 Function %37 -%99 = OpVariable %100 Function %37 -%96 = OpVariable %76 Function %36 -%93 = OpVariable %74 Function %33 -%90 = OpVariable %84 Function %31 -%87 = OpVariable %84 Function %31 -%83 = OpVariable %84 Function %31 -%80 = OpVariable %76 Function %24 -%75 = OpVariable %76 Function %18 -%111 = OpVariable %100 Function %37 -%108 = OpVariable %100 Function %37 -%104 = OpVariable %105 Function %39 -%101 = OpVariable %100 Function %37 -%97 = OpVariable %78 Function %34 -%94 = OpVariable %78 Function %34 -%91 = OpVariable %84 Function %31 -%88 = OpVariable %84 Function %31 -%85 = OpVariable %84 Function %31 -%81 = OpVariable %76 Function %24 -%77 = OpVariable %78 Function %21 -OpBranch %112 -%112 = OpLabel +%70 = OpVariable %41 Private %33 +%71 = OpVariable %45 Private %34 +%72 = OpVariable %41 Private %33 +%73 = OpVariable %43 Private %36 +%74 = OpVariable %45 Private %34 +%75 = OpVariable 
%45 Private %34 +%76 = OpVariable %63 Private %37 +%77 = OpVariable %63 Private %37 +%78 = OpVariable %63 Private %37 +%79 = OpVariable %63 Private %37 +%80 = OpVariable %63 Private %37 +%83 = OpTypeFunction %2 +%85 = OpTypePointer Function %4 +%87 = OpTypePointer Function %6 +%89 = OpTypePointer Function %8 +%95 = OpTypePointer Function %9 +%111 = OpTypePointer Function %10 +%116 = OpTypePointer Function %12 +%127 = OpTypePointer Function %5 +%128 = OpConstantNull %5 +%130 = OpTypePointer Function %3 +%131 = OpConstantNull %3 +%133 = OpTypePointer Function %7 +%134 = OpConstantNull %7 +%136 = OpConstantNull %6 +%138 = OpConstantNull %6 +%140 = OpConstantNull %6 +%142 = OpConstantNull %6 +%144 = OpConstantNull %9 +%146 = OpConstantNull %9 +%148 = OpConstantNull %9 +%150 = OpConstantNull %9 +%152 = OpConstantNull %10 +%154 = OpConstantNull %10 +%156 = OpConstantNull %10 +%158 = OpConstantNull %10 +%160 = OpConstantNull %12 +%162 = OpConstantNull %12 +%164 = OpConstantNull %10 +%166 = OpConstantNull %10 +%168 = OpConstantNull %10 +%170 = OpConstantNull %10 +%172 = OpConstantNull %12 +%174 = OpConstantNull %12 +%82 = OpFunction %2 None %83 +%81 = OpLabel +%120 = OpVariable %111 Function %37 +%117 = OpVariable %116 Function %39 +%113 = OpVariable %111 Function %37 +%109 = OpVariable %89 Function %34 +%106 = OpVariable %85 Function %33 +%103 = OpVariable %95 Function %31 +%100 = OpVariable %95 Function %31 +%97 = OpVariable %95 Function %31 +%93 = OpVariable %87 Function %24 +%90 = OpVariable %87 Function %24 +%84 = OpVariable %85 Function %15 +%121 = OpVariable %111 Function %37 +%118 = OpVariable %116 Function %39 +%114 = OpVariable %111 Function %37 +%110 = OpVariable %111 Function %37 +%107 = OpVariable %87 Function %36 +%104 = OpVariable %85 Function %33 +%101 = OpVariable %95 Function %31 +%98 = OpVariable %95 Function %31 +%94 = OpVariable %95 Function %31 +%91 = OpVariable %87 Function %24 +%86 = OpVariable %87 Function %18 +%122 = OpVariable %111 Function %37 
+%119 = OpVariable %111 Function %37 +%115 = OpVariable %116 Function %39 +%112 = OpVariable %111 Function %37 +%108 = OpVariable %89 Function %34 +%105 = OpVariable %89 Function %34 +%102 = OpVariable %95 Function %31 +%99 = OpVariable %95 Function %31 +%96 = OpVariable %95 Function %31 +%92 = OpVariable %87 Function %24 +%88 = OpVariable %89 Function %21 +OpBranch %123 +%123 = OpLabel OpReturn OpFunctionEnd -%114 = OpFunction %2 None %72 -%113 = OpLabel -%162 = OpVariable %105 Function %163 -%156 = OpVariable %100 Function %157 -%150 = OpVariable %105 Function %151 -%144 = OpVariable %100 Function %145 -%138 = OpVariable %84 Function %139 -%132 = OpVariable %84 Function %133 -%126 = OpVariable %76 Function %127 -%118 = OpVariable %119 Function %120 -%160 = OpVariable %105 Function %161 -%154 = OpVariable %100 Function %155 -%148 = OpVariable %105 Function %149 -%142 = OpVariable %100 Function %143 -%136 = OpVariable %84 Function %137 -%130 = OpVariable %76 Function %131 -%124 = OpVariable %76 Function %125 -%115 = OpVariable %116 Function %117 -%158 = OpVariable %100 Function %159 -%152 = OpVariable %100 Function %153 -%146 = OpVariable %100 Function %147 -%140 = OpVariable %100 Function %141 -%134 = OpVariable %84 Function %135 -%128 = OpVariable %76 Function %129 -%121 = OpVariable %122 Function %123 -OpBranch %164 -%164 = OpLabel -%165 = OpLoad %6 %115 -%166 = OpCompositeConstruct %5 %165 %23 -OpStore %124 %166 -%167 = OpLoad %6 %115 -%168 = OpCompositeConstruct %5 %22 %167 -OpStore %126 %168 -%169 = OpLoad %6 %115 -%170 = OpCompositeConstruct %5 %169 %23 -OpStore %128 %170 -%171 = OpLoad %6 %115 -%172 = OpCompositeConstruct %5 %22 %171 -OpStore %130 %172 -%173 = OpLoad %8 %121 -%174 = OpCompositeConstruct %7 %173 %26 -%175 = OpCompositeConstruct %9 %174 %30 -OpStore %132 %175 -%176 = OpLoad %8 %121 -%177 = OpCompositeConstruct %7 %25 %176 -%178 = OpCompositeConstruct %9 %177 %30 -OpStore %134 %178 -%179 = OpLoad %8 %121 -%180 = OpCompositeConstruct %7 %179 
%29 -%181 = OpCompositeConstruct %9 %27 %180 -OpStore %136 %181 -%182 = OpLoad %8 %121 -%183 = OpCompositeConstruct %7 %28 %182 -%184 = OpCompositeConstruct %9 %27 %183 -OpStore %138 %184 -%185 = OpLoad %8 %121 -%186 = OpCompositeConstruct %10 %185 %26 -OpStore %140 %186 -%187 = OpLoad %8 %121 -%188 = OpCompositeConstruct %10 %25 %187 -OpStore %142 %188 -%189 = OpLoad %8 %121 -%190 = OpCompositeConstruct %10 %189 %26 -OpStore %144 %190 -%191 = OpLoad %8 %121 -%192 = OpCompositeConstruct %10 %25 %191 -OpStore %146 %192 -%193 = OpLoad %4 %118 -%194 = OpCompositeConstruct %12 %193 %38 -OpStore %148 %194 -%195 = OpLoad %4 %118 -%196 = OpCompositeConstruct %12 %32 %195 -OpStore %150 %196 -%197 = OpLoad %8 %121 -%198 = OpCompositeConstruct %10 %197 %26 -OpStore %152 %198 -%199 = OpLoad %8 %121 -%200 = OpCompositeConstruct %10 %25 %199 -OpStore %154 %200 -%201 = OpLoad %8 %121 -%202 = OpCompositeConstruct %10 %201 %26 -OpStore %156 %202 -%203 = OpLoad %8 %121 -%204 = OpCompositeConstruct %10 %25 %203 -OpStore %158 %204 -%205 = OpLoad %4 %118 -%206 = OpCompositeConstruct %12 %205 %38 -OpStore %160 %206 -%207 = OpLoad %4 %118 -%208 = OpCompositeConstruct %12 %32 %207 -OpStore %162 %208 +%125 = OpFunction %2 None %83 +%124 = OpLabel +%173 = OpVariable %116 Function %174 +%167 = OpVariable %111 Function %168 +%161 = OpVariable %116 Function %162 +%155 = OpVariable %111 Function %156 +%149 = OpVariable %95 Function %150 +%143 = OpVariable %95 Function %144 +%137 = OpVariable %87 Function %138 +%129 = OpVariable %130 Function %131 +%171 = OpVariable %116 Function %172 +%165 = OpVariable %111 Function %166 +%159 = OpVariable %116 Function %160 +%153 = OpVariable %111 Function %154 +%147 = OpVariable %95 Function %148 +%141 = OpVariable %87 Function %142 +%135 = OpVariable %87 Function %136 +%126 = OpVariable %127 Function %128 +%169 = OpVariable %111 Function %170 +%163 = OpVariable %111 Function %164 +%157 = OpVariable %111 Function %158 +%151 = OpVariable %111 Function %152 
+%145 = OpVariable %95 Function %146 +%139 = OpVariable %87 Function %140 +%132 = OpVariable %133 Function %134 +OpBranch %175 +%175 = OpLabel +%176 = OpLoad %5 %126 +%177 = OpCompositeConstruct %6 %176 %23 +OpStore %135 %177 +%178 = OpLoad %5 %126 +%179 = OpCompositeConstruct %6 %22 %178 +OpStore %137 %179 +%180 = OpLoad %5 %126 +%181 = OpCompositeConstruct %6 %180 %23 +OpStore %139 %181 +%182 = OpLoad %5 %126 +%183 = OpCompositeConstruct %6 %22 %182 +OpStore %141 %183 +%184 = OpLoad %7 %132 +%185 = OpCompositeConstruct %8 %184 %26 +%186 = OpCompositeConstruct %9 %185 %30 +OpStore %143 %186 +%187 = OpLoad %7 %132 +%188 = OpCompositeConstruct %8 %25 %187 +%189 = OpCompositeConstruct %9 %188 %30 +OpStore %145 %189 +%190 = OpLoad %7 %132 +%191 = OpCompositeConstruct %8 %190 %29 +%192 = OpCompositeConstruct %9 %27 %191 +OpStore %147 %192 +%193 = OpLoad %7 %132 +%194 = OpCompositeConstruct %8 %28 %193 +%195 = OpCompositeConstruct %9 %27 %194 +OpStore %149 %195 +%196 = OpLoad %7 %132 +%197 = OpCompositeConstruct %10 %196 %26 +OpStore %151 %197 +%198 = OpLoad %7 %132 +%199 = OpCompositeConstruct %10 %25 %198 +OpStore %153 %199 +%200 = OpLoad %7 %132 +%201 = OpCompositeConstruct %10 %200 %26 +OpStore %155 %201 +%202 = OpLoad %7 %132 +%203 = OpCompositeConstruct %10 %25 %202 +OpStore %157 %203 +%204 = OpLoad %3 %129 +%205 = OpCompositeConstruct %12 %204 %38 +OpStore %159 %205 +%206 = OpLoad %3 %129 +%207 = OpCompositeConstruct %12 %32 %206 +OpStore %161 %207 +%208 = OpLoad %7 %132 +%209 = OpCompositeConstruct %10 %208 %26 +OpStore %163 %209 +%210 = OpLoad %7 %132 +%211 = OpCompositeConstruct %10 %25 %210 +OpStore %165 %211 +%212 = OpLoad %7 %132 +%213 = OpCompositeConstruct %10 %212 %26 +OpStore %167 %213 +%214 = OpLoad %7 %132 +%215 = OpCompositeConstruct %10 %25 %214 +OpStore %169 %215 +%216 = OpLoad %3 %129 +%217 = OpCompositeConstruct %12 %216 %38 +OpStore %171 %217 +%218 = OpLoad %3 %129 +%219 = OpCompositeConstruct %12 %32 %218 +OpStore %173 %219 OpReturn 
OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/access.spvasm b/naga/tests/out/spv/access.spvasm index 3446878c9a..ab0112870f 100644 --- a/naga/tests/out/spv/access.spvasm +++ b/naga/tests/out/spv/access.spvasm @@ -108,10 +108,10 @@ OpDecorate %272 Location 0 %5 = OpTypeInt 32 1 %6 = OpTypeStruct %3 %4 %5 %7 = OpTypeStruct %5 -%10 = OpTypeFloat 32 -%9 = OpTypeVector %10 3 -%8 = OpTypeMatrix %9 4 -%12 = OpTypeVector %10 2 +%8 = OpTypeFloat 32 +%10 = OpTypeVector %8 3 +%9 = OpTypeMatrix %10 4 +%12 = OpTypeVector %8 2 %11 = OpTypeMatrix %12 2 %14 = OpConstant %3 2 %13 = OpTypeArray %11 %14 @@ -120,18 +120,18 @@ OpDecorate %272 Location 0 %17 = OpTypeVector %3 2 %18 = OpTypeArray %17 %14 %19 = OpTypeRuntimeArray %7 -%20 = OpTypeStruct %8 %13 %5 %15 %18 %19 +%20 = OpTypeStruct %9 %13 %5 %15 %18 %19 %21 = OpTypeMatrix %12 3 %22 = OpTypeStruct %21 %23 = OpTypeVector %5 2 %24 = OpTypeMatrix %12 4 %25 = OpTypeArray %24 %14 %26 = OpTypeStruct %25 -%27 = OpTypePointer Function %10 -%28 = OpTypeArray %10 %16 +%27 = OpTypePointer Function %8 +%28 = OpTypeArray %8 %16 %30 = OpConstant %3 5 %29 = OpTypeArray %28 %30 -%31 = OpTypeVector %10 4 +%31 = OpTypeVector %8 4 %32 = OpTypeArray %5 %30 %33 = OpTypePointer Function %3 %34 = OpTypeArray %31 %14 @@ -156,44 +156,44 @@ OpDecorate %272 Location 0 %55 = OpTypeFunction %2 %56 = OpTypePointer Uniform %22 %58 = OpConstant %5 1 -%59 = OpConstant %10 1.0 +%59 = OpConstant %8 1.0 %60 = OpConstantComposite %12 %59 %59 -%61 = OpConstant %10 2.0 +%61 = OpConstant %8 2.0 %62 = OpConstantComposite %12 %61 %61 -%63 = OpConstant %10 3.0 +%63 = OpConstant %8 3.0 %64 = OpConstantComposite %12 %63 %63 %65 = OpConstantComposite %21 %60 %62 %64 %66 = OpConstantComposite %22 %65 -%67 = OpConstant %10 6.0 +%67 = OpConstant %8 6.0 %68 = OpConstantComposite %12 %67 %67 -%69 = OpConstant %10 5.0 +%69 = OpConstant %8 5.0 %70 = OpConstantComposite %12 %69 %69 -%71 = OpConstant %10 4.0 +%71 = OpConstant %8 4.0 %72 = 
OpConstantComposite %12 %71 %71 %73 = OpConstantComposite %21 %68 %70 %72 -%74 = OpConstant %10 9.0 +%74 = OpConstant %8 9.0 %75 = OpConstantComposite %12 %74 %74 -%76 = OpConstant %10 90.0 +%76 = OpConstant %8 90.0 %77 = OpConstantComposite %12 %76 %76 -%78 = OpConstant %10 10.0 -%79 = OpConstant %10 20.0 -%80 = OpConstant %10 30.0 -%81 = OpConstant %10 40.0 +%78 = OpConstant %8 10.0 +%79 = OpConstant %8 20.0 +%80 = OpConstant %8 30.0 +%81 = OpConstant %8 40.0 %83 = OpTypePointer Function %5 %85 = OpTypePointer Function %22 %89 = OpTypePointer Uniform %21 %92 = OpTypePointer Uniform %12 -%98 = OpTypePointer Uniform %10 +%98 = OpTypePointer Uniform %8 %99 = OpConstant %3 1 %114 = OpTypePointer Function %21 %116 = OpTypePointer Function %12 -%120 = OpTypePointer Function %10 +%120 = OpTypePointer Function %8 %131 = OpTypePointer Uniform %26 %133 = OpConstantNull %25 %134 = OpConstantComposite %26 %133 -%135 = OpConstant %10 8.0 +%135 = OpConstant %8 8.0 %136 = OpConstantComposite %12 %135 %135 -%137 = OpConstant %10 7.0 +%137 = OpConstant %8 7.0 %138 = OpConstantComposite %12 %137 %137 %139 = OpConstantComposite %24 %136 %138 %68 %70 %142 = OpTypePointer Function %26 @@ -201,8 +201,8 @@ OpDecorate %272 Location 0 %149 = OpTypePointer Uniform %24 %171 = OpTypePointer Function %25 %173 = OpTypePointer Function %24 -%189 = OpTypeFunction %10 %27 -%195 = OpTypeFunction %10 %29 +%189 = OpTypeFunction %8 %27 +%195 = OpTypeFunction %8 %29 %202 = OpTypeFunction %2 %33 %203 = OpConstant %3 42 %208 = OpTypeFunction %2 %35 @@ -214,7 +214,7 @@ OpDecorate %272 Location 0 %218 = OpTypePointer Output %31 %217 = OpVariable %218 Output %221 = OpTypePointer StorageBuffer %23 -%224 = OpConstant %10 0.0 +%224 = OpConstant %8 0.0 %225 = OpConstant %3 3 %226 = OpConstant %5 3 %227 = OpConstant %5 4 @@ -223,21 +223,21 @@ OpDecorate %272 Location 0 %230 = OpConstantNull %29 %233 = OpTypePointer Function %32 %234 = OpConstantNull %32 -%239 = OpTypePointer StorageBuffer %8 +%239 = 
OpTypePointer StorageBuffer %9 %242 = OpTypePointer StorageBuffer %18 %243 = OpConstant %3 4 -%246 = OpTypePointer StorageBuffer %9 -%247 = OpTypePointer StorageBuffer %10 +%246 = OpTypePointer StorageBuffer %10 +%247 = OpTypePointer StorageBuffer %8 %250 = OpTypePointer StorageBuffer %19 %253 = OpTypePointer StorageBuffer %7 %254 = OpTypePointer StorageBuffer %5 %266 = OpTypeVector %5 4 %272 = OpVariable %218 Output -%275 = OpConstantComposite %9 %224 %224 %224 -%276 = OpConstantComposite %9 %59 %59 %59 -%277 = OpConstantComposite %9 %61 %61 %61 -%278 = OpConstantComposite %9 %63 %63 %63 -%279 = OpConstantComposite %8 %275 %276 %277 %278 +%275 = OpConstantComposite %10 %224 %224 %224 +%276 = OpConstantComposite %10 %59 %59 %59 +%277 = OpConstantComposite %10 %61 %61 %61 +%278 = OpConstantComposite %10 %63 %63 %63 +%279 = OpConstantComposite %9 %275 %276 %277 %278 %280 = OpConstantComposite %17 %36 %36 %281 = OpConstantComposite %17 %99 %99 %282 = OpConstantComposite %18 %280 %281 @@ -265,17 +265,17 @@ OpStore %82 %88 %96 = OpAccessChain %92 %57 %36 %95 %97 = OpLoad %12 %96 %100 = OpAccessChain %98 %57 %36 %36 %99 -%101 = OpLoad %10 %100 +%101 = OpLoad %8 %100 %102 = OpLoad %5 %82 %103 = OpAccessChain %98 %57 %36 %36 %102 -%104 = OpLoad %10 %103 +%104 = OpLoad %8 %103 %105 = OpLoad %5 %82 %106 = OpAccessChain %98 %57 %36 %105 %99 -%107 = OpLoad %10 %106 +%107 = OpLoad %8 %106 %108 = OpLoad %5 %82 %109 = OpLoad %5 %82 %110 = OpAccessChain %98 %57 %36 %108 %109 -%111 = OpLoad %10 %110 +%111 = OpLoad %8 %110 %112 = OpLoad %5 %82 %113 = OpIAdd %5 %112 %58 OpStore %82 %113 @@ -320,17 +320,17 @@ OpStore %140 %145 %155 = OpAccessChain %92 %132 %36 %36 %154 %156 = OpLoad %12 %155 %157 = OpAccessChain %98 %132 %36 %36 %36 %99 -%158 = OpLoad %10 %157 +%158 = OpLoad %8 %157 %159 = OpLoad %5 %140 %160 = OpAccessChain %98 %132 %36 %36 %36 %159 -%161 = OpLoad %10 %160 +%161 = OpLoad %8 %160 %162 = OpLoad %5 %140 %163 = OpAccessChain %98 %132 %36 %36 %162 %99 -%164 = OpLoad %10 
%163 +%164 = OpLoad %8 %163 %165 = OpLoad %5 %140 %166 = OpLoad %5 %140 %167 = OpAccessChain %98 %132 %36 %36 %165 %166 -%168 = OpLoad %10 %167 +%168 = OpLoad %8 %167 %169 = OpLoad %5 %140 %170 = OpIAdd %5 %169 %58 OpStore %140 %170 @@ -357,21 +357,21 @@ OpStore %182 %80 OpStore %185 %81 OpReturn OpFunctionEnd -%188 = OpFunction %10 None %189 +%188 = OpFunction %8 None %189 %187 = OpFunctionParameter %27 %186 = OpLabel OpBranch %190 %190 = OpLabel -%191 = OpLoad %10 %187 +%191 = OpLoad %8 %187 OpReturnValue %191 OpFunctionEnd -%194 = OpFunction %10 None %195 +%194 = OpFunction %8 None %195 %193 = OpFunctionParameter %29 %192 = OpLabel OpBranch %196 %196 = OpLabel %197 = OpCompositeExtract %28 %193 4 -%198 = OpCompositeExtract %10 %197 9 +%198 = OpCompositeExtract %8 %197 9 OpReturnValue %198 OpFunctionEnd %201 = OpFunction %2 None %202 @@ -400,22 +400,22 @@ OpFunctionEnd %223 = OpAccessChain %131 %50 %36 OpBranch %235 %235 = OpLabel -%236 = OpLoad %10 %231 +%236 = OpLoad %8 %231 OpStore %231 %59 %237 = OpFunctionCall %2 %54 %238 = OpFunctionCall %2 %130 %240 = OpAccessChain %239 %42 %36 -%241 = OpLoad %8 %240 +%241 = OpLoad %9 %240 %244 = OpAccessChain %242 %42 %243 %245 = OpLoad %18 %244 %248 = OpAccessChain %247 %42 %36 %225 %36 -%249 = OpLoad %10 %248 +%249 = OpLoad %8 %248 %251 = OpArrayLength %3 %42 5 %252 = OpISub %3 %251 %14 %255 = OpAccessChain %254 %42 %30 %252 %36 %256 = OpLoad %5 %255 %257 = OpLoad %23 %222 -%258 = OpFunctionCall %10 %188 %231 +%258 = OpFunctionCall %8 %188 %231 %259 = OpConvertFToS %5 %249 %260 = OpCompositeConstruct %32 %256 %259 %226 %227 %228 OpStore %232 %260 @@ -424,10 +424,10 @@ OpStore %232 %260 OpStore %262 %229 %263 = OpAccessChain %83 %232 %216 %264 = OpLoad %5 %263 -%265 = OpFunctionCall %10 %194 %230 +%265 = OpFunctionCall %8 %194 %230 %267 = OpCompositeConstruct %266 %264 %264 %264 %264 %268 = OpConvertSToF %31 %267 -%269 = OpMatrixTimesVector %9 %241 %268 +%269 = OpMatrixTimesVector %10 %241 %268 %270 = 
OpCompositeConstruct %31 %269 %61 OpStore %217 %270 OpReturn diff --git a/naga/tests/out/spv/boids.spvasm b/naga/tests/out/spv/boids.spvasm index 0e48e0f559..4fc1cbf49f 100644 --- a/naga/tests/out/spv/boids.spvasm +++ b/naga/tests/out/spv/boids.spvasm @@ -61,10 +61,10 @@ OpDecorate %18 Binding 2 OpDecorate %20 BuiltIn GlobalInvocationId %2 = OpTypeVoid %3 = OpTypeInt 32 0 -%5 = OpTypeFloat 32 -%4 = OpTypeVector %5 2 -%6 = OpTypeStruct %4 %4 -%7 = OpTypeStruct %5 %5 %5 %5 %5 %5 %5 +%4 = OpTypeFloat 32 +%5 = OpTypeVector %4 2 +%6 = OpTypeStruct %5 %5 +%7 = OpTypeStruct %4 %4 %4 %4 %4 %4 %4 %8 = OpTypeRuntimeArray %6 %9 = OpTypeStruct %8 %10 = OpTypeVector %3 3 @@ -81,32 +81,32 @@ OpDecorate %20 BuiltIn GlobalInvocationId %24 = OpTypeFunction %2 %25 = OpTypePointer Uniform %7 %26 = OpConstant %3 0 -%28 = OpConstant %5 0.0 -%29 = OpConstantComposite %4 %28 %28 +%28 = OpConstant %4 0.0 +%29 = OpConstantComposite %5 %28 %28 %30 = OpConstant %11 0 %31 = OpConstant %11 1 %32 = OpConstant %3 1 -%33 = OpConstant %5 0.1 -%34 = OpConstant %5 -1.0 -%35 = OpConstant %5 1.0 -%37 = OpTypePointer Function %4 -%38 = OpConstantNull %4 -%40 = OpConstantNull %4 +%33 = OpConstant %4 0.1 +%34 = OpConstant %4 -1.0 +%35 = OpConstant %4 1.0 +%37 = OpTypePointer Function %5 +%38 = OpConstantNull %5 +%40 = OpConstantNull %5 %45 = OpTypePointer Function %11 -%48 = OpConstantNull %4 -%50 = OpConstantNull %4 +%48 = OpConstantNull %5 +%50 = OpConstantNull %5 %52 = OpTypePointer Function %3 %55 = OpTypeBool %59 = OpTypePointer StorageBuffer %8 %60 = OpTypePointer StorageBuffer %6 -%61 = OpTypePointer StorageBuffer %4 -%87 = OpTypePointer Uniform %5 +%61 = OpTypePointer StorageBuffer %5 +%87 = OpTypePointer Uniform %4 %101 = OpConstant %3 2 %115 = OpConstant %3 3 %150 = OpConstant %3 4 %156 = OpConstant %3 5 %162 = OpConstant %3 6 -%179 = OpTypePointer Function %5 +%179 = OpTypePointer Function %4 %23 = OpFunction %2 None %24 %19 = OpLabel %51 = OpVariable %52 Function %26 @@ -131,10 +131,10 @@ 
OpBranchConditional %56 %58 %57 OpReturn %57 = OpLabel %62 = OpAccessChain %61 %16 %26 %54 %26 -%63 = OpLoad %4 %62 +%63 = OpLoad %5 %62 OpStore %36 %63 %64 = OpAccessChain %61 %16 %26 %54 %32 -%65 = OpLoad %4 %64 +%65 = OpLoad %5 %64 OpStore %39 %65 OpBranch %66 %66 = OpLabel @@ -157,59 +157,59 @@ OpBranch %69 %76 = OpLabel %78 = OpLoad %3 %51 %79 = OpAccessChain %61 %16 %26 %78 %26 -%80 = OpLoad %4 %79 +%80 = OpLoad %5 %79 OpStore %47 %80 %81 = OpLoad %3 %51 %82 = OpAccessChain %61 %16 %26 %81 %32 -%83 = OpLoad %4 %82 +%83 = OpLoad %5 %82 OpStore %49 %83 -%84 = OpLoad %4 %47 -%85 = OpLoad %4 %36 -%86 = OpExtInst %5 %1 Distance %84 %85 +%84 = OpLoad %5 %47 +%85 = OpLoad %5 %36 +%86 = OpExtInst %4 %1 Distance %84 %85 %88 = OpAccessChain %87 %27 %32 -%89 = OpLoad %5 %88 +%89 = OpLoad %4 %88 %90 = OpFOrdLessThan %55 %86 %89 OpSelectionMerge %91 None OpBranchConditional %90 %92 %91 %92 = OpLabel -%93 = OpLoad %4 %41 -%94 = OpLoad %4 %47 -%95 = OpFAdd %4 %93 %94 +%93 = OpLoad %5 %41 +%94 = OpLoad %5 %47 +%95 = OpFAdd %5 %93 %94 OpStore %41 %95 %96 = OpLoad %11 %44 %97 = OpIAdd %11 %96 %31 OpStore %44 %97 OpBranch %91 %91 = OpLabel -%98 = OpLoad %4 %47 -%99 = OpLoad %4 %36 -%100 = OpExtInst %5 %1 Distance %98 %99 +%98 = OpLoad %5 %47 +%99 = OpLoad %5 %36 +%100 = OpExtInst %4 %1 Distance %98 %99 %102 = OpAccessChain %87 %27 %101 -%103 = OpLoad %5 %102 +%103 = OpLoad %4 %102 %104 = OpFOrdLessThan %55 %100 %103 OpSelectionMerge %105 None OpBranchConditional %104 %106 %105 %106 = OpLabel -%107 = OpLoad %4 %43 -%108 = OpLoad %4 %47 -%109 = OpLoad %4 %36 -%110 = OpFSub %4 %108 %109 -%111 = OpFSub %4 %107 %110 +%107 = OpLoad %5 %43 +%108 = OpLoad %5 %47 +%109 = OpLoad %5 %36 +%110 = OpFSub %5 %108 %109 +%111 = OpFSub %5 %107 %110 OpStore %43 %111 OpBranch %105 %105 = OpLabel -%112 = OpLoad %4 %47 -%113 = OpLoad %4 %36 -%114 = OpExtInst %5 %1 Distance %112 %113 +%112 = OpLoad %5 %47 +%113 = OpLoad %5 %36 +%114 = OpExtInst %4 %1 Distance %112 %113 %116 = OpAccessChain %87 %27 
%115 -%117 = OpLoad %5 %116 +%117 = OpLoad %4 %116 %118 = OpFOrdLessThan %55 %114 %117 OpSelectionMerge %119 None OpBranchConditional %118 %120 %119 %120 = OpLabel -%121 = OpLoad %4 %42 -%122 = OpLoad %4 %49 -%123 = OpFAdd %4 %121 %122 +%121 = OpLoad %5 %42 +%122 = OpLoad %5 %49 +%123 = OpFAdd %5 %121 %122 OpStore %42 %123 %124 = OpLoad %11 %46 %125 = OpIAdd %11 %124 %31 @@ -228,13 +228,13 @@ OpBranch %66 OpSelectionMerge %130 None OpBranchConditional %129 %131 %130 %131 = OpLabel -%132 = OpLoad %4 %41 +%132 = OpLoad %5 %41 %133 = OpLoad %11 %44 -%134 = OpConvertSToF %5 %133 -%135 = OpCompositeConstruct %4 %134 %134 -%136 = OpFDiv %4 %132 %135 -%137 = OpLoad %4 %36 -%138 = OpFSub %4 %136 %137 +%134 = OpConvertSToF %4 %133 +%135 = OpCompositeConstruct %5 %134 %134 +%136 = OpFDiv %5 %132 %135 +%137 = OpLoad %5 %36 +%138 = OpFSub %5 %136 %137 OpStore %41 %138 OpBranch %130 %130 = OpLabel @@ -243,47 +243,47 @@ OpBranch %130 OpSelectionMerge %141 None OpBranchConditional %140 %142 %141 %142 = OpLabel -%143 = OpLoad %4 %42 +%143 = OpLoad %5 %42 %144 = OpLoad %11 %46 -%145 = OpConvertSToF %5 %144 -%146 = OpCompositeConstruct %4 %145 %145 -%147 = OpFDiv %4 %143 %146 +%145 = OpConvertSToF %4 %144 +%146 = OpCompositeConstruct %5 %145 %145 +%147 = OpFDiv %5 %143 %146 OpStore %42 %147 OpBranch %141 %141 = OpLabel -%148 = OpLoad %4 %39 -%149 = OpLoad %4 %41 +%148 = OpLoad %5 %39 +%149 = OpLoad %5 %41 %151 = OpAccessChain %87 %27 %150 -%152 = OpLoad %5 %151 -%153 = OpVectorTimesScalar %4 %149 %152 -%154 = OpFAdd %4 %148 %153 -%155 = OpLoad %4 %43 +%152 = OpLoad %4 %151 +%153 = OpVectorTimesScalar %5 %149 %152 +%154 = OpFAdd %5 %148 %153 +%155 = OpLoad %5 %43 %157 = OpAccessChain %87 %27 %156 -%158 = OpLoad %5 %157 -%159 = OpVectorTimesScalar %4 %155 %158 -%160 = OpFAdd %4 %154 %159 -%161 = OpLoad %4 %42 +%158 = OpLoad %4 %157 +%159 = OpVectorTimesScalar %5 %155 %158 +%160 = OpFAdd %5 %154 %159 +%161 = OpLoad %5 %42 %163 = OpAccessChain %87 %27 %162 -%164 = OpLoad %5 %163 -%165 = 
OpVectorTimesScalar %4 %161 %164 -%166 = OpFAdd %4 %160 %165 +%164 = OpLoad %4 %163 +%165 = OpVectorTimesScalar %5 %161 %164 +%166 = OpFAdd %5 %160 %165 OpStore %39 %166 -%167 = OpLoad %4 %39 -%168 = OpExtInst %4 %1 Normalize %167 -%169 = OpLoad %4 %39 -%170 = OpExtInst %5 %1 Length %169 -%171 = OpExtInst %5 %1 FClamp %170 %28 %33 -%172 = OpVectorTimesScalar %4 %168 %171 +%167 = OpLoad %5 %39 +%168 = OpExtInst %5 %1 Normalize %167 +%169 = OpLoad %5 %39 +%170 = OpExtInst %4 %1 Length %169 +%171 = OpExtInst %4 %1 FClamp %170 %28 %33 +%172 = OpVectorTimesScalar %5 %168 %171 OpStore %39 %172 -%173 = OpLoad %4 %36 -%174 = OpLoad %4 %39 +%173 = OpLoad %5 %36 +%174 = OpLoad %5 %39 %175 = OpAccessChain %87 %27 %26 -%176 = OpLoad %5 %175 -%177 = OpVectorTimesScalar %4 %174 %176 -%178 = OpFAdd %4 %173 %177 +%176 = OpLoad %4 %175 +%177 = OpVectorTimesScalar %5 %174 %176 +%178 = OpFAdd %5 %173 %177 OpStore %36 %178 %180 = OpAccessChain %179 %36 %26 -%181 = OpLoad %5 %180 +%181 = OpLoad %4 %180 %182 = OpFOrdLessThan %55 %181 %34 OpSelectionMerge %183 None OpBranchConditional %182 %184 %183 @@ -293,7 +293,7 @@ OpStore %185 %35 OpBranch %183 %183 = OpLabel %186 = OpAccessChain %179 %36 %26 -%187 = OpLoad %5 %186 +%187 = OpLoad %4 %186 %188 = OpFOrdGreaterThan %55 %187 %35 OpSelectionMerge %189 None OpBranchConditional %188 %190 %189 @@ -303,7 +303,7 @@ OpStore %191 %34 OpBranch %189 %189 = OpLabel %192 = OpAccessChain %179 %36 %32 -%193 = OpLoad %5 %192 +%193 = OpLoad %4 %192 %194 = OpFOrdLessThan %55 %193 %34 OpSelectionMerge %195 None OpBranchConditional %194 %196 %195 @@ -313,7 +313,7 @@ OpStore %197 %35 OpBranch %195 %195 = OpLabel %198 = OpAccessChain %179 %36 %32 -%199 = OpLoad %5 %198 +%199 = OpLoad %4 %198 %200 = OpFOrdGreaterThan %55 %199 %35 OpSelectionMerge %201 None OpBranchConditional %200 %202 %201 @@ -322,10 +322,10 @@ OpBranchConditional %200 %202 %201 OpStore %203 %34 OpBranch %201 %201 = OpLabel -%204 = OpLoad %4 %36 +%204 = OpLoad %5 %36 %205 = OpAccessChain 
%61 %18 %26 %54 %26 OpStore %205 %204 -%206 = OpLoad %4 %39 +%206 = OpLoad %5 %39 %207 = OpAccessChain %61 %18 %26 %54 %32 OpStore %207 %206 OpReturn diff --git a/naga/tests/out/spv/const-exprs.spvasm b/naga/tests/out/spv/const-exprs.spvasm index 22fef53749..afd9fe8499 100644 --- a/naga/tests/out/spv/const-exprs.spvasm +++ b/naga/tests/out/spv/const-exprs.spvasm @@ -11,25 +11,25 @@ OpExecutionMode %100 LocalSize 2 3 1 %3 = OpTypeInt 32 0 %4 = OpTypeInt 32 1 %5 = OpTypeVector %4 4 -%7 = OpTypeFloat 32 -%6 = OpTypeVector %7 4 -%8 = OpTypeVector %7 2 +%6 = OpTypeFloat 32 +%7 = OpTypeVector %6 4 +%8 = OpTypeVector %6 2 %10 = OpTypeBool %9 = OpTypeVector %10 2 %11 = OpConstant %3 2 %12 = OpConstant %4 3 %13 = OpConstant %4 4 %14 = OpConstant %4 8 -%15 = OpConstant %7 3.141 -%16 = OpConstant %7 6.282 -%17 = OpConstant %7 0.44444445 -%18 = OpConstant %7 0.0 -%19 = OpConstantComposite %6 %17 %18 %18 %18 +%15 = OpConstant %6 3.141 +%16 = OpConstant %6 6.282 +%17 = OpConstant %6 0.44444445 +%18 = OpConstant %6 0.0 +%19 = OpConstantComposite %7 %17 %18 %18 %18 %20 = OpConstant %4 0 %21 = OpConstant %4 1 %22 = OpConstant %4 2 -%23 = OpConstant %7 4.0 -%24 = OpConstant %7 5.0 +%23 = OpConstant %6 4.0 +%24 = OpConstant %6 5.0 %25 = OpConstantComposite %8 %23 %24 %26 = OpConstantTrue %10 %27 = OpConstantFalse %10 @@ -46,10 +46,10 @@ OpExecutionMode %100 LocalSize 2 3 1 %57 = OpConstantNull %5 %68 = OpConstant %4 -4 %69 = OpConstantComposite %5 %68 %68 %68 %68 -%78 = OpConstant %7 1.0 -%79 = OpConstant %7 2.0 -%80 = OpConstantComposite %6 %79 %78 %78 %78 -%82 = OpTypePointer Function %6 +%78 = OpConstant %6 1.0 +%79 = OpConstant %6 2.0 +%80 = OpConstantComposite %7 %79 %78 %78 %78 +%82 = OpTypePointer Function %7 %87 = OpTypeFunction %3 %4 %88 = OpConstant %3 10 %89 = OpConstant %3 20 diff --git a/naga/tests/out/spv/constructors.spvasm b/naga/tests/out/spv/constructors.spvasm index 615a31dc1b..ec83d04822 100644 --- a/naga/tests/out/spv/constructors.spvasm +++ 
b/naga/tests/out/spv/constructors.spvasm @@ -13,12 +13,12 @@ OpDecorate %10 ArrayStride 16 OpDecorate %15 ArrayStride 32 OpDecorate %17 ArrayStride 4 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 %5 = OpTypeInt 32 1 -%6 = OpTypeStruct %3 %5 -%7 = OpTypeVector %4 3 -%9 = OpTypeVector %4 2 +%6 = OpTypeStruct %4 %5 +%7 = OpTypeVector %3 3 +%9 = OpTypeVector %3 2 %8 = OpTypeMatrix %9 2 %12 = OpTypeInt 32 0 %11 = OpConstant %12 1 @@ -29,13 +29,13 @@ OpDecorate %17 ArrayStride 4 %15 = OpTypeArray %6 %16 %18 = OpConstant %12 4 %17 = OpTypeArray %5 %18 -%19 = OpTypeMatrix %3 4 +%19 = OpTypeMatrix %4 4 %20 = OpTypeMatrix %7 2 -%21 = OpConstant %4 0.0 -%22 = OpConstant %4 1.0 -%23 = OpConstant %4 2.0 +%21 = OpConstant %3 0.0 +%22 = OpConstant %3 1.0 +%23 = OpConstant %3 2.0 %24 = OpConstantComposite %7 %21 %22 %23 -%25 = OpConstant %4 3.0 +%25 = OpConstant %3 3.0 %26 = OpConstantComposite %9 %21 %22 %27 = OpConstantComposite %9 %23 %25 %28 = OpConstantComposite %8 %26 %27 @@ -43,7 +43,7 @@ OpDecorate %17 ArrayStride 4 %30 = OpConstantNull %13 %31 = OpConstantNull %5 %32 = OpConstantNull %12 -%33 = OpConstantNull %4 +%33 = OpConstantNull %3 %34 = OpConstantNull %14 %35 = OpConstantNull %8 %36 = OpConstantNull %15 @@ -54,14 +54,14 @@ OpDecorate %17 ArrayStride 4 %41 = OpConstant %5 3 %42 = OpConstantComposite %17 %38 %39 %40 %41 %45 = OpTypeFunction %2 -%46 = OpConstantComposite %3 %22 %22 %22 %22 +%46 = OpConstantComposite %4 %22 %22 %22 %22 %47 = OpConstantComposite %6 %46 %39 %48 = OpConstantComposite %9 %22 %21 %49 = OpConstantComposite %8 %48 %26 -%50 = OpConstantComposite %3 %22 %21 %21 %21 -%51 = OpConstantComposite %3 %21 %22 %21 %21 -%52 = OpConstantComposite %3 %21 %21 %22 %21 -%53 = OpConstantComposite %3 %21 %21 %21 %22 +%50 = OpConstantComposite %4 %22 %21 %21 %21 +%51 = OpConstantComposite %4 %21 %22 %21 %21 +%52 = OpConstantComposite %4 %21 %21 %22 %21 +%53 = OpConstantComposite %4 %21 %21 %21 %22 %54 = 
OpConstantComposite %19 %50 %51 %52 %53 %55 = OpConstant %12 0 %56 = OpConstantComposite %14 %55 %55 diff --git a/naga/tests/out/spv/cross.spvasm b/naga/tests/out/spv/cross.spvasm new file mode 100644 index 0000000000..b41479900c --- /dev/null +++ b/naga/tests/out/spv/cross.spvasm @@ -0,0 +1,24 @@ +; SPIR-V +; Version: 1.1 +; Generator: rspirv +; Bound: 14 +OpCapability Shader +%1 = OpExtInstImport "GLSL.std.450" +OpMemoryModel Logical GLSL450 +OpEntryPoint GLCompute %6 "main" +OpExecutionMode %6 LocalSize 1 1 1 +%2 = OpTypeVoid +%4 = OpTypeFloat 32 +%3 = OpTypeVector %4 3 +%7 = OpTypeFunction %2 +%8 = OpConstant %4 0.0 +%9 = OpConstant %4 1.0 +%10 = OpConstant %4 2.0 +%11 = OpConstantComposite %3 %8 %9 %10 +%6 = OpFunction %2 None %7 +%5 = OpLabel +OpBranch %12 +%12 = OpLabel +%13 = OpExtInst %3 %1 Cross %11 %11 +OpReturn +OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/debug-symbol-large-source.spvasm b/naga/tests/out/spv/debug-symbol-large-source.spvasm index 47a6fb77aa..15e95cf2a5 100644 --- a/naga/tests/out/spv/debug-symbol-large-source.spvasm +++ b/naga/tests/out/spv/debug-symbol-large-source.spvasm @@ -7653,17 +7653,17 @@ OpDecorate %578 Location 0 OpDecorate %580 Location 1 OpDecorate %582 Location 0 %2 = OpTypeVoid -%5 = OpTypeFloat 32 -%4 = OpTypeVector %5 3 -%6 = OpTypeVector %5 2 -%7 = OpTypeVector %5 4 +%4 = OpTypeFloat 32 +%5 = OpTypeVector %4 3 +%6 = OpTypeVector %4 2 +%7 = OpTypeVector %4 4 %8 = OpTypeInt 32 0 %9 = OpTypeMatrix %6 2 %10 = OpTypeVector %8 2 %12 = OpTypeInt 32 1 %11 = OpTypeVector %12 2 %13 = OpTypeStruct %10 %11 %6 -%14 = OpTypeStruct %4 %4 +%14 = OpTypeStruct %5 %5 %15 = OpTypeRuntimeArray %14 %16 = OpTypeStruct %15 %17 = OpTypeRuntimeArray %8 @@ -7674,9 +7674,9 @@ OpDecorate %582 Location 0 %22 = OpTypeStruct %8 %8 %23 = OpTypeMatrix %7 4 %24 = OpTypeStruct %7 %23 -%25 = OpTypeStruct %4 %4 -%26 = OpTypeStruct %7 %4 %4 -%27 = OpTypeImage %5 2D 0 0 0 1 Unknown +%25 = OpTypeStruct %5 %5 +%26 = OpTypeStruct %7 
%5 %5 +%27 = OpTypeImage %4 2D 0 0 0 1 Unknown %28 = OpTypeSampler %30 = OpTypeStruct %13 %31 = OpTypePointer Uniform %30 @@ -7700,67 +7700,67 @@ OpDecorate %582 Location 0 %47 = OpVariable %48 UniformConstant %49 = OpVariable %46 UniformConstant %50 = OpVariable %48 UniformConstant -%54 = OpTypeFunction %4 %4 -%55 = OpConstant %5 34.0 -%56 = OpConstant %5 1.0 -%57 = OpConstantComposite %4 %56 %56 %56 -%58 = OpConstant %5 289.0 -%59 = OpConstantComposite %4 %58 %58 %58 -%68 = OpTypeFunction %5 %6 -%69 = OpConstant %5 0.21132487 -%70 = OpConstant %5 0.36602542 -%71 = OpConstant %5 -0.57735026 -%72 = OpConstant %5 0.024390243 +%54 = OpTypeFunction %5 %5 +%55 = OpConstant %4 34.0 +%56 = OpConstant %4 1.0 +%57 = OpConstantComposite %5 %56 %56 %56 +%58 = OpConstant %4 289.0 +%59 = OpConstantComposite %5 %58 %58 %58 +%68 = OpTypeFunction %4 %6 +%69 = OpConstant %4 0.21132487 +%70 = OpConstant %4 0.36602542 +%71 = OpConstant %4 -0.57735026 +%72 = OpConstant %4 0.024390243 %73 = OpConstantComposite %7 %69 %70 %71 %72 -%74 = OpConstant %5 0.0 +%74 = OpConstant %4 0.0 %75 = OpConstantComposite %6 %56 %74 %76 = OpConstantComposite %6 %74 %56 %77 = OpConstantComposite %6 %58 %58 -%78 = OpConstant %5 0.5 -%79 = OpConstantComposite %4 %78 %78 %78 -%80 = OpConstantComposite %4 %74 %74 %74 -%81 = OpConstant %5 2.0 -%82 = OpConstant %5 0.85373473 -%83 = OpConstant %5 1.7928429 -%84 = OpConstantComposite %4 %83 %83 %83 -%85 = OpConstant %5 130.0 +%78 = OpConstant %4 0.5 +%79 = OpConstantComposite %5 %78 %78 %78 +%80 = OpConstantComposite %5 %74 %74 %74 +%81 = OpConstant %4 2.0 +%82 = OpConstant %4 0.85373473 +%83 = OpConstant %4 1.7928429 +%84 = OpConstantComposite %5 %83 %83 %83 +%85 = OpConstant %4 130.0 %87 = OpTypePointer Function %6 %88 = OpConstantNull %6 %90 = OpConstantNull %6 %92 = OpTypePointer Function %7 %93 = OpConstantNull %7 -%95 = OpTypePointer Function %4 -%96 = OpConstantNull %4 +%95 = OpTypePointer Function %5 +%96 = OpConstantNull %5 %112 = OpTypeBool %115 = 
OpTypeVector %112 2 -%125 = OpTypePointer Function %5 +%125 = OpTypePointer Function %4 %126 = OpConstant %8 1 %135 = OpConstant %8 0 %205 = OpConstant %8 5 -%206 = OpConstant %5 0.01 -%207 = OpConstant %5 100.0 +%206 = OpConstant %4 0.01 +%207 = OpConstant %4 100.0 %208 = OpConstantComposite %6 %207 %207 -%209 = OpConstant %5 0.87758255 -%210 = OpConstant %5 0.47942555 +%209 = OpConstant %4 0.87758255 +%210 = OpConstant %4 0.47942555 %211 = OpConstantComposite %6 %209 %210 %213 = OpConstantNull %6 -%215 = OpTypePointer Function %5 +%215 = OpTypePointer Function %4 %218 = OpTypePointer Function %8 -%258 = OpTypeFunction %4 %6 %6 +%258 = OpTypeFunction %5 %6 %6 %271 = OpTypeFunction %14 %6 %6 -%272 = OpConstant %5 0.1 +%272 = OpConstant %4 0.1 %273 = OpConstantComposite %6 %272 %74 %274 = OpConstantComposite %6 %74 %272 -%275 = OpConstant %5 -0.1 +%275 = OpConstant %4 -0.1 %276 = OpConstantComposite %6 %275 %74 %277 = OpConstantComposite %6 %74 %275 %304 = OpTypeFunction %6 %8 %10 %11 -%321 = OpTypeFunction %4 %6 -%322 = OpConstant %5 23.0 -%323 = OpConstant %5 32.0 +%321 = OpTypeFunction %5 %6 +%322 = OpConstant %4 23.0 +%323 = OpConstant %4 32.0 %324 = OpConstantComposite %6 %322 %323 -%325 = OpConstant %5 -43.0 -%326 = OpConstant %5 3.0 +%325 = OpConstant %4 -43.0 +%326 = OpConstant %4 3.0 %327 = OpConstantComposite %6 %325 %326 %343 = OpTypePointer Input %19 %342 = OpVariable %343 Input @@ -7787,7 +7787,7 @@ OpDecorate %582 Location 0 %414 = OpTypePointer Output %6 %413 = OpVariable %414 Output %416 = OpTypePointer Uniform %20 -%418 = OpConstant %5 -1.0 +%418 = OpConstant %4 -1.0 %419 = OpConstantComposite %6 %418 %418 %434 = OpTypePointer Uniform %8 %455 = OpVariable %407 Input @@ -7797,12 +7797,12 @@ OpDecorate %582 Location 0 %460 = OpVariable %461 Input %463 = OpVariable %410 Output %464 = OpVariable %410 Output -%467 = OpConstant %5 6.0 -%550 = OpTypePointer Input %4 +%467 = OpConstant %4 6.0 +%550 = OpTypePointer Input %5 %549 = OpVariable %550 Input %552 
= OpVariable %550 Input %554 = OpVariable %412 Output -%556 = OpTypePointer Output %4 +%556 = OpTypePointer Output %5 %555 = OpVariable %556 Output %557 = OpVariable %556 Output %559 = OpTypePointer Uniform %24 @@ -7812,30 +7812,30 @@ OpDecorate %582 Location 0 %580 = OpVariable %550 Input %582 = OpVariable %412 Output %585 = OpTypePointer Uniform %25 -%587 = OpConstantComposite %4 %272 %272 %272 -%588 = OpConstant %5 0.7 -%589 = OpConstantComposite %4 %78 %272 %588 -%590 = OpConstant %5 0.2 -%591 = OpConstantComposite %4 %590 %590 %590 -%593 = OpConstantNull %4 -%608 = OpTypePointer Uniform %4 +%587 = OpConstantComposite %5 %272 %272 %272 +%588 = OpConstant %4 0.7 +%589 = OpConstantComposite %5 %78 %272 %588 +%590 = OpConstant %4 0.2 +%591 = OpConstantComposite %5 %590 %590 %590 +%593 = OpConstantNull %5 +%608 = OpTypePointer Uniform %5 %617 = OpTypePointer Uniform %7 -%53 = OpFunction %4 None %54 -%52 = OpFunctionParameter %4 +%53 = OpFunction %5 None %54 +%52 = OpFunctionParameter %5 %51 = OpLabel OpBranch %60 %60 = OpLabel OpLine %3 5734 52 -%61 = OpVectorTimesScalar %4 %52 %55 +%61 = OpVectorTimesScalar %5 %52 %55 OpLine %3 5734 63 OpLine %3 5734 50 -%62 = OpFAdd %4 %61 %57 -%63 = OpFMul %4 %62 %52 +%62 = OpFAdd %5 %61 %57 +%63 = OpFMul %5 %62 %52 OpLine %3 5734 49 -%64 = OpFRem %4 %63 %59 +%64 = OpFRem %5 %63 %59 OpReturnValue %64 OpFunctionEnd -%67 = OpFunction %5 None %68 +%67 = OpFunction %4 None %68 %66 = OpFunctionParameter %6 %65 = OpLabel %89 = OpVariable %87 Function %90 @@ -7847,7 +7847,7 @@ OpBranch %97 OpLine %3 5737 13 OpLine %3 5738 24 %98 = OpVectorShuffle %6 %73 %73 1 1 -%99 = OpDot %5 %66 %98 +%99 = OpDot %4 %66 %98 %100 = OpCompositeConstruct %6 %99 %99 %101 = OpFAdd %6 %66 %100 %102 = OpExtInst %6 %1 Floor %101 @@ -7858,13 +7858,13 @@ OpLine %3 5739 14 %104 = OpFSub %6 %66 %103 %105 = OpLoad %6 %86 %106 = OpVectorShuffle %6 %73 %73 0 0 -%107 = OpDot %5 %105 %106 +%107 = OpDot %4 %105 %106 %108 = OpCompositeConstruct %6 %107 %107 %109 = 
OpFAdd %6 %104 %108 OpLine %3 5741 32 OpLine %3 5741 25 -%110 = OpCompositeExtract %5 %109 0 -%111 = OpCompositeExtract %5 %109 1 +%110 = OpCompositeExtract %4 %109 0 +%111 = OpCompositeExtract %4 %109 1 %113 = OpFOrdLessThan %112 %110 %111 %116 = OpCompositeConstruct %115 %113 %113 %114 = OpSelect %6 %116 %76 %75 @@ -7888,101 +7888,101 @@ OpLine %3 5743 5 OpStore %86 %124 OpLine %3 5744 31 %127 = OpAccessChain %125 %86 %126 -%128 = OpLoad %5 %127 +%128 = OpLoad %4 %127 OpLine %3 5744 51 %129 = OpAccessChain %125 %89 %126 -%130 = OpLoad %5 %129 +%130 = OpLoad %4 %129 OpLine %3 5744 31 -%131 = OpCompositeConstruct %4 %74 %130 %56 -%132 = OpCompositeConstruct %4 %128 %128 %128 -%133 = OpFAdd %4 %132 %131 +%131 = OpCompositeConstruct %5 %74 %130 %56 +%132 = OpCompositeConstruct %5 %128 %128 %128 +%133 = OpFAdd %5 %132 %131 OpLine %3 5744 22 -%134 = OpFunctionCall %4 %53 %133 +%134 = OpFunctionCall %5 %53 %133 OpLine %3 5744 22 %136 = OpAccessChain %125 %86 %135 -%137 = OpLoad %5 %136 -%138 = OpCompositeConstruct %4 %137 %137 %137 -%139 = OpFAdd %4 %134 %138 +%137 = OpLoad %4 %136 +%138 = OpCompositeConstruct %5 %137 %137 %137 +%139 = OpFAdd %5 %134 %138 OpLine %3 5744 84 %140 = OpAccessChain %125 %89 %135 -%141 = OpLoad %5 %140 +%141 = OpLoad %4 %140 OpLine %3 5744 22 -%142 = OpCompositeConstruct %4 %74 %141 %56 -%143 = OpFAdd %4 %139 %142 +%142 = OpCompositeConstruct %5 %74 %141 %56 +%143 = OpFAdd %5 %139 %142 OpLine %3 5744 13 -%144 = OpFunctionCall %4 %53 %143 +%144 = OpFunctionCall %5 %53 %143 OpLine %3 5745 28 -%145 = OpDot %5 %109 %109 +%145 = OpDot %4 %109 %109 %146 = OpLoad %7 %91 %147 = OpVectorShuffle %6 %146 %146 0 1 %148 = OpLoad %7 %91 %149 = OpVectorShuffle %6 %148 %148 0 1 -%150 = OpDot %5 %147 %149 +%150 = OpDot %4 %147 %149 %151 = OpLoad %7 %91 %152 = OpVectorShuffle %6 %151 %151 2 3 %153 = OpLoad %7 %91 %154 = OpVectorShuffle %6 %153 %153 2 3 -%155 = OpDot %5 %152 %154 -%156 = OpCompositeConstruct %4 %145 %150 %155 +%155 = OpDot %4 %152 %154 +%156 = 
OpCompositeConstruct %5 %145 %150 %155 OpLine %3 5745 28 -%157 = OpFSub %4 %79 %156 +%157 = OpFSub %5 %79 %156 OpLine %3 5745 24 -%158 = OpExtInst %4 %1 FMax %157 %80 +%158 = OpExtInst %5 %1 FMax %157 %80 OpLine %3 5745 5 OpStore %94 %158 OpLine %3 5746 9 -%159 = OpLoad %4 %94 -%160 = OpLoad %4 %94 -%161 = OpFMul %4 %159 %160 +%159 = OpLoad %5 %94 +%160 = OpLoad %5 %94 +%161 = OpFMul %5 %159 %160 OpLine %3 5746 5 OpStore %94 %161 OpLine %3 5747 9 -%162 = OpLoad %4 %94 -%163 = OpLoad %4 %94 -%164 = OpFMul %4 %162 %163 +%162 = OpLoad %5 %94 +%163 = OpLoad %5 %94 +%164 = OpFMul %5 %162 %163 OpLine %3 5747 5 OpStore %94 %164 OpLine %3 5748 18 -%165 = OpVectorShuffle %4 %73 %73 3 3 3 -%166 = OpFMul %4 %144 %165 -%167 = OpExtInst %4 %1 Fract %166 +%165 = OpVectorShuffle %5 %73 %73 3 3 3 +%166 = OpFMul %5 %144 %165 +%167 = OpExtInst %5 %1 Fract %166 OpLine %3 5748 13 -%168 = OpVectorTimesScalar %4 %167 %81 +%168 = OpVectorTimesScalar %5 %167 %81 OpLine %3 5748 37 OpLine %3 5748 13 -%169 = OpFSub %4 %168 %57 +%169 = OpFSub %5 %168 %57 OpLine %3 5749 13 -%170 = OpExtInst %4 %1 FAbs %169 +%170 = OpExtInst %5 %1 FAbs %169 OpLine %3 5749 22 OpLine %3 5749 13 -%171 = OpFSub %4 %170 %79 +%171 = OpFSub %5 %170 %79 OpLine %3 7169 24 OpLine %3 7169 14 -%172 = OpFAdd %4 %169 %79 -%173 = OpExtInst %4 %1 Floor %172 +%172 = OpFAdd %5 %169 %79 +%173 = OpExtInst %5 %1 Floor %172 OpLine %3 7170 14 -%174 = OpFSub %4 %169 %173 +%174 = OpFSub %5 %169 %173 OpLine %3 1 1 -%175 = OpLoad %4 %94 +%175 = OpLoad %5 %94 OpLine %3 7171 53 -%176 = OpFMul %4 %174 %174 -%177 = OpFMul %4 %171 %171 -%178 = OpFAdd %4 %176 %177 +%176 = OpFMul %5 %174 %174 +%177 = OpFMul %5 %171 %171 +%178 = OpFAdd %5 %176 %177 OpLine %3 7171 14 -%179 = OpVectorTimesScalar %4 %178 %82 +%179 = OpVectorTimesScalar %5 %178 %82 OpLine %3 7171 9 -%180 = OpFSub %4 %84 %179 -%181 = OpFMul %4 %175 %180 +%180 = OpFSub %5 %84 %179 +%181 = OpFMul %5 %175 %180 OpLine %3 7171 5 OpStore %94 %181 OpLine %3 7172 13 -%182 = 
OpCompositeExtract %5 %174 0 -%183 = OpCompositeExtract %5 %109 0 -%184 = OpFMul %5 %182 %183 -%185 = OpCompositeExtract %5 %171 0 -%186 = OpCompositeExtract %5 %109 1 -%187 = OpFMul %5 %185 %186 -%188 = OpFAdd %5 %184 %187 +%182 = OpCompositeExtract %4 %174 0 +%183 = OpCompositeExtract %4 %109 0 +%184 = OpFMul %4 %182 %183 +%185 = OpCompositeExtract %4 %171 0 +%186 = OpCompositeExtract %4 %109 1 +%187 = OpFMul %4 %185 %186 +%188 = OpFAdd %4 %184 %187 %189 = OpVectorShuffle %6 %174 %174 1 2 %190 = OpLoad %7 %91 %191 = OpVectorShuffle %6 %190 %190 0 2 @@ -7992,15 +7992,15 @@ OpLine %3 7172 13 %195 = OpVectorShuffle %6 %194 %194 1 3 %196 = OpFMul %6 %193 %195 %197 = OpFAdd %6 %192 %196 -%198 = OpCompositeConstruct %4 %188 %197 +%198 = OpCompositeConstruct %5 %188 %197 OpLine %3 7173 19 -%199 = OpLoad %4 %94 -%200 = OpDot %5 %199 %198 +%199 = OpLoad %5 %94 +%200 = OpDot %4 %199 %198 OpLine %3 7173 12 -%201 = OpFMul %5 %85 %200 +%201 = OpFMul %4 %85 %200 OpReturnValue %201 OpFunctionEnd -%204 = OpFunction %5 None %68 +%204 = OpFunction %4 None %68 %203 = OpFunctionParameter %6 %202 = OpLabel %214 = OpVariable %215 Function %74 @@ -8016,11 +8016,11 @@ OpStore %212 %220 OpLine %3 7182 17 OpLine %3 7183 14 OpLine %3 7184 15 -%221 = OpCompositeExtract %5 %211 0 -%222 = OpCompositeExtract %5 %211 1 -%223 = OpCompositeExtract %5 %211 1 -%224 = OpFNegate %5 %223 -%225 = OpCompositeExtract %5 %211 0 +%221 = OpCompositeExtract %4 %211 0 +%222 = OpCompositeExtract %4 %211 1 +%223 = OpCompositeExtract %4 %211 1 +%224 = OpFNegate %4 %223 +%225 = OpCompositeExtract %4 %211 0 %226 = OpCompositeConstruct %6 %221 %222 %227 = OpCompositeConstruct %6 %224 %225 %228 = OpCompositeConstruct %9 %226 %227 @@ -8042,14 +8042,14 @@ OpBranch %230 OpBranch %237 %237 = OpLabel OpLine %3 1 1 -%239 = OpLoad %5 %214 -%240 = OpLoad %5 %216 +%239 = OpLoad %4 %214 +%240 = OpLoad %4 %216 %241 = OpLoad %6 %212 OpLine %3 7187 21 -%242 = OpFunctionCall %5 %67 %241 +%242 = OpFunctionCall %4 %67 %241 OpLine 
%3 7187 13 -%243 = OpFMul %5 %240 %242 -%244 = OpFAdd %5 %239 %243 +%243 = OpFMul %4 %240 %242 +%244 = OpFAdd %4 %239 %243 OpLine %3 7187 9 OpStore %214 %244 OpLine %3 7188 13 @@ -8061,9 +8061,9 @@ OpLine %3 7188 13 OpLine %3 7188 9 OpStore %212 %248 OpLine %3 1 1 -%249 = OpLoad %5 %216 +%249 = OpLoad %4 %216 OpLine %3 7189 13 -%250 = OpFMul %5 %249 %78 +%250 = OpFMul %4 %249 %78 OpLine %3 7189 9 OpStore %216 %250 OpBranch %238 @@ -8079,25 +8079,25 @@ OpStore %217 %252 OpBranch %229 %230 = OpLabel OpLine %3 1 1 -%253 = OpLoad %5 %214 +%253 = OpLoad %4 %214 OpReturnValue %253 OpFunctionEnd -%257 = OpFunction %4 None %258 +%257 = OpFunction %5 None %258 %255 = OpFunctionParameter %6 %256 = OpFunctionParameter %6 %254 = OpLabel OpBranch %259 %259 = OpLabel OpLine %3 7220 9 -%260 = OpCompositeExtract %5 %255 0 -%261 = OpCompositeExtract %5 %256 0 -%262 = OpCompositeExtract %5 %256 1 +%260 = OpCompositeExtract %4 %255 0 +%261 = OpCompositeExtract %4 %256 0 +%262 = OpCompositeExtract %4 %256 1 OpLine %3 7221 49 -%263 = OpFunctionCall %5 %204 %255 +%263 = OpFunctionCall %4 %204 %255 OpLine %3 7219 12 -%264 = OpExtInst %5 %1 FMix %261 %262 %263 -%265 = OpCompositeExtract %5 %255 1 -%266 = OpCompositeConstruct %4 %260 %264 %265 +%264 = OpExtInst %4 %1 FMix %261 %262 %263 +%265 = OpCompositeExtract %4 %255 1 +%266 = OpCompositeConstruct %5 %260 %264 %265 OpReturnValue %266 OpFunctionEnd %270 = OpFunction %14 None %271 @@ -8107,41 +8107,41 @@ OpFunctionEnd OpBranch %278 %278 = OpLabel OpLine %3 7227 13 -%279 = OpFunctionCall %4 %257 %268 %269 +%279 = OpFunctionCall %5 %257 %268 %269 OpLine %3 7229 29 %280 = OpFAdd %6 %268 %273 OpLine %3 7229 15 -%281 = OpFunctionCall %4 %257 %280 %269 +%281 = OpFunctionCall %5 %257 %280 %269 OpLine %3 7229 15 -%282 = OpFSub %4 %281 %279 +%282 = OpFSub %5 %281 %279 OpLine %3 7230 29 %283 = OpFAdd %6 %268 %274 OpLine %3 7230 15 -%284 = OpFunctionCall %4 %257 %283 %269 +%284 = OpFunctionCall %5 %257 %283 %269 OpLine %3 7230 15 -%285 = OpFSub %4 
%284 %279 +%285 = OpFSub %5 %284 %279 OpLine %3 7231 29 %286 = OpFAdd %6 %268 %276 OpLine %3 7231 15 -%287 = OpFunctionCall %4 %257 %286 %269 +%287 = OpFunctionCall %5 %257 %286 %269 OpLine %3 7231 15 -%288 = OpFSub %4 %287 %279 +%288 = OpFSub %5 %287 %279 OpLine %3 7232 29 %289 = OpFAdd %6 %268 %277 OpLine %3 7232 15 -%290 = OpFunctionCall %4 %257 %289 %269 +%290 = OpFunctionCall %5 %257 %289 %269 OpLine %3 7232 15 -%291 = OpFSub %4 %290 %279 +%291 = OpFSub %5 %290 %279 OpLine %3 7234 14 -%292 = OpExtInst %4 %1 Cross %285 %282 -%293 = OpExtInst %4 %1 Normalize %292 +%292 = OpExtInst %5 %1 Cross %285 %282 +%293 = OpExtInst %5 %1 Normalize %292 OpLine %3 7235 14 -%294 = OpExtInst %4 %1 Cross %291 %288 -%295 = OpExtInst %4 %1 Normalize %294 +%294 = OpExtInst %5 %1 Cross %291 %288 +%295 = OpExtInst %5 %1 Normalize %294 OpLine %3 7237 14 -%296 = OpFAdd %4 %293 %295 +%296 = OpFAdd %5 %293 %295 OpLine %3 7237 13 -%297 = OpVectorTimesScalar %4 %296 %78 +%297 = OpVectorTimesScalar %5 %296 %78 OpLine %3 7239 12 %298 = OpCompositeConstruct %14 %279 %297 OpReturnValue %298 @@ -8154,50 +8154,50 @@ OpFunctionEnd OpBranch %305 %305 = OpLabel OpLine %3 7244 9 -%306 = OpConvertUToF %5 %300 +%306 = OpConvertUToF %4 %300 %307 = OpCompositeExtract %8 %301 0 OpLine %3 7244 9 %308 = OpIAdd %8 %307 %126 -%309 = OpConvertUToF %5 %308 -%310 = OpFRem %5 %306 %309 +%309 = OpConvertUToF %4 %308 +%310 = OpFRem %4 %306 %309 %311 = OpCompositeExtract %8 %301 0 OpLine %3 7243 12 %312 = OpIAdd %8 %311 %126 %313 = OpUDiv %8 %300 %312 -%314 = OpConvertUToF %5 %313 +%314 = OpConvertUToF %4 %313 %315 = OpCompositeConstruct %6 %310 %314 %316 = OpConvertSToF %6 %302 %317 = OpFAdd %6 %315 %316 OpReturnValue %317 OpFunctionEnd -%320 = OpFunction %4 None %321 +%320 = OpFunction %5 None %321 %319 = OpFunctionParameter %6 %318 = OpLabel OpBranch %328 %328 = OpLabel OpLine %3 7413 9 -%329 = OpFunctionCall %5 %67 %319 +%329 = OpFunctionCall %4 %67 %319 OpLine %3 7413 9 -%330 = OpFMul %5 %329 %78 +%330 = 
OpFMul %4 %329 %78 OpLine %3 7413 9 -%331 = OpFAdd %5 %330 %78 +%331 = OpFAdd %4 %330 %78 OpLine %3 7414 17 %332 = OpFAdd %6 %319 %324 OpLine %3 7414 9 -%333 = OpFunctionCall %5 %67 %332 +%333 = OpFunctionCall %4 %67 %332 OpLine %3 7414 9 -%334 = OpFMul %5 %333 %78 +%334 = OpFMul %4 %333 %78 OpLine %3 7414 9 -%335 = OpFAdd %5 %334 %78 +%335 = OpFAdd %4 %334 %78 OpLine %3 7415 17 %336 = OpFAdd %6 %319 %327 OpLine %3 7415 9 -%337 = OpFunctionCall %5 %67 %336 +%337 = OpFunctionCall %4 %67 %336 OpLine %3 7415 9 -%338 = OpFMul %5 %337 %78 +%338 = OpFMul %4 %337 %78 OpLine %3 7412 12 -%339 = OpFAdd %5 %338 %78 -%340 = OpCompositeConstruct %4 %331 %335 %339 +%339 = OpFAdd %4 %338 %78 +%340 = OpCompositeConstruct %5 %331 %335 %339 OpReturnValue %340 OpFunctionEnd %345 = OpFunction %2 None %346 @@ -8310,14 +8310,14 @@ OpLine %3 7304 18 %422 = OpUDiv %8 %421 %351 OpLine %3 7304 13 %423 = OpUMod %8 %422 %350 -%424 = OpConvertUToF %5 %423 +%424 = OpConvertUToF %4 %423 OpLine %3 7305 19 %425 = OpIAdd %8 %408 %126 OpLine %3 7305 18 %426 = OpUDiv %8 %425 %351 OpLine %3 7305 13 %427 = OpUMod %8 %426 %350 -%428 = OpConvertUToF %5 %427 +%428 = OpConvertUToF %4 %427 OpLine %3 7306 14 %429 = OpCompositeConstruct %6 %424 %428 OpLine %3 7308 30 @@ -8327,19 +8327,19 @@ OpLine %3 7308 30 OpLine %3 7308 20 %432 = OpCompositeConstruct %7 %431 %74 %56 OpLine %3 7311 21 -%433 = OpCompositeExtract %5 %429 0 +%433 = OpCompositeExtract %4 %429 0 OpLine %3 7311 21 %435 = OpAccessChain %434 %417 %351 %436 = OpLoad %8 %435 -%437 = OpConvertUToF %5 %436 -%438 = OpFMul %5 %433 %437 -%439 = OpCompositeExtract %5 %429 1 +%437 = OpConvertUToF %4 %436 +%438 = OpFMul %4 %433 %437 +%439 = OpCompositeExtract %4 %429 1 OpLine %3 7311 17 %440 = OpAccessChain %434 %417 %351 %441 = OpLoad %8 %440 -%442 = OpConvertUToF %5 %441 -%443 = OpFMul %5 %439 %442 -%444 = OpFAdd %5 %438 %443 +%442 = OpConvertUToF %4 %441 +%443 = OpFMul %4 %439 %442 +%444 = OpFAdd %4 %438 %443 %445 = OpConvertFToU %8 %444 OpLine %3 7311 17 
%446 = OpAccessChain %434 %417 %352 @@ -8368,14 +8368,14 @@ OpBranch %470 %470 = OpLabel OpLine %3 7324 17 %471 = OpCompositeExtract %6 %454 2 -%472 = OpCompositeExtract %5 %471 0 +%472 = OpCompositeExtract %4 %471 0 OpLine %3 7324 17 %473 = OpAccessChain %434 %466 %351 %474 = OpLoad %8 %473 -%475 = OpConvertUToF %5 %474 -%476 = OpFMul %5 %472 %475 +%475 = OpConvertUToF %4 %474 +%476 = OpFMul %4 %472 %475 %477 = OpCompositeExtract %6 %454 2 -%478 = OpCompositeExtract %5 %477 1 +%478 = OpCompositeExtract %4 %477 1 OpLine %3 7324 70 %479 = OpAccessChain %434 %466 %351 %480 = OpLoad %8 %479 @@ -8383,19 +8383,19 @@ OpLine %3 7324 13 %481 = OpAccessChain %434 %466 %351 %482 = OpLoad %8 %481 %483 = OpIMul %8 %480 %482 -%484 = OpConvertUToF %5 %483 -%485 = OpFMul %5 %478 %484 -%486 = OpFAdd %5 %476 %485 +%484 = OpConvertUToF %4 %483 +%485 = OpFMul %4 %478 %484 +%486 = OpFAdd %4 %476 %485 %487 = OpConvertFToU %8 %486 OpLine %3 7324 13 %488 = OpAccessChain %434 %466 %352 %489 = OpLoad %8 %488 %490 = OpIAdd %8 %487 %489 OpLine %3 7325 32 -%491 = OpConvertUToF %5 %490 +%491 = OpConvertUToF %4 %490 OpLine %3 7325 22 -%492 = OpFDiv %5 %491 %467 -%493 = OpExtInst %5 %1 Floor %492 +%492 = OpFDiv %4 %491 %467 +%493 = OpExtInst %4 %1 Floor %492 %494 = OpConvertFToU %8 %493 OpLine %3 7326 22 %495 = OpUMod %8 %490 %349 @@ -8417,43 +8417,43 @@ OpSelectionMerge %504 None OpSwitch %495 %511 0 %505 1 %506 2 %507 3 %508 4 %509 5 %510 %505 = OpLabel OpLine %3 7334 37 -%512 = OpCompositeExtract %4 %503 0 -%513 = OpCompositeExtract %5 %512 0 +%512 = OpCompositeExtract %5 %503 0 +%513 = OpCompositeExtract %4 %512 0 OpLine %3 7334 20 OpStore %468 %513 OpBranch %504 %506 = OpLabel OpLine %3 7335 37 -%514 = OpCompositeExtract %4 %503 0 -%515 = OpCompositeExtract %5 %514 1 +%514 = OpCompositeExtract %5 %503 0 +%515 = OpCompositeExtract %4 %514 1 OpLine %3 7335 20 OpStore %468 %515 OpBranch %504 %507 = OpLabel OpLine %3 7336 37 -%516 = OpCompositeExtract %4 %503 0 -%517 = OpCompositeExtract %5 
%516 2 +%516 = OpCompositeExtract %5 %503 0 +%517 = OpCompositeExtract %4 %516 2 OpLine %3 7336 20 OpStore %468 %517 OpBranch %504 %508 = OpLabel OpLine %3 7337 37 -%518 = OpCompositeExtract %4 %503 1 -%519 = OpCompositeExtract %5 %518 0 +%518 = OpCompositeExtract %5 %503 1 +%519 = OpCompositeExtract %4 %518 0 OpLine %3 7337 20 OpStore %468 %519 OpBranch %504 %509 = OpLabel OpLine %3 7338 37 -%520 = OpCompositeExtract %4 %503 1 -%521 = OpCompositeExtract %5 %520 1 +%520 = OpCompositeExtract %5 %503 1 +%521 = OpCompositeExtract %4 %520 1 OpLine %3 7338 20 OpStore %468 %521 OpBranch %504 %510 = OpLabel OpLine %3 7339 37 -%522 = OpCompositeExtract %4 %503 1 -%523 = OpCompositeExtract %5 %522 2 +%522 = OpCompositeExtract %5 %503 1 +%523 = OpCompositeExtract %4 %522 2 OpLine %3 7339 20 OpStore %468 %523 OpBranch %504 @@ -8502,7 +8502,7 @@ OpLine %3 7356 13 OpLine %3 7356 5 OpStore %469 %540 OpLine %3 7365 27 -%541 = OpLoad %5 %468 +%541 = OpLoad %4 %468 %542 = OpBitcast %8 %541 OpLine %3 7366 12 %543 = OpLoad %8 %469 @@ -8515,8 +8515,8 @@ OpReturn OpFunctionEnd %558 = OpFunction %2 None %346 %547 = OpLabel -%551 = OpLoad %4 %549 -%553 = OpLoad %4 %552 +%551 = OpLoad %5 %549 +%553 = OpLoad %5 %552 %548 = OpCompositeConstruct %14 %551 %553 %560 = OpAccessChain %559 %39 %135 OpBranch %561 @@ -8524,20 +8524,20 @@ OpBranch %561 OpLine %3 7397 25 %563 = OpAccessChain %562 %560 %126 %564 = OpLoad %23 %563 -%565 = OpCompositeExtract %4 %548 0 +%565 = OpCompositeExtract %5 %548 0 OpLine %3 7397 25 %566 = OpCompositeConstruct %7 %565 %56 %567 = OpMatrixTimesVector %7 %564 %566 OpLine %3 7398 18 -%568 = OpCompositeExtract %4 %548 1 +%568 = OpCompositeExtract %5 %548 1 OpLine %3 7399 12 -%569 = OpCompositeExtract %4 %548 0 +%569 = OpCompositeExtract %5 %548 0 %570 = OpCompositeConstruct %26 %567 %568 %569 %571 = OpCompositeExtract %7 %570 0 OpStore %554 %571 -%572 = OpCompositeExtract %4 %570 1 +%572 = OpCompositeExtract %5 %570 1 OpStore %555 %572 -%573 = OpCompositeExtract %4 
%570 2 +%573 = OpCompositeExtract %5 %570 2 OpStore %557 %573 OpReturn OpFunctionEnd @@ -8545,8 +8545,8 @@ OpFunctionEnd %574 = OpLabel %592 = OpVariable %95 Function %593 %577 = OpLoad %7 %576 -%579 = OpLoad %4 %578 -%581 = OpLoad %4 %580 +%579 = OpLoad %5 %578 +%581 = OpLoad %5 %580 %575 = OpCompositeConstruct %26 %577 %579 %581 %584 = OpAccessChain %559 %39 %135 %586 = OpAccessChain %585 %42 %135 @@ -8554,70 +8554,70 @@ OpBranch %594 %594 = OpLabel OpLine %3 7421 28 OpLine %3 7421 17 -%595 = OpCompositeExtract %4 %575 2 -%596 = OpExtInst %4 %1 Fract %595 -%597 = OpExtInst %4 %1 SmoothStep %80 %587 %596 +%595 = OpCompositeExtract %5 %575 2 +%596 = OpExtInst %5 %1 Fract %595 +%597 = OpExtInst %5 %1 SmoothStep %80 %587 %596 OpLine %3 7421 5 OpStore %592 %597 OpLine %3 7422 17 OpLine %3 7422 13 %598 = OpAccessChain %125 %592 %135 -%599 = OpLoad %5 %598 +%599 = OpLoad %4 %598 %600 = OpAccessChain %125 %592 %126 -%601 = OpLoad %5 %600 -%602 = OpFMul %5 %599 %601 +%601 = OpLoad %4 %600 +%602 = OpFMul %4 %599 %601 %603 = OpAccessChain %125 %592 %350 -%604 = OpLoad %5 %603 -%605 = OpFMul %5 %602 %604 -%606 = OpCompositeConstruct %4 %605 %605 %605 -%607 = OpExtInst %4 %1 FMix %589 %591 %606 +%604 = OpLoad %4 %603 +%605 = OpFMul %4 %602 %604 +%606 = OpCompositeConstruct %5 %605 %605 %605 +%607 = OpExtInst %5 %1 FMix %589 %591 %606 OpLine %3 7422 5 OpStore %592 %607 OpLine %3 7425 25 %609 = OpAccessChain %608 %586 %126 -%610 = OpLoad %4 %609 -%611 = OpVectorTimesScalar %4 %610 %272 +%610 = OpLoad %5 %609 +%611 = OpVectorTimesScalar %5 %610 %272 OpLine %3 7427 21 %612 = OpAccessChain %608 %586 %135 -%613 = OpLoad %4 %612 -%614 = OpCompositeExtract %4 %575 2 -%615 = OpFSub %4 %613 %614 -%616 = OpExtInst %4 %1 Normalize %615 +%613 = OpLoad %5 %612 +%614 = OpCompositeExtract %5 %575 2 +%615 = OpFSub %5 %613 %614 +%616 = OpExtInst %5 %1 Normalize %615 OpLine %3 7428 20 %618 = OpAccessChain %617 %584 %135 %619 = OpLoad %7 %618 -%620 = OpVectorShuffle %4 %619 %619 0 1 2 -%621 = 
OpCompositeExtract %4 %575 2 -%622 = OpFSub %4 %620 %621 -%623 = OpExtInst %4 %1 Normalize %622 +%620 = OpVectorShuffle %5 %619 %619 0 1 2 +%621 = OpCompositeExtract %5 %575 2 +%622 = OpFSub %5 %620 %621 +%623 = OpExtInst %5 %1 Normalize %622 OpLine %3 7429 20 -%624 = OpFAdd %4 %623 %616 -%625 = OpExtInst %4 %1 Normalize %624 +%624 = OpFAdd %5 %623 %616 +%625 = OpExtInst %5 %1 Normalize %624 OpLine %3 7431 32 -%626 = OpCompositeExtract %4 %575 1 -%627 = OpDot %5 %626 %616 +%626 = OpCompositeExtract %5 %575 1 +%627 = OpDot %4 %626 %616 OpLine %3 7431 28 -%628 = OpExtInst %5 %1 FMax %627 %74 +%628 = OpExtInst %4 %1 FMax %627 %74 OpLine %3 7432 25 %629 = OpAccessChain %608 %586 %126 -%630 = OpLoad %4 %629 -%631 = OpVectorTimesScalar %4 %630 %628 +%630 = OpLoad %5 %629 +%631 = OpVectorTimesScalar %5 %630 %628 OpLine %3 7434 37 -%632 = OpCompositeExtract %4 %575 1 -%633 = OpDot %5 %632 %625 +%632 = OpCompositeExtract %5 %575 1 +%633 = OpDot %4 %632 %625 OpLine %3 7434 33 -%634 = OpExtInst %5 %1 FMax %633 %74 +%634 = OpExtInst %4 %1 FMax %633 %74 OpLine %3 7434 29 -%635 = OpExtInst %5 %1 Pow %634 %323 +%635 = OpExtInst %4 %1 Pow %634 %323 OpLine %3 7435 26 %636 = OpAccessChain %608 %586 %126 -%637 = OpLoad %4 %636 -%638 = OpVectorTimesScalar %4 %637 %635 +%637 = OpLoad %5 %636 +%638 = OpVectorTimesScalar %5 %637 %635 OpLine %3 7437 18 -%639 = OpFAdd %4 %611 %631 -%640 = OpFAdd %4 %639 %638 -%641 = OpLoad %4 %592 -%642 = OpFMul %4 %640 %641 +%639 = OpFAdd %5 %611 %631 +%640 = OpFAdd %5 %639 %638 +%641 = OpLoad %5 %592 +%642 = OpFMul %5 %640 %641 OpLine %3 7439 12 %643 = OpCompositeConstruct %7 %642 %56 OpStore %582 %643 diff --git a/naga/tests/out/spv/debug-symbol-simple.spvasm b/naga/tests/out/spv/debug-symbol-simple.spvasm index b2fd1f2607..e525177f28 100644 --- a/naga/tests/out/spv/debug-symbol-simple.spvasm +++ b/naga/tests/out/spv/debug-symbol-simple.spvasm @@ -72,24 +72,24 @@ OpDecorate %43 BuiltIn FragCoord OpDecorate %46 Location 0 OpDecorate %48 Location 0 %2 = 
OpTypeVoid -%5 = OpTypeFloat 32 -%4 = OpTypeVector %5 3 -%6 = OpTypeStruct %4 %4 -%7 = OpTypeVector %5 4 -%8 = OpTypeStruct %7 %4 +%4 = OpTypeFloat 32 +%5 = OpTypeVector %4 3 +%6 = OpTypeStruct %5 %5 +%7 = OpTypeVector %4 4 +%8 = OpTypeStruct %7 %5 %9 = OpTypeInt 32 1 -%13 = OpTypePointer Input %4 +%13 = OpTypePointer Input %5 %12 = OpVariable %13 Input %15 = OpVariable %13 Input %18 = OpTypePointer Output %7 %17 = OpVariable %18 Output -%20 = OpTypePointer Output %4 +%20 = OpTypePointer Output %5 %19 = OpVariable %20 Output %22 = OpTypeFunction %2 -%23 = OpConstant %5 1.0 +%23 = OpConstant %4 1.0 %25 = OpTypePointer Function %8 %26 = OpConstantNull %8 -%28 = OpTypePointer Function %4 +%28 = OpTypePointer Function %5 %31 = OpTypeInt 32 0 %30 = OpConstant %31 1 %33 = OpTypePointer Function %7 @@ -100,30 +100,30 @@ OpDecorate %48 Location 0 %48 = OpVariable %18 Output %50 = OpConstant %9 0 %51 = OpConstant %9 10 -%52 = OpConstant %5 0.001 -%53 = OpConstant %5 0.002 +%52 = OpConstant %4 0.001 +%53 = OpConstant %4 0.002 %54 = OpConstant %9 1 -%56 = OpConstantNull %4 +%56 = OpConstantNull %5 %58 = OpTypePointer Function %9 -%60 = OpTypePointer Function %5 -%61 = OpConstantNull %5 +%60 = OpTypePointer Function %4 +%61 = OpConstantNull %4 %69 = OpTypeBool -%77 = OpTypePointer Function %5 +%77 = OpTypePointer Function %4 %21 = OpFunction %2 None %22 %10 = OpLabel %24 = OpVariable %25 Function %26 -%14 = OpLoad %4 %12 -%16 = OpLoad %4 %15 +%14 = OpLoad %5 %12 +%16 = OpLoad %5 %15 %11 = OpCompositeConstruct %6 %14 %16 OpBranch %27 %27 = OpLabel OpLine %3 16 5 -%29 = OpCompositeExtract %4 %11 1 +%29 = OpCompositeExtract %5 %11 1 OpLine %3 16 5 %32 = OpAccessChain %28 %24 %30 OpStore %32 %29 OpLine %3 17 5 -%34 = OpCompositeExtract %4 %11 0 +%34 = OpCompositeExtract %5 %11 0 OpLine %3 17 25 %35 = OpCompositeConstruct %7 %34 %23 OpLine %3 17 5 @@ -133,7 +133,7 @@ OpLine %3 1 1 %38 = OpLoad %8 %24 %39 = OpCompositeExtract %7 %38 0 OpStore %17 %39 -%40 = OpCompositeExtract %4 %38 
1 +%40 = OpCompositeExtract %5 %38 1 OpStore %19 %40 OpReturn OpFunctionEnd @@ -143,12 +143,12 @@ OpFunctionEnd %57 = OpVariable %58 Function %50 %59 = OpVariable %60 Function %61 %45 = OpLoad %7 %43 -%47 = OpLoad %4 %46 +%47 = OpLoad %5 %46 %42 = OpCompositeConstruct %8 %45 %47 OpBranch %62 %62 = OpLabel OpLine %3 25 17 -%63 = OpCompositeExtract %4 %42 1 +%63 = OpCompositeExtract %5 %42 1 OpLine %3 25 5 OpStore %55 %63 OpBranch %64 @@ -171,26 +171,26 @@ OpBranch %73 %73 = OpLabel OpLine %3 27 18 %75 = OpLoad %9 %57 -%76 = OpConvertSToF %5 %75 +%76 = OpConvertSToF %4 %75 OpLine %3 27 9 OpStore %59 %76 OpLine %3 28 9 -%78 = OpLoad %5 %59 +%78 = OpLoad %4 %59 OpLine %3 28 9 -%79 = OpFMul %5 %78 %52 +%79 = OpFMul %4 %78 %52 %80 = OpAccessChain %77 %55 %36 -%81 = OpLoad %5 %80 -%82 = OpFAdd %5 %81 %79 +%81 = OpLoad %4 %80 +%82 = OpFAdd %4 %81 %79 OpLine %3 28 9 %83 = OpAccessChain %77 %55 %36 OpStore %83 %82 OpLine %3 29 9 -%84 = OpLoad %5 %59 +%84 = OpLoad %4 %59 OpLine %3 29 9 -%85 = OpFMul %5 %84 %53 +%85 = OpFMul %4 %84 %53 %86 = OpAccessChain %77 %55 %30 -%87 = OpLoad %5 %86 -%88 = OpFAdd %5 %87 %85 +%87 = OpLoad %4 %86 +%88 = OpFAdd %4 %87 %85 OpLine %3 29 9 %89 = OpAccessChain %77 %55 %30 OpStore %89 %88 @@ -206,7 +206,7 @@ OpStore %57 %91 OpBranch %64 %65 = OpLabel OpLine %3 1 1 -%92 = OpLoad %4 %55 +%92 = OpLoad %5 %55 OpLine %3 32 12 %93 = OpCompositeConstruct %7 %92 %23 OpStore %48 %93 diff --git a/naga/tests/out/spv/debug-symbol-terrain.spvasm b/naga/tests/out/spv/debug-symbol-terrain.spvasm index fd8e7f5df3..38d8984d0f 100644 --- a/naga/tests/out/spv/debug-symbol-terrain.spvasm +++ b/naga/tests/out/spv/debug-symbol-terrain.spvasm @@ -488,17 +488,17 @@ OpDecorate %578 Location 0 OpDecorate %580 Location 1 OpDecorate %582 Location 0 %2 = OpTypeVoid -%5 = OpTypeFloat 32 -%4 = OpTypeVector %5 3 -%6 = OpTypeVector %5 2 -%7 = OpTypeVector %5 4 +%4 = OpTypeFloat 32 +%5 = OpTypeVector %4 3 +%6 = OpTypeVector %4 2 +%7 = OpTypeVector %4 4 %8 = OpTypeInt 32 0 %9 = 
OpTypeMatrix %6 2 %10 = OpTypeVector %8 2 %12 = OpTypeInt 32 1 %11 = OpTypeVector %12 2 %13 = OpTypeStruct %10 %11 %6 -%14 = OpTypeStruct %4 %4 +%14 = OpTypeStruct %5 %5 %15 = OpTypeRuntimeArray %14 %16 = OpTypeStruct %15 %17 = OpTypeRuntimeArray %8 @@ -509,9 +509,9 @@ OpDecorate %582 Location 0 %22 = OpTypeStruct %8 %8 %23 = OpTypeMatrix %7 4 %24 = OpTypeStruct %7 %23 -%25 = OpTypeStruct %4 %4 -%26 = OpTypeStruct %7 %4 %4 -%27 = OpTypeImage %5 2D 0 0 0 1 Unknown +%25 = OpTypeStruct %5 %5 +%26 = OpTypeStruct %7 %5 %5 +%27 = OpTypeImage %4 2D 0 0 0 1 Unknown %28 = OpTypeSampler %30 = OpTypeStruct %13 %31 = OpTypePointer Uniform %30 @@ -535,67 +535,67 @@ OpDecorate %582 Location 0 %47 = OpVariable %48 UniformConstant %49 = OpVariable %46 UniformConstant %50 = OpVariable %48 UniformConstant -%54 = OpTypeFunction %4 %4 -%55 = OpConstant %5 34.0 -%56 = OpConstant %5 1.0 -%57 = OpConstantComposite %4 %56 %56 %56 -%58 = OpConstant %5 289.0 -%59 = OpConstantComposite %4 %58 %58 %58 -%68 = OpTypeFunction %5 %6 -%69 = OpConstant %5 0.21132487 -%70 = OpConstant %5 0.36602542 -%71 = OpConstant %5 -0.57735026 -%72 = OpConstant %5 0.024390243 +%54 = OpTypeFunction %5 %5 +%55 = OpConstant %4 34.0 +%56 = OpConstant %4 1.0 +%57 = OpConstantComposite %5 %56 %56 %56 +%58 = OpConstant %4 289.0 +%59 = OpConstantComposite %5 %58 %58 %58 +%68 = OpTypeFunction %4 %6 +%69 = OpConstant %4 0.21132487 +%70 = OpConstant %4 0.36602542 +%71 = OpConstant %4 -0.57735026 +%72 = OpConstant %4 0.024390243 %73 = OpConstantComposite %7 %69 %70 %71 %72 -%74 = OpConstant %5 0.0 +%74 = OpConstant %4 0.0 %75 = OpConstantComposite %6 %56 %74 %76 = OpConstantComposite %6 %74 %56 %77 = OpConstantComposite %6 %58 %58 -%78 = OpConstant %5 0.5 -%79 = OpConstantComposite %4 %78 %78 %78 -%80 = OpConstantComposite %4 %74 %74 %74 -%81 = OpConstant %5 2.0 -%82 = OpConstant %5 0.85373473 -%83 = OpConstant %5 1.7928429 -%84 = OpConstantComposite %4 %83 %83 %83 -%85 = OpConstant %5 130.0 +%78 = OpConstant %4 0.5 +%79 = 
OpConstantComposite %5 %78 %78 %78 +%80 = OpConstantComposite %5 %74 %74 %74 +%81 = OpConstant %4 2.0 +%82 = OpConstant %4 0.85373473 +%83 = OpConstant %4 1.7928429 +%84 = OpConstantComposite %5 %83 %83 %83 +%85 = OpConstant %4 130.0 %87 = OpTypePointer Function %6 %88 = OpConstantNull %6 %90 = OpConstantNull %6 %92 = OpTypePointer Function %7 %93 = OpConstantNull %7 -%95 = OpTypePointer Function %4 -%96 = OpConstantNull %4 +%95 = OpTypePointer Function %5 +%96 = OpConstantNull %5 %112 = OpTypeBool %115 = OpTypeVector %112 2 -%125 = OpTypePointer Function %5 +%125 = OpTypePointer Function %4 %126 = OpConstant %8 1 %135 = OpConstant %8 0 %205 = OpConstant %8 5 -%206 = OpConstant %5 0.01 -%207 = OpConstant %5 100.0 +%206 = OpConstant %4 0.01 +%207 = OpConstant %4 100.0 %208 = OpConstantComposite %6 %207 %207 -%209 = OpConstant %5 0.87758255 -%210 = OpConstant %5 0.47942555 +%209 = OpConstant %4 0.87758255 +%210 = OpConstant %4 0.47942555 %211 = OpConstantComposite %6 %209 %210 %213 = OpConstantNull %6 -%215 = OpTypePointer Function %5 +%215 = OpTypePointer Function %4 %218 = OpTypePointer Function %8 -%258 = OpTypeFunction %4 %6 %6 +%258 = OpTypeFunction %5 %6 %6 %271 = OpTypeFunction %14 %6 %6 -%272 = OpConstant %5 0.1 +%272 = OpConstant %4 0.1 %273 = OpConstantComposite %6 %272 %74 %274 = OpConstantComposite %6 %74 %272 -%275 = OpConstant %5 -0.1 +%275 = OpConstant %4 -0.1 %276 = OpConstantComposite %6 %275 %74 %277 = OpConstantComposite %6 %74 %275 %304 = OpTypeFunction %6 %8 %10 %11 -%321 = OpTypeFunction %4 %6 -%322 = OpConstant %5 23.0 -%323 = OpConstant %5 32.0 +%321 = OpTypeFunction %5 %6 +%322 = OpConstant %4 23.0 +%323 = OpConstant %4 32.0 %324 = OpConstantComposite %6 %322 %323 -%325 = OpConstant %5 -43.0 -%326 = OpConstant %5 3.0 +%325 = OpConstant %4 -43.0 +%326 = OpConstant %4 3.0 %327 = OpConstantComposite %6 %325 %326 %343 = OpTypePointer Input %19 %342 = OpVariable %343 Input @@ -622,7 +622,7 @@ OpDecorate %582 Location 0 %414 = OpTypePointer Output 
%6 %413 = OpVariable %414 Output %416 = OpTypePointer Uniform %20 -%418 = OpConstant %5 -1.0 +%418 = OpConstant %4 -1.0 %419 = OpConstantComposite %6 %418 %418 %434 = OpTypePointer Uniform %8 %455 = OpVariable %407 Input @@ -632,12 +632,12 @@ OpDecorate %582 Location 0 %460 = OpVariable %461 Input %463 = OpVariable %410 Output %464 = OpVariable %410 Output -%467 = OpConstant %5 6.0 -%550 = OpTypePointer Input %4 +%467 = OpConstant %4 6.0 +%550 = OpTypePointer Input %5 %549 = OpVariable %550 Input %552 = OpVariable %550 Input %554 = OpVariable %412 Output -%556 = OpTypePointer Output %4 +%556 = OpTypePointer Output %5 %555 = OpVariable %556 Output %557 = OpVariable %556 Output %559 = OpTypePointer Uniform %24 @@ -647,30 +647,30 @@ OpDecorate %582 Location 0 %580 = OpVariable %550 Input %582 = OpVariable %412 Output %585 = OpTypePointer Uniform %25 -%587 = OpConstantComposite %4 %272 %272 %272 -%588 = OpConstant %5 0.7 -%589 = OpConstantComposite %4 %78 %272 %588 -%590 = OpConstant %5 0.2 -%591 = OpConstantComposite %4 %590 %590 %590 -%593 = OpConstantNull %4 -%608 = OpTypePointer Uniform %4 +%587 = OpConstantComposite %5 %272 %272 %272 +%588 = OpConstant %4 0.7 +%589 = OpConstantComposite %5 %78 %272 %588 +%590 = OpConstant %4 0.2 +%591 = OpConstantComposite %5 %590 %590 %590 +%593 = OpConstantNull %5 +%608 = OpTypePointer Uniform %5 %617 = OpTypePointer Uniform %7 -%53 = OpFunction %4 None %54 -%52 = OpFunctionParameter %4 +%53 = OpFunction %5 None %54 +%52 = OpFunctionParameter %5 %51 = OpLabel OpBranch %60 %60 = OpLabel OpLine %3 10 52 -%61 = OpVectorTimesScalar %4 %52 %55 +%61 = OpVectorTimesScalar %5 %52 %55 OpLine %3 10 63 OpLine %3 10 50 -%62 = OpFAdd %4 %61 %57 -%63 = OpFMul %4 %62 %52 +%62 = OpFAdd %5 %61 %57 +%63 = OpFMul %5 %62 %52 OpLine %3 10 49 -%64 = OpFRem %4 %63 %59 +%64 = OpFRem %5 %63 %59 OpReturnValue %64 OpFunctionEnd -%67 = OpFunction %5 None %68 +%67 = OpFunction %4 None %68 %66 = OpFunctionParameter %6 %65 = OpLabel %89 = OpVariable %87 
Function %90 @@ -682,7 +682,7 @@ OpBranch %97 OpLine %3 13 13 OpLine %3 14 24 %98 = OpVectorShuffle %6 %73 %73 1 1 -%99 = OpDot %5 %66 %98 +%99 = OpDot %4 %66 %98 %100 = OpCompositeConstruct %6 %99 %99 %101 = OpFAdd %6 %66 %100 %102 = OpExtInst %6 %1 Floor %101 @@ -693,13 +693,13 @@ OpLine %3 15 14 %104 = OpFSub %6 %66 %103 %105 = OpLoad %6 %86 %106 = OpVectorShuffle %6 %73 %73 0 0 -%107 = OpDot %5 %105 %106 +%107 = OpDot %4 %105 %106 %108 = OpCompositeConstruct %6 %107 %107 %109 = OpFAdd %6 %104 %108 OpLine %3 17 32 OpLine %3 17 25 -%110 = OpCompositeExtract %5 %109 0 -%111 = OpCompositeExtract %5 %109 1 +%110 = OpCompositeExtract %4 %109 0 +%111 = OpCompositeExtract %4 %109 1 %113 = OpFOrdLessThan %112 %110 %111 %116 = OpCompositeConstruct %115 %113 %113 %114 = OpSelect %6 %116 %76 %75 @@ -723,101 +723,101 @@ OpLine %3 19 5 OpStore %86 %124 OpLine %3 20 31 %127 = OpAccessChain %125 %86 %126 -%128 = OpLoad %5 %127 +%128 = OpLoad %4 %127 OpLine %3 20 51 %129 = OpAccessChain %125 %89 %126 -%130 = OpLoad %5 %129 +%130 = OpLoad %4 %129 OpLine %3 20 31 -%131 = OpCompositeConstruct %4 %74 %130 %56 -%132 = OpCompositeConstruct %4 %128 %128 %128 -%133 = OpFAdd %4 %132 %131 +%131 = OpCompositeConstruct %5 %74 %130 %56 +%132 = OpCompositeConstruct %5 %128 %128 %128 +%133 = OpFAdd %5 %132 %131 OpLine %3 20 22 -%134 = OpFunctionCall %4 %53 %133 +%134 = OpFunctionCall %5 %53 %133 OpLine %3 20 22 %136 = OpAccessChain %125 %86 %135 -%137 = OpLoad %5 %136 -%138 = OpCompositeConstruct %4 %137 %137 %137 -%139 = OpFAdd %4 %134 %138 +%137 = OpLoad %4 %136 +%138 = OpCompositeConstruct %5 %137 %137 %137 +%139 = OpFAdd %5 %134 %138 OpLine %3 20 84 %140 = OpAccessChain %125 %89 %135 -%141 = OpLoad %5 %140 +%141 = OpLoad %4 %140 OpLine %3 20 22 -%142 = OpCompositeConstruct %4 %74 %141 %56 -%143 = OpFAdd %4 %139 %142 +%142 = OpCompositeConstruct %5 %74 %141 %56 +%143 = OpFAdd %5 %139 %142 OpLine %3 20 13 -%144 = OpFunctionCall %4 %53 %143 +%144 = OpFunctionCall %5 %53 %143 OpLine %3 21 28 
-%145 = OpDot %5 %109 %109 +%145 = OpDot %4 %109 %109 %146 = OpLoad %7 %91 %147 = OpVectorShuffle %6 %146 %146 0 1 %148 = OpLoad %7 %91 %149 = OpVectorShuffle %6 %148 %148 0 1 -%150 = OpDot %5 %147 %149 +%150 = OpDot %4 %147 %149 %151 = OpLoad %7 %91 %152 = OpVectorShuffle %6 %151 %151 2 3 %153 = OpLoad %7 %91 %154 = OpVectorShuffle %6 %153 %153 2 3 -%155 = OpDot %5 %152 %154 -%156 = OpCompositeConstruct %4 %145 %150 %155 +%155 = OpDot %4 %152 %154 +%156 = OpCompositeConstruct %5 %145 %150 %155 OpLine %3 21 28 -%157 = OpFSub %4 %79 %156 +%157 = OpFSub %5 %79 %156 OpLine %3 21 24 -%158 = OpExtInst %4 %1 FMax %157 %80 +%158 = OpExtInst %5 %1 FMax %157 %80 OpLine %3 21 5 OpStore %94 %158 OpLine %3 22 9 -%159 = OpLoad %4 %94 -%160 = OpLoad %4 %94 -%161 = OpFMul %4 %159 %160 +%159 = OpLoad %5 %94 +%160 = OpLoad %5 %94 +%161 = OpFMul %5 %159 %160 OpLine %3 22 5 OpStore %94 %161 OpLine %3 23 9 -%162 = OpLoad %4 %94 -%163 = OpLoad %4 %94 -%164 = OpFMul %4 %162 %163 +%162 = OpLoad %5 %94 +%163 = OpLoad %5 %94 +%164 = OpFMul %5 %162 %163 OpLine %3 23 5 OpStore %94 %164 OpLine %3 24 18 -%165 = OpVectorShuffle %4 %73 %73 3 3 3 -%166 = OpFMul %4 %144 %165 -%167 = OpExtInst %4 %1 Fract %166 +%165 = OpVectorShuffle %5 %73 %73 3 3 3 +%166 = OpFMul %5 %144 %165 +%167 = OpExtInst %5 %1 Fract %166 OpLine %3 24 13 -%168 = OpVectorTimesScalar %4 %167 %81 +%168 = OpVectorTimesScalar %5 %167 %81 OpLine %3 24 37 OpLine %3 24 13 -%169 = OpFSub %4 %168 %57 +%169 = OpFSub %5 %168 %57 OpLine %3 25 13 -%170 = OpExtInst %4 %1 FAbs %169 +%170 = OpExtInst %5 %1 FAbs %169 OpLine %3 25 22 OpLine %3 25 13 -%171 = OpFSub %4 %170 %79 +%171 = OpFSub %5 %170 %79 OpLine %3 26 24 OpLine %3 26 14 -%172 = OpFAdd %4 %169 %79 -%173 = OpExtInst %4 %1 Floor %172 +%172 = OpFAdd %5 %169 %79 +%173 = OpExtInst %5 %1 Floor %172 OpLine %3 27 14 -%174 = OpFSub %4 %169 %173 +%174 = OpFSub %5 %169 %173 OpLine %3 1 1 -%175 = OpLoad %4 %94 +%175 = OpLoad %5 %94 OpLine %3 28 53 -%176 = OpFMul %4 %174 %174 -%177 = OpFMul %4 
%171 %171 -%178 = OpFAdd %4 %176 %177 +%176 = OpFMul %5 %174 %174 +%177 = OpFMul %5 %171 %171 +%178 = OpFAdd %5 %176 %177 OpLine %3 28 14 -%179 = OpVectorTimesScalar %4 %178 %82 +%179 = OpVectorTimesScalar %5 %178 %82 OpLine %3 28 9 -%180 = OpFSub %4 %84 %179 -%181 = OpFMul %4 %175 %180 +%180 = OpFSub %5 %84 %179 +%181 = OpFMul %5 %175 %180 OpLine %3 28 5 OpStore %94 %181 OpLine %3 29 13 -%182 = OpCompositeExtract %5 %174 0 -%183 = OpCompositeExtract %5 %109 0 -%184 = OpFMul %5 %182 %183 -%185 = OpCompositeExtract %5 %171 0 -%186 = OpCompositeExtract %5 %109 1 -%187 = OpFMul %5 %185 %186 -%188 = OpFAdd %5 %184 %187 +%182 = OpCompositeExtract %4 %174 0 +%183 = OpCompositeExtract %4 %109 0 +%184 = OpFMul %4 %182 %183 +%185 = OpCompositeExtract %4 %171 0 +%186 = OpCompositeExtract %4 %109 1 +%187 = OpFMul %4 %185 %186 +%188 = OpFAdd %4 %184 %187 %189 = OpVectorShuffle %6 %174 %174 1 2 %190 = OpLoad %7 %91 %191 = OpVectorShuffle %6 %190 %190 0 2 @@ -827,15 +827,15 @@ OpLine %3 29 13 %195 = OpVectorShuffle %6 %194 %194 1 3 %196 = OpFMul %6 %193 %195 %197 = OpFAdd %6 %192 %196 -%198 = OpCompositeConstruct %4 %188 %197 +%198 = OpCompositeConstruct %5 %188 %197 OpLine %3 30 19 -%199 = OpLoad %4 %94 -%200 = OpDot %5 %199 %198 +%199 = OpLoad %5 %94 +%200 = OpDot %4 %199 %198 OpLine %3 30 12 -%201 = OpFMul %5 %85 %200 +%201 = OpFMul %4 %85 %200 OpReturnValue %201 OpFunctionEnd -%204 = OpFunction %5 None %68 +%204 = OpFunction %4 None %68 %203 = OpFunctionParameter %6 %202 = OpLabel %214 = OpVariable %215 Function %74 @@ -851,11 +851,11 @@ OpStore %212 %220 OpLine %3 39 17 OpLine %3 40 14 OpLine %3 41 15 -%221 = OpCompositeExtract %5 %211 0 -%222 = OpCompositeExtract %5 %211 1 -%223 = OpCompositeExtract %5 %211 1 -%224 = OpFNegate %5 %223 -%225 = OpCompositeExtract %5 %211 0 +%221 = OpCompositeExtract %4 %211 0 +%222 = OpCompositeExtract %4 %211 1 +%223 = OpCompositeExtract %4 %211 1 +%224 = OpFNegate %4 %223 +%225 = OpCompositeExtract %4 %211 0 %226 = OpCompositeConstruct %6 
%221 %222 %227 = OpCompositeConstruct %6 %224 %225 %228 = OpCompositeConstruct %9 %226 %227 @@ -877,14 +877,14 @@ OpBranch %230 OpBranch %237 %237 = OpLabel OpLine %3 1 1 -%239 = OpLoad %5 %214 -%240 = OpLoad %5 %216 +%239 = OpLoad %4 %214 +%240 = OpLoad %4 %216 %241 = OpLoad %6 %212 OpLine %3 44 21 -%242 = OpFunctionCall %5 %67 %241 +%242 = OpFunctionCall %4 %67 %241 OpLine %3 44 13 -%243 = OpFMul %5 %240 %242 -%244 = OpFAdd %5 %239 %243 +%243 = OpFMul %4 %240 %242 +%244 = OpFAdd %4 %239 %243 OpLine %3 44 9 OpStore %214 %244 OpLine %3 45 13 @@ -896,9 +896,9 @@ OpLine %3 45 13 OpLine %3 45 9 OpStore %212 %248 OpLine %3 1 1 -%249 = OpLoad %5 %216 +%249 = OpLoad %4 %216 OpLine %3 46 13 -%250 = OpFMul %5 %249 %78 +%250 = OpFMul %4 %249 %78 OpLine %3 46 9 OpStore %216 %250 OpBranch %238 @@ -914,25 +914,25 @@ OpStore %217 %252 OpBranch %229 %230 = OpLabel OpLine %3 1 1 -%253 = OpLoad %5 %214 +%253 = OpLoad %4 %214 OpReturnValue %253 OpFunctionEnd -%257 = OpFunction %4 None %258 +%257 = OpFunction %5 None %258 %255 = OpFunctionParameter %6 %256 = OpFunctionParameter %6 %254 = OpLabel OpBranch %259 %259 = OpLabel OpLine %3 77 9 -%260 = OpCompositeExtract %5 %255 0 -%261 = OpCompositeExtract %5 %256 0 -%262 = OpCompositeExtract %5 %256 1 +%260 = OpCompositeExtract %4 %255 0 +%261 = OpCompositeExtract %4 %256 0 +%262 = OpCompositeExtract %4 %256 1 OpLine %3 78 49 -%263 = OpFunctionCall %5 %204 %255 +%263 = OpFunctionCall %4 %204 %255 OpLine %3 76 12 -%264 = OpExtInst %5 %1 FMix %261 %262 %263 -%265 = OpCompositeExtract %5 %255 1 -%266 = OpCompositeConstruct %4 %260 %264 %265 +%264 = OpExtInst %4 %1 FMix %261 %262 %263 +%265 = OpCompositeExtract %4 %255 1 +%266 = OpCompositeConstruct %5 %260 %264 %265 OpReturnValue %266 OpFunctionEnd %270 = OpFunction %14 None %271 @@ -942,41 +942,41 @@ OpFunctionEnd OpBranch %278 %278 = OpLabel OpLine %3 84 13 -%279 = OpFunctionCall %4 %257 %268 %269 +%279 = OpFunctionCall %5 %257 %268 %269 OpLine %3 86 29 %280 = OpFAdd %6 %268 %273 OpLine 
%3 86 15 -%281 = OpFunctionCall %4 %257 %280 %269 +%281 = OpFunctionCall %5 %257 %280 %269 OpLine %3 86 15 -%282 = OpFSub %4 %281 %279 +%282 = OpFSub %5 %281 %279 OpLine %3 87 29 %283 = OpFAdd %6 %268 %274 OpLine %3 87 15 -%284 = OpFunctionCall %4 %257 %283 %269 +%284 = OpFunctionCall %5 %257 %283 %269 OpLine %3 87 15 -%285 = OpFSub %4 %284 %279 +%285 = OpFSub %5 %284 %279 OpLine %3 88 29 %286 = OpFAdd %6 %268 %276 OpLine %3 88 15 -%287 = OpFunctionCall %4 %257 %286 %269 +%287 = OpFunctionCall %5 %257 %286 %269 OpLine %3 88 15 -%288 = OpFSub %4 %287 %279 +%288 = OpFSub %5 %287 %279 OpLine %3 89 29 %289 = OpFAdd %6 %268 %277 OpLine %3 89 15 -%290 = OpFunctionCall %4 %257 %289 %269 +%290 = OpFunctionCall %5 %257 %289 %269 OpLine %3 89 15 -%291 = OpFSub %4 %290 %279 +%291 = OpFSub %5 %290 %279 OpLine %3 91 14 -%292 = OpExtInst %4 %1 Cross %285 %282 -%293 = OpExtInst %4 %1 Normalize %292 +%292 = OpExtInst %5 %1 Cross %285 %282 +%293 = OpExtInst %5 %1 Normalize %292 OpLine %3 92 14 -%294 = OpExtInst %4 %1 Cross %291 %288 -%295 = OpExtInst %4 %1 Normalize %294 +%294 = OpExtInst %5 %1 Cross %291 %288 +%295 = OpExtInst %5 %1 Normalize %294 OpLine %3 94 14 -%296 = OpFAdd %4 %293 %295 +%296 = OpFAdd %5 %293 %295 OpLine %3 94 13 -%297 = OpVectorTimesScalar %4 %296 %78 +%297 = OpVectorTimesScalar %5 %296 %78 OpLine %3 96 12 %298 = OpCompositeConstruct %14 %279 %297 OpReturnValue %298 @@ -989,50 +989,50 @@ OpFunctionEnd OpBranch %305 %305 = OpLabel OpLine %3 101 9 -%306 = OpConvertUToF %5 %300 +%306 = OpConvertUToF %4 %300 %307 = OpCompositeExtract %8 %301 0 OpLine %3 101 9 %308 = OpIAdd %8 %307 %126 -%309 = OpConvertUToF %5 %308 -%310 = OpFRem %5 %306 %309 +%309 = OpConvertUToF %4 %308 +%310 = OpFRem %4 %306 %309 %311 = OpCompositeExtract %8 %301 0 OpLine %3 100 12 %312 = OpIAdd %8 %311 %126 %313 = OpUDiv %8 %300 %312 -%314 = OpConvertUToF %5 %313 +%314 = OpConvertUToF %4 %313 %315 = OpCompositeConstruct %6 %310 %314 %316 = OpConvertSToF %6 %302 %317 = OpFAdd %6 %315 %316 
OpReturnValue %317 OpFunctionEnd -%320 = OpFunction %4 None %321 +%320 = OpFunction %5 None %321 %319 = OpFunctionParameter %6 %318 = OpLabel OpBranch %328 %328 = OpLabel OpLine %3 270 9 -%329 = OpFunctionCall %5 %67 %319 +%329 = OpFunctionCall %4 %67 %319 OpLine %3 270 9 -%330 = OpFMul %5 %329 %78 +%330 = OpFMul %4 %329 %78 OpLine %3 270 9 -%331 = OpFAdd %5 %330 %78 +%331 = OpFAdd %4 %330 %78 OpLine %3 271 17 %332 = OpFAdd %6 %319 %324 OpLine %3 271 9 -%333 = OpFunctionCall %5 %67 %332 +%333 = OpFunctionCall %4 %67 %332 OpLine %3 271 9 -%334 = OpFMul %5 %333 %78 +%334 = OpFMul %4 %333 %78 OpLine %3 271 9 -%335 = OpFAdd %5 %334 %78 +%335 = OpFAdd %4 %334 %78 OpLine %3 272 17 %336 = OpFAdd %6 %319 %327 OpLine %3 272 9 -%337 = OpFunctionCall %5 %67 %336 +%337 = OpFunctionCall %4 %67 %336 OpLine %3 272 9 -%338 = OpFMul %5 %337 %78 +%338 = OpFMul %4 %337 %78 OpLine %3 269 12 -%339 = OpFAdd %5 %338 %78 -%340 = OpCompositeConstruct %4 %331 %335 %339 +%339 = OpFAdd %4 %338 %78 +%340 = OpCompositeConstruct %5 %331 %335 %339 OpReturnValue %340 OpFunctionEnd %345 = OpFunction %2 None %346 @@ -1145,14 +1145,14 @@ OpLine %3 161 18 %422 = OpUDiv %8 %421 %351 OpLine %3 161 13 %423 = OpUMod %8 %422 %350 -%424 = OpConvertUToF %5 %423 +%424 = OpConvertUToF %4 %423 OpLine %3 162 19 %425 = OpIAdd %8 %408 %126 OpLine %3 162 18 %426 = OpUDiv %8 %425 %351 OpLine %3 162 13 %427 = OpUMod %8 %426 %350 -%428 = OpConvertUToF %5 %427 +%428 = OpConvertUToF %4 %427 OpLine %3 163 14 %429 = OpCompositeConstruct %6 %424 %428 OpLine %3 165 30 @@ -1162,19 +1162,19 @@ OpLine %3 165 30 OpLine %3 165 20 %432 = OpCompositeConstruct %7 %431 %74 %56 OpLine %3 168 21 -%433 = OpCompositeExtract %5 %429 0 +%433 = OpCompositeExtract %4 %429 0 OpLine %3 168 21 %435 = OpAccessChain %434 %417 %351 %436 = OpLoad %8 %435 -%437 = OpConvertUToF %5 %436 -%438 = OpFMul %5 %433 %437 -%439 = OpCompositeExtract %5 %429 1 +%437 = OpConvertUToF %4 %436 +%438 = OpFMul %4 %433 %437 +%439 = OpCompositeExtract %4 %429 1 OpLine 
%3 168 17 %440 = OpAccessChain %434 %417 %351 %441 = OpLoad %8 %440 -%442 = OpConvertUToF %5 %441 -%443 = OpFMul %5 %439 %442 -%444 = OpFAdd %5 %438 %443 +%442 = OpConvertUToF %4 %441 +%443 = OpFMul %4 %439 %442 +%444 = OpFAdd %4 %438 %443 %445 = OpConvertFToU %8 %444 OpLine %3 168 17 %446 = OpAccessChain %434 %417 %352 @@ -1203,14 +1203,14 @@ OpBranch %470 %470 = OpLabel OpLine %3 181 17 %471 = OpCompositeExtract %6 %454 2 -%472 = OpCompositeExtract %5 %471 0 +%472 = OpCompositeExtract %4 %471 0 OpLine %3 181 17 %473 = OpAccessChain %434 %466 %351 %474 = OpLoad %8 %473 -%475 = OpConvertUToF %5 %474 -%476 = OpFMul %5 %472 %475 +%475 = OpConvertUToF %4 %474 +%476 = OpFMul %4 %472 %475 %477 = OpCompositeExtract %6 %454 2 -%478 = OpCompositeExtract %5 %477 1 +%478 = OpCompositeExtract %4 %477 1 OpLine %3 181 70 %479 = OpAccessChain %434 %466 %351 %480 = OpLoad %8 %479 @@ -1218,19 +1218,19 @@ OpLine %3 181 13 %481 = OpAccessChain %434 %466 %351 %482 = OpLoad %8 %481 %483 = OpIMul %8 %480 %482 -%484 = OpConvertUToF %5 %483 -%485 = OpFMul %5 %478 %484 -%486 = OpFAdd %5 %476 %485 +%484 = OpConvertUToF %4 %483 +%485 = OpFMul %4 %478 %484 +%486 = OpFAdd %4 %476 %485 %487 = OpConvertFToU %8 %486 OpLine %3 181 13 %488 = OpAccessChain %434 %466 %352 %489 = OpLoad %8 %488 %490 = OpIAdd %8 %487 %489 OpLine %3 182 32 -%491 = OpConvertUToF %5 %490 +%491 = OpConvertUToF %4 %490 OpLine %3 182 22 -%492 = OpFDiv %5 %491 %467 -%493 = OpExtInst %5 %1 Floor %492 +%492 = OpFDiv %4 %491 %467 +%493 = OpExtInst %4 %1 Floor %492 %494 = OpConvertFToU %8 %493 OpLine %3 183 22 %495 = OpUMod %8 %490 %349 @@ -1252,43 +1252,43 @@ OpSelectionMerge %504 None OpSwitch %495 %511 0 %505 1 %506 2 %507 3 %508 4 %509 5 %510 %505 = OpLabel OpLine %3 191 37 -%512 = OpCompositeExtract %4 %503 0 -%513 = OpCompositeExtract %5 %512 0 +%512 = OpCompositeExtract %5 %503 0 +%513 = OpCompositeExtract %4 %512 0 OpLine %3 191 20 OpStore %468 %513 OpBranch %504 %506 = OpLabel OpLine %3 192 37 -%514 = OpCompositeExtract 
%4 %503 0 -%515 = OpCompositeExtract %5 %514 1 +%514 = OpCompositeExtract %5 %503 0 +%515 = OpCompositeExtract %4 %514 1 OpLine %3 192 20 OpStore %468 %515 OpBranch %504 %507 = OpLabel OpLine %3 193 37 -%516 = OpCompositeExtract %4 %503 0 -%517 = OpCompositeExtract %5 %516 2 +%516 = OpCompositeExtract %5 %503 0 +%517 = OpCompositeExtract %4 %516 2 OpLine %3 193 20 OpStore %468 %517 OpBranch %504 %508 = OpLabel OpLine %3 194 37 -%518 = OpCompositeExtract %4 %503 1 -%519 = OpCompositeExtract %5 %518 0 +%518 = OpCompositeExtract %5 %503 1 +%519 = OpCompositeExtract %4 %518 0 OpLine %3 194 20 OpStore %468 %519 OpBranch %504 %509 = OpLabel OpLine %3 195 37 -%520 = OpCompositeExtract %4 %503 1 -%521 = OpCompositeExtract %5 %520 1 +%520 = OpCompositeExtract %5 %503 1 +%521 = OpCompositeExtract %4 %520 1 OpLine %3 195 20 OpStore %468 %521 OpBranch %504 %510 = OpLabel OpLine %3 196 37 -%522 = OpCompositeExtract %4 %503 1 -%523 = OpCompositeExtract %5 %522 2 +%522 = OpCompositeExtract %5 %503 1 +%523 = OpCompositeExtract %4 %522 2 OpLine %3 196 20 OpStore %468 %523 OpBranch %504 @@ -1337,7 +1337,7 @@ OpLine %3 213 13 OpLine %3 213 5 OpStore %469 %540 OpLine %3 222 27 -%541 = OpLoad %5 %468 +%541 = OpLoad %4 %468 %542 = OpBitcast %8 %541 OpLine %3 223 12 %543 = OpLoad %8 %469 @@ -1350,8 +1350,8 @@ OpReturn OpFunctionEnd %558 = OpFunction %2 None %346 %547 = OpLabel -%551 = OpLoad %4 %549 -%553 = OpLoad %4 %552 +%551 = OpLoad %5 %549 +%553 = OpLoad %5 %552 %548 = OpCompositeConstruct %14 %551 %553 %560 = OpAccessChain %559 %39 %135 OpBranch %561 @@ -1359,20 +1359,20 @@ OpBranch %561 OpLine %3 254 25 %563 = OpAccessChain %562 %560 %126 %564 = OpLoad %23 %563 -%565 = OpCompositeExtract %4 %548 0 +%565 = OpCompositeExtract %5 %548 0 OpLine %3 254 25 %566 = OpCompositeConstruct %7 %565 %56 %567 = OpMatrixTimesVector %7 %564 %566 OpLine %3 255 18 -%568 = OpCompositeExtract %4 %548 1 +%568 = OpCompositeExtract %5 %548 1 OpLine %3 256 12 -%569 = OpCompositeExtract %4 %548 0 +%569 = 
OpCompositeExtract %5 %548 0 %570 = OpCompositeConstruct %26 %567 %568 %569 %571 = OpCompositeExtract %7 %570 0 OpStore %554 %571 -%572 = OpCompositeExtract %4 %570 1 +%572 = OpCompositeExtract %5 %570 1 OpStore %555 %572 -%573 = OpCompositeExtract %4 %570 2 +%573 = OpCompositeExtract %5 %570 2 OpStore %557 %573 OpReturn OpFunctionEnd @@ -1380,8 +1380,8 @@ OpFunctionEnd %574 = OpLabel %592 = OpVariable %95 Function %593 %577 = OpLoad %7 %576 -%579 = OpLoad %4 %578 -%581 = OpLoad %4 %580 +%579 = OpLoad %5 %578 +%581 = OpLoad %5 %580 %575 = OpCompositeConstruct %26 %577 %579 %581 %584 = OpAccessChain %559 %39 %135 %586 = OpAccessChain %585 %42 %135 @@ -1389,70 +1389,70 @@ OpBranch %594 %594 = OpLabel OpLine %3 278 28 OpLine %3 278 17 -%595 = OpCompositeExtract %4 %575 2 -%596 = OpExtInst %4 %1 Fract %595 -%597 = OpExtInst %4 %1 SmoothStep %80 %587 %596 +%595 = OpCompositeExtract %5 %575 2 +%596 = OpExtInst %5 %1 Fract %595 +%597 = OpExtInst %5 %1 SmoothStep %80 %587 %596 OpLine %3 278 5 OpStore %592 %597 OpLine %3 279 17 OpLine %3 279 13 %598 = OpAccessChain %125 %592 %135 -%599 = OpLoad %5 %598 +%599 = OpLoad %4 %598 %600 = OpAccessChain %125 %592 %126 -%601 = OpLoad %5 %600 -%602 = OpFMul %5 %599 %601 +%601 = OpLoad %4 %600 +%602 = OpFMul %4 %599 %601 %603 = OpAccessChain %125 %592 %350 -%604 = OpLoad %5 %603 -%605 = OpFMul %5 %602 %604 -%606 = OpCompositeConstruct %4 %605 %605 %605 -%607 = OpExtInst %4 %1 FMix %589 %591 %606 +%604 = OpLoad %4 %603 +%605 = OpFMul %4 %602 %604 +%606 = OpCompositeConstruct %5 %605 %605 %605 +%607 = OpExtInst %5 %1 FMix %589 %591 %606 OpLine %3 279 5 OpStore %592 %607 OpLine %3 282 25 %609 = OpAccessChain %608 %586 %126 -%610 = OpLoad %4 %609 -%611 = OpVectorTimesScalar %4 %610 %272 +%610 = OpLoad %5 %609 +%611 = OpVectorTimesScalar %5 %610 %272 OpLine %3 284 21 %612 = OpAccessChain %608 %586 %135 -%613 = OpLoad %4 %612 -%614 = OpCompositeExtract %4 %575 2 -%615 = OpFSub %4 %613 %614 -%616 = OpExtInst %4 %1 Normalize %615 +%613 = 
OpLoad %5 %612 +%614 = OpCompositeExtract %5 %575 2 +%615 = OpFSub %5 %613 %614 +%616 = OpExtInst %5 %1 Normalize %615 OpLine %3 285 20 %618 = OpAccessChain %617 %584 %135 %619 = OpLoad %7 %618 -%620 = OpVectorShuffle %4 %619 %619 0 1 2 -%621 = OpCompositeExtract %4 %575 2 -%622 = OpFSub %4 %620 %621 -%623 = OpExtInst %4 %1 Normalize %622 +%620 = OpVectorShuffle %5 %619 %619 0 1 2 +%621 = OpCompositeExtract %5 %575 2 +%622 = OpFSub %5 %620 %621 +%623 = OpExtInst %5 %1 Normalize %622 OpLine %3 286 20 -%624 = OpFAdd %4 %623 %616 -%625 = OpExtInst %4 %1 Normalize %624 +%624 = OpFAdd %5 %623 %616 +%625 = OpExtInst %5 %1 Normalize %624 OpLine %3 288 32 -%626 = OpCompositeExtract %4 %575 1 -%627 = OpDot %5 %626 %616 +%626 = OpCompositeExtract %5 %575 1 +%627 = OpDot %4 %626 %616 OpLine %3 288 28 -%628 = OpExtInst %5 %1 FMax %627 %74 +%628 = OpExtInst %4 %1 FMax %627 %74 OpLine %3 289 25 %629 = OpAccessChain %608 %586 %126 -%630 = OpLoad %4 %629 -%631 = OpVectorTimesScalar %4 %630 %628 +%630 = OpLoad %5 %629 +%631 = OpVectorTimesScalar %5 %630 %628 OpLine %3 291 37 -%632 = OpCompositeExtract %4 %575 1 -%633 = OpDot %5 %632 %625 +%632 = OpCompositeExtract %5 %575 1 +%633 = OpDot %4 %632 %625 OpLine %3 291 33 -%634 = OpExtInst %5 %1 FMax %633 %74 +%634 = OpExtInst %4 %1 FMax %633 %74 OpLine %3 291 29 -%635 = OpExtInst %5 %1 Pow %634 %323 +%635 = OpExtInst %4 %1 Pow %634 %323 OpLine %3 292 26 %636 = OpAccessChain %608 %586 %126 -%637 = OpLoad %4 %636 -%638 = OpVectorTimesScalar %4 %637 %635 +%637 = OpLoad %5 %636 +%638 = OpVectorTimesScalar %5 %637 %635 OpLine %3 294 18 -%639 = OpFAdd %4 %611 %631 -%640 = OpFAdd %4 %639 %638 -%641 = OpLoad %4 %592 -%642 = OpFMul %4 %640 %641 +%639 = OpFAdd %5 %611 %631 +%640 = OpFAdd %5 %639 %638 +%641 = OpLoad %5 %592 +%642 = OpFMul %5 %640 %641 OpLine %3 296 12 %643 = OpCompositeConstruct %7 %642 %56 OpStore %582 %643 diff --git a/naga/tests/out/spv/fragment-output.spvasm b/naga/tests/out/spv/fragment-output.spvasm index 
c61ffb8258..414c646c73 100644 --- a/naga/tests/out/spv/fragment-output.spvasm +++ b/naga/tests/out/spv/fragment-output.spvasm @@ -34,25 +34,25 @@ OpDecorate %76 Location 3 OpDecorate %78 Location 4 OpDecorate %80 Location 5 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%6 = OpTypeInt 32 1 -%5 = OpTypeVector %6 4 -%8 = OpTypeInt 32 0 -%7 = OpTypeVector %8 4 -%9 = OpTypeVector %4 3 -%10 = OpTypeVector %6 3 -%11 = OpTypeVector %8 3 -%12 = OpTypeStruct %3 %5 %7 %9 %10 %11 -%13 = OpTypeVector %4 2 -%14 = OpTypeVector %6 2 -%15 = OpTypeVector %8 2 -%16 = OpTypeStruct %13 %14 %15 %4 %6 %8 -%19 = OpTypePointer Output %3 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeInt 32 1 +%6 = OpTypeVector %5 4 +%7 = OpTypeInt 32 0 +%8 = OpTypeVector %7 4 +%9 = OpTypeVector %3 3 +%10 = OpTypeVector %5 3 +%11 = OpTypeVector %7 3 +%12 = OpTypeStruct %4 %6 %8 %9 %10 %11 +%13 = OpTypeVector %3 2 +%14 = OpTypeVector %5 2 +%15 = OpTypeVector %7 2 +%16 = OpTypeStruct %13 %14 %15 %3 %5 %7 +%19 = OpTypePointer Output %4 %18 = OpVariable %19 Output -%21 = OpTypePointer Output %5 +%21 = OpTypePointer Output %6 %20 = OpVariable %21 Output -%23 = OpTypePointer Output %7 +%23 = OpTypePointer Output %8 %22 = OpVariable %23 Output %25 = OpTypePointer Output %9 %24 = OpVariable %25 Output @@ -61,39 +61,39 @@ OpDecorate %80 Location 5 %29 = OpTypePointer Output %11 %28 = OpVariable %29 Output %31 = OpTypeFunction %2 -%32 = OpConstant %4 0.0 -%33 = OpConstantComposite %3 %32 %32 %32 %32 -%34 = OpConstant %6 0 -%35 = OpConstantComposite %5 %34 %34 %34 %34 -%36 = OpConstant %8 0 -%37 = OpConstantComposite %7 %36 %36 %36 %36 +%32 = OpConstant %3 0.0 +%33 = OpConstantComposite %4 %32 %32 %32 %32 +%34 = OpConstant %5 0 +%35 = OpConstantComposite %6 %34 %34 %34 %34 +%36 = OpConstant %7 0 +%37 = OpConstantComposite %8 %36 %36 %36 %36 %38 = OpConstantComposite %9 %32 %32 %32 %39 = OpConstantComposite %10 %34 %34 %34 %40 = OpConstantComposite %11 %36 %36 %36 %42 = OpTypePointer Function 
%12 %43 = OpConstantNull %12 -%45 = OpTypePointer Function %3 -%47 = OpTypePointer Function %5 -%48 = OpConstant %8 1 -%50 = OpTypePointer Function %7 -%51 = OpConstant %8 2 +%45 = OpTypePointer Function %4 +%47 = OpTypePointer Function %6 +%48 = OpConstant %7 1 +%50 = OpTypePointer Function %8 +%51 = OpConstant %7 2 %53 = OpTypePointer Function %9 -%54 = OpConstant %8 3 +%54 = OpConstant %7 3 %56 = OpTypePointer Function %10 -%57 = OpConstant %8 4 +%57 = OpConstant %7 4 %59 = OpTypePointer Function %11 -%60 = OpConstant %8 5 +%60 = OpConstant %7 5 %71 = OpTypePointer Output %13 %70 = OpVariable %71 Output %73 = OpTypePointer Output %14 %72 = OpVariable %73 Output %75 = OpTypePointer Output %15 %74 = OpVariable %75 Output -%77 = OpTypePointer Output %4 +%77 = OpTypePointer Output %3 %76 = OpVariable %77 Output -%79 = OpTypePointer Output %6 +%79 = OpTypePointer Output %5 %78 = OpVariable %79 Output -%81 = OpTypePointer Output %8 +%81 = OpTypePointer Output %7 %80 = OpVariable %81 Output %83 = OpConstantComposite %13 %32 %32 %84 = OpConstantComposite %14 %34 %34 @@ -103,9 +103,9 @@ OpDecorate %80 Location 5 %90 = OpTypePointer Function %13 %92 = OpTypePointer Function %14 %94 = OpTypePointer Function %15 -%96 = OpTypePointer Function %4 -%98 = OpTypePointer Function %6 -%100 = OpTypePointer Function %8 +%96 = OpTypePointer Function %3 +%98 = OpTypePointer Function %5 +%100 = OpTypePointer Function %7 %30 = OpFunction %2 None %31 %17 = OpLabel %41 = OpVariable %42 Function %43 @@ -124,11 +124,11 @@ OpStore %58 %39 %61 = OpAccessChain %59 %41 %60 OpStore %61 %40 %62 = OpLoad %12 %41 -%63 = OpCompositeExtract %3 %62 0 +%63 = OpCompositeExtract %4 %62 0 OpStore %18 %63 -%64 = OpCompositeExtract %5 %62 1 +%64 = OpCompositeExtract %6 %62 1 OpStore %20 %64 -%65 = OpCompositeExtract %7 %62 2 +%65 = OpCompositeExtract %8 %62 2 OpStore %22 %65 %66 = OpCompositeExtract %9 %62 3 OpStore %24 %66 @@ -162,11 +162,11 @@ OpStore %70 %103 OpStore %72 %104 %105 = OpCompositeExtract 
%15 %102 2 OpStore %74 %105 -%106 = OpCompositeExtract %4 %102 3 +%106 = OpCompositeExtract %3 %102 3 OpStore %76 %106 -%107 = OpCompositeExtract %6 %102 4 +%107 = OpCompositeExtract %5 %102 4 OpStore %78 %107 -%108 = OpCompositeExtract %8 %102 5 +%108 = OpCompositeExtract %7 %102 5 OpStore %80 %108 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/interface.compute.spvasm b/naga/tests/out/spv/interface.compute.spvasm index 73f6ecb2c2..912d28d5b0 100644 --- a/naga/tests/out/spv/interface.compute.spvasm +++ b/naga/tests/out/spv/interface.compute.spvasm @@ -21,11 +21,11 @@ OpDecorate %22 BuiltIn LocalInvocationIndex OpDecorate %25 BuiltIn WorkgroupId OpDecorate %27 BuiltIn NumWorkgroups %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%5 = OpTypeStruct %3 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeStruct %4 %3 %6 = OpTypeInt 32 0 -%7 = OpTypeStruct %4 %6 %4 +%7 = OpTypeStruct %3 %6 %3 %8 = OpTypeBool %10 = OpConstant %6 1 %9 = OpTypeArray %6 %10 diff --git a/naga/tests/out/spv/interface.fragment.spvasm b/naga/tests/out/spv/interface.fragment.spvasm index bb42c678ec..891b10d863 100644 --- a/naga/tests/out/spv/interface.fragment.spvasm +++ b/naga/tests/out/spv/interface.fragment.spvasm @@ -30,38 +30,38 @@ OpDecorate %30 BuiltIn FragDepth OpDecorate %32 BuiltIn SampleMask OpDecorate %34 Location 0 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%5 = OpTypeStruct %3 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeStruct %4 %3 %6 = OpTypeInt 32 0 -%7 = OpTypeStruct %4 %6 %4 +%7 = OpTypeStruct %3 %6 %3 %8 = OpTypeBool %10 = OpConstant %6 1 %9 = OpTypeArray %6 %10 %11 = OpTypeVector %6 3 %12 = OpTypeStruct %6 %13 = OpTypeStruct %6 -%17 = OpTypePointer Input %3 +%17 = OpTypePointer Input %4 %16 = OpVariable %17 Input -%20 = OpTypePointer Input %4 +%20 = OpTypePointer Input %3 %19 = OpVariable %20 Input %23 = OpTypePointer Input %8 %22 = OpVariable %23 Input %26 = OpTypePointer Input %6 
%25 = OpVariable %26 Input %28 = OpVariable %26 Input -%31 = OpTypePointer Output %4 +%31 = OpTypePointer Output %3 %30 = OpVariable %31 Output %33 = OpTypePointer Output %6 %32 = OpVariable %33 Output %34 = OpVariable %31 Output %36 = OpTypeFunction %2 -%37 = OpConstant %4 0.0 -%38 = OpConstant %4 1.0 +%37 = OpConstant %3 0.0 +%38 = OpConstant %3 1.0 %35 = OpFunction %2 None %36 %14 = OpLabel -%18 = OpLoad %3 %16 -%21 = OpLoad %4 %19 +%18 = OpLoad %4 %16 +%21 = OpLoad %3 %19 %15 = OpCompositeConstruct %5 %18 %21 %24 = OpLoad %8 %22 %27 = OpLoad %6 %25 @@ -70,17 +70,17 @@ OpBranch %39 %39 = OpLabel %40 = OpShiftLeftLogical %6 %10 %27 %41 = OpBitwiseAnd %6 %29 %40 -%42 = OpSelect %4 %24 %38 %37 -%43 = OpCompositeExtract %4 %15 1 +%42 = OpSelect %3 %24 %38 %37 +%43 = OpCompositeExtract %3 %15 1 %44 = OpCompositeConstruct %7 %43 %41 %42 -%45 = OpCompositeExtract %4 %44 0 +%45 = OpCompositeExtract %3 %44 0 OpStore %30 %45 -%46 = OpLoad %4 %30 -%47 = OpExtInst %4 %1 FClamp %46 %37 %38 +%46 = OpLoad %3 %30 +%47 = OpExtInst %3 %1 FClamp %46 %37 %38 OpStore %30 %47 %48 = OpCompositeExtract %6 %44 1 OpStore %32 %48 -%49 = OpCompositeExtract %4 %44 2 +%49 = OpCompositeExtract %3 %44 2 OpStore %34 %49 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/interface.vertex.spvasm b/naga/tests/out/spv/interface.vertex.spvasm index eaa947f5b3..93cd5adedd 100644 --- a/naga/tests/out/spv/interface.vertex.spvasm +++ b/naga/tests/out/spv/interface.vertex.spvasm @@ -22,11 +22,11 @@ OpDecorate %22 BuiltIn Position OpDecorate %24 Location 1 OpDecorate %26 BuiltIn PointSize %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%5 = OpTypeStruct %3 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeStruct %4 %3 %6 = OpTypeInt 32 0 -%7 = OpTypeStruct %4 %6 %4 +%7 = OpTypeStruct %3 %6 %3 %8 = OpTypeBool %10 = OpConstant %6 1 %9 = OpTypeArray %6 %10 @@ -37,15 +37,15 @@ OpDecorate %26 BuiltIn PointSize %15 = OpVariable %16 Input %18 = OpVariable 
%16 Input %20 = OpVariable %16 Input -%23 = OpTypePointer Output %3 +%23 = OpTypePointer Output %4 %22 = OpVariable %23 Output -%25 = OpTypePointer Output %4 +%25 = OpTypePointer Output %3 %24 = OpVariable %25 Output -%27 = OpTypePointer Output %4 +%27 = OpTypePointer Output %3 %26 = OpVariable %27 Output -%28 = OpConstant %4 1.0 +%28 = OpConstant %3 1.0 %30 = OpTypeFunction %2 -%31 = OpConstantComposite %3 %28 %28 %28 %28 +%31 = OpConstantComposite %4 %28 %28 %28 %28 %29 = OpFunction %2 None %30 %14 = OpLabel %17 = OpLoad %6 %15 @@ -56,11 +56,11 @@ OpBranch %32 %32 = OpLabel %33 = OpIAdd %6 %17 %19 %34 = OpIAdd %6 %33 %21 -%35 = OpConvertUToF %4 %34 +%35 = OpConvertUToF %3 %34 %36 = OpCompositeConstruct %5 %31 %35 -%37 = OpCompositeExtract %3 %36 0 +%37 = OpCompositeExtract %4 %36 0 OpStore %22 %37 -%38 = OpCompositeExtract %4 %36 1 +%38 = OpCompositeExtract %3 %36 1 OpStore %24 %38 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/interface.vertex_two_structs.spvasm b/naga/tests/out/spv/interface.vertex_two_structs.spvasm index bcc4aab4e5..cad89cc551 100644 --- a/naga/tests/out/spv/interface.vertex_two_structs.spvasm +++ b/naga/tests/out/spv/interface.vertex_two_structs.spvasm @@ -20,11 +20,11 @@ OpDecorate %22 Invariant OpDecorate %22 BuiltIn Position OpDecorate %24 BuiltIn PointSize %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%5 = OpTypeStruct %3 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeStruct %4 %3 %6 = OpTypeInt 32 0 -%7 = OpTypeStruct %4 %6 %4 +%7 = OpTypeStruct %3 %6 %3 %8 = OpTypeBool %10 = OpConstant %6 1 %9 = OpTypeArray %6 %10 @@ -34,14 +34,14 @@ OpDecorate %24 BuiltIn PointSize %17 = OpTypePointer Input %6 %16 = OpVariable %17 Input %20 = OpVariable %17 Input -%23 = OpTypePointer Output %3 +%23 = OpTypePointer Output %4 %22 = OpVariable %23 Output -%25 = OpTypePointer Output %4 +%25 = OpTypePointer Output %3 %24 = OpVariable %25 Output -%26 = OpConstant %4 1.0 +%26 = OpConstant %3 
1.0 %28 = OpTypeFunction %2 %29 = OpConstant %6 2 -%30 = OpConstant %4 0.0 +%30 = OpConstant %3 0.0 %32 = OpTypePointer Function %6 %27 = OpFunction %2 None %28 %14 = OpLabel @@ -54,12 +54,12 @@ OpStore %24 %26 OpBranch %33 %33 = OpLabel %34 = OpCompositeExtract %6 %15 0 -%35 = OpConvertUToF %4 %34 +%35 = OpConvertUToF %3 %34 %36 = OpCompositeExtract %6 %19 0 -%37 = OpConvertUToF %4 %36 +%37 = OpConvertUToF %3 %36 %38 = OpLoad %6 %31 -%39 = OpConvertUToF %4 %38 -%40 = OpCompositeConstruct %3 %35 %37 %39 %30 +%39 = OpConvertUToF %3 %38 +%40 = OpCompositeConstruct %4 %35 %37 %39 %30 OpStore %22 %40 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/interpolate.spvasm b/naga/tests/out/spv/interpolate.spvasm index d2a67a9fd2..f40c257408 100644 --- a/naga/tests/out/spv/interpolate.spvasm +++ b/naga/tests/out/spv/interpolate.spvasm @@ -1,213 +1,279 @@ ; SPIR-V ; Version: 1.0 ; Generator: rspirv -; Bound: 111 +; Bound: 139 OpCapability Shader OpCapability SampleRateShading %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 -OpEntryPoint Vertex %26 "vert_main" %10 %12 %14 %16 %18 %20 %21 %22 %23 -OpEntryPoint Fragment %109 "frag_main" %88 %91 %94 %97 %100 %103 %105 %107 -OpExecutionMode %109 OriginUpperLeft +OpEntryPoint Vertex %30 "vert_main" %10 %12 %14 %15 %16 %18 %20 %22 %23 %24 %25 %26 %27 +OpEntryPoint Fragment %137 "frag_main" %108 %111 %114 %116 %118 %121 %124 %127 %129 %131 %133 %135 +OpExecutionMode %137 OriginUpperLeft OpMemberName %8 0 "position" OpMemberName %8 1 "_flat" -OpMemberName %8 2 "_linear" -OpMemberName %8 3 "linear_centroid" -OpMemberName %8 4 "linear_sample" -OpMemberName %8 5 "perspective" -OpMemberName %8 6 "perspective_centroid" -OpMemberName %8 7 "perspective_sample" +OpMemberName %8 2 "flat_first" +OpMemberName %8 3 "flat_either" +OpMemberName %8 4 "_linear" +OpMemberName %8 5 "linear_centroid" +OpMemberName %8 6 "linear_sample" +OpMemberName %8 7 "linear_center" +OpMemberName %8 8 "perspective" 
+OpMemberName %8 9 "perspective_centroid" +OpMemberName %8 10 "perspective_sample" +OpMemberName %8 11 "perspective_center" OpName %8 "FragmentInput" OpName %10 "position" OpName %12 "_flat" -OpName %14 "_linear" -OpName %16 "linear_centroid" -OpName %18 "linear_sample" -OpName %20 "perspective" -OpName %21 "perspective_centroid" -OpName %22 "perspective_sample" -OpName %26 "vert_main" -OpName %49 "out" -OpName %88 "position" -OpName %91 "_flat" -OpName %94 "_linear" -OpName %97 "linear_centroid" -OpName %100 "linear_sample" -OpName %103 "perspective" -OpName %105 "perspective_centroid" -OpName %107 "perspective_sample" -OpName %109 "frag_main" +OpName %14 "flat_first" +OpName %15 "flat_either" +OpName %16 "_linear" +OpName %18 "linear_centroid" +OpName %20 "linear_sample" +OpName %22 "linear_center" +OpName %23 "perspective" +OpName %24 "perspective_centroid" +OpName %25 "perspective_sample" +OpName %26 "perspective_center" +OpName %30 "vert_main" +OpName %60 "out" +OpName %108 "position" +OpName %111 "_flat" +OpName %114 "flat_first" +OpName %116 "flat_either" +OpName %118 "_linear" +OpName %121 "linear_centroid" +OpName %124 "linear_sample" +OpName %127 "linear_center" +OpName %129 "perspective" +OpName %131 "perspective_centroid" +OpName %133 "perspective_sample" +OpName %135 "perspective_center" +OpName %137 "frag_main" OpMemberDecorate %8 0 Offset 0 OpMemberDecorate %8 1 Offset 16 OpMemberDecorate %8 2 Offset 20 OpMemberDecorate %8 3 Offset 24 -OpMemberDecorate %8 4 Offset 32 -OpMemberDecorate %8 5 Offset 48 -OpMemberDecorate %8 6 Offset 64 -OpMemberDecorate %8 7 Offset 68 +OpMemberDecorate %8 4 Offset 28 +OpMemberDecorate %8 5 Offset 32 +OpMemberDecorate %8 6 Offset 48 +OpMemberDecorate %8 7 Offset 64 +OpMemberDecorate %8 8 Offset 80 +OpMemberDecorate %8 9 Offset 96 +OpMemberDecorate %8 10 Offset 100 +OpMemberDecorate %8 11 Offset 104 OpDecorate %10 BuiltIn Position OpDecorate %12 Location 0 OpDecorate %12 Flat OpDecorate %14 Location 1 -OpDecorate %14 
NoPerspective -OpDecorate %16 Location 2 +OpDecorate %14 Flat +OpDecorate %15 Location 2 +OpDecorate %15 Flat +OpDecorate %16 Location 3 OpDecorate %16 NoPerspective -OpDecorate %16 Centroid -OpDecorate %18 Location 3 +OpDecorate %18 Location 4 OpDecorate %18 NoPerspective -OpDecorate %18 Sample -OpDecorate %20 Location 4 -OpDecorate %21 Location 5 -OpDecorate %21 Centroid -OpDecorate %22 Location 6 -OpDecorate %22 Sample -OpDecorate %23 BuiltIn PointSize -OpDecorate %88 BuiltIn FragCoord -OpDecorate %91 Location 0 -OpDecorate %91 Flat -OpDecorate %94 Location 1 -OpDecorate %94 NoPerspective -OpDecorate %97 Location 2 -OpDecorate %97 NoPerspective -OpDecorate %97 Centroid -OpDecorate %100 Location 3 -OpDecorate %100 NoPerspective -OpDecorate %100 Sample -OpDecorate %103 Location 4 -OpDecorate %105 Location 5 -OpDecorate %105 Centroid -OpDecorate %107 Location 6 -OpDecorate %107 Sample +OpDecorate %18 Centroid +OpDecorate %20 Location 6 +OpDecorate %20 NoPerspective +OpDecorate %20 Sample +OpDecorate %22 Location 7 +OpDecorate %22 NoPerspective +OpDecorate %23 Location 8 +OpDecorate %24 Location 9 +OpDecorate %24 Centroid +OpDecorate %25 Location 10 +OpDecorate %25 Sample +OpDecorate %26 Location 11 +OpDecorate %27 BuiltIn PointSize +OpDecorate %108 BuiltIn FragCoord +OpDecorate %111 Location 0 +OpDecorate %111 Flat +OpDecorate %114 Location 1 +OpDecorate %114 Flat +OpDecorate %116 Location 2 +OpDecorate %116 Flat +OpDecorate %118 Location 3 +OpDecorate %118 NoPerspective +OpDecorate %121 Location 4 +OpDecorate %121 NoPerspective +OpDecorate %121 Centroid +OpDecorate %124 Location 6 +OpDecorate %124 NoPerspective +OpDecorate %124 Sample +OpDecorate %127 Location 7 +OpDecorate %127 NoPerspective +OpDecorate %129 Location 8 +OpDecorate %131 Location 9 +OpDecorate %131 Centroid +OpDecorate %133 Location 10 +OpDecorate %133 Sample +OpDecorate %135 Location 11 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 %5 = 
OpTypeInt 32 0 -%6 = OpTypeVector %4 2 -%7 = OpTypeVector %4 3 -%8 = OpTypeStruct %3 %5 %4 %6 %7 %3 %4 %4 -%11 = OpTypePointer Output %3 +%6 = OpTypeVector %3 2 +%7 = OpTypeVector %3 3 +%8 = OpTypeStruct %4 %5 %5 %5 %3 %6 %7 %7 %4 %3 %3 %3 +%11 = OpTypePointer Output %4 %10 = OpVariable %11 Output %13 = OpTypePointer Output %5 %12 = OpVariable %13 Output -%15 = OpTypePointer Output %4 -%14 = OpVariable %15 Output -%17 = OpTypePointer Output %6 +%14 = OpVariable %13 Output +%15 = OpVariable %13 Output +%17 = OpTypePointer Output %3 %16 = OpVariable %17 Output -%19 = OpTypePointer Output %7 +%19 = OpTypePointer Output %6 %18 = OpVariable %19 Output -%20 = OpVariable %11 Output -%21 = OpVariable %15 Output -%22 = OpVariable %15 Output -%24 = OpTypePointer Output %4 -%23 = OpVariable %24 Output -%25 = OpConstant %4 1.0 -%27 = OpTypeFunction %2 -%28 = OpConstant %4 2.0 -%29 = OpConstant %4 4.0 -%30 = OpConstant %4 5.0 -%31 = OpConstant %4 6.0 -%32 = OpConstantComposite %3 %28 %29 %30 %31 -%33 = OpConstant %5 8 -%34 = OpConstant %4 27.0 -%35 = OpConstant %4 64.0 -%36 = OpConstant %4 125.0 -%37 = OpConstantComposite %6 %35 %36 -%38 = OpConstant %4 216.0 -%39 = OpConstant %4 343.0 -%40 = OpConstant %4 512.0 -%41 = OpConstantComposite %7 %38 %39 %40 -%42 = OpConstant %4 729.0 -%43 = OpConstant %4 1000.0 -%44 = OpConstant %4 1331.0 -%45 = OpConstant %4 1728.0 -%46 = OpConstantComposite %3 %42 %43 %44 %45 -%47 = OpConstant %4 2197.0 -%48 = OpConstant %4 2744.0 -%50 = OpTypePointer Function %8 -%51 = OpConstantNull %8 -%53 = OpTypePointer Function %3 -%54 = OpConstant %5 0 -%56 = OpTypePointer Function %5 -%57 = OpConstant %5 1 -%59 = OpTypePointer Function %4 -%60 = OpConstant %5 2 -%62 = OpTypePointer Function %6 -%63 = OpConstant %5 3 -%65 = OpTypePointer Function %7 -%66 = OpConstant %5 4 -%68 = OpConstant %5 5 -%70 = OpConstant %5 6 -%72 = OpConstant %5 7 -%89 = OpTypePointer Input %3 -%88 = OpVariable %89 Input -%92 = OpTypePointer Input %5 -%91 = OpVariable %92 Input 
-%95 = OpTypePointer Input %4 -%94 = OpVariable %95 Input -%98 = OpTypePointer Input %6 -%97 = OpVariable %98 Input -%101 = OpTypePointer Input %7 -%100 = OpVariable %101 Input -%103 = OpVariable %89 Input -%105 = OpVariable %95 Input -%107 = OpVariable %95 Input -%26 = OpFunction %2 None %27 +%21 = OpTypePointer Output %7 +%20 = OpVariable %21 Output +%22 = OpVariable %21 Output +%23 = OpVariable %11 Output +%24 = OpVariable %17 Output +%25 = OpVariable %17 Output +%26 = OpVariable %17 Output +%28 = OpTypePointer Output %3 +%27 = OpVariable %28 Output +%29 = OpConstant %3 1.0 +%31 = OpTypeFunction %2 +%32 = OpConstant %3 2.0 +%33 = OpConstant %3 4.0 +%34 = OpConstant %3 5.0 +%35 = OpConstant %3 6.0 +%36 = OpConstantComposite %4 %32 %33 %34 %35 +%37 = OpConstant %5 8 +%38 = OpConstant %5 9 +%39 = OpConstant %5 10 +%40 = OpConstant %3 27.0 +%41 = OpConstant %3 64.0 +%42 = OpConstant %3 125.0 +%43 = OpConstantComposite %6 %41 %42 +%44 = OpConstant %3 216.0 +%45 = OpConstant %3 343.0 +%46 = OpConstant %3 512.0 +%47 = OpConstantComposite %7 %44 %45 %46 +%48 = OpConstant %3 255.0 +%49 = OpConstant %3 511.0 +%50 = OpConstant %3 1024.0 +%51 = OpConstantComposite %7 %48 %49 %50 +%52 = OpConstant %3 729.0 +%53 = OpConstant %3 1000.0 +%54 = OpConstant %3 1331.0 +%55 = OpConstant %3 1728.0 +%56 = OpConstantComposite %4 %52 %53 %54 %55 +%57 = OpConstant %3 2197.0 +%58 = OpConstant %3 2744.0 +%59 = OpConstant %3 2812.0 +%61 = OpTypePointer Function %8 +%62 = OpConstantNull %8 +%64 = OpTypePointer Function %4 +%65 = OpConstant %5 0 +%67 = OpTypePointer Function %5 +%68 = OpConstant %5 1 +%70 = OpConstant %5 2 +%72 = OpConstant %5 3 +%74 = OpTypePointer Function %3 +%75 = OpConstant %5 4 +%77 = OpTypePointer Function %6 +%78 = OpConstant %5 5 +%80 = OpTypePointer Function %7 +%81 = OpConstant %5 6 +%83 = OpConstant %5 7 +%88 = OpConstant %5 11 +%109 = OpTypePointer Input %4 +%108 = OpVariable %109 Input +%112 = OpTypePointer Input %5 +%111 = OpVariable %112 Input +%114 = 
OpVariable %112 Input +%116 = OpVariable %112 Input +%119 = OpTypePointer Input %3 +%118 = OpVariable %119 Input +%122 = OpTypePointer Input %6 +%121 = OpVariable %122 Input +%125 = OpTypePointer Input %7 +%124 = OpVariable %125 Input +%127 = OpVariable %125 Input +%129 = OpVariable %109 Input +%131 = OpVariable %119 Input +%133 = OpVariable %119 Input +%135 = OpVariable %119 Input +%30 = OpFunction %2 None %31 %9 = OpLabel -%49 = OpVariable %50 Function %51 -OpStore %23 %25 -OpBranch %52 -%52 = OpLabel -%55 = OpAccessChain %53 %49 %54 -OpStore %55 %32 -%58 = OpAccessChain %56 %49 %57 -OpStore %58 %33 -%61 = OpAccessChain %59 %49 %60 -OpStore %61 %34 -%64 = OpAccessChain %62 %49 %63 -OpStore %64 %37 -%67 = OpAccessChain %65 %49 %66 -OpStore %67 %41 -%69 = OpAccessChain %53 %49 %68 -OpStore %69 %46 -%71 = OpAccessChain %59 %49 %70 -OpStore %71 %47 -%73 = OpAccessChain %59 %49 %72 -OpStore %73 %48 -%74 = OpLoad %8 %49 -%75 = OpCompositeExtract %3 %74 0 -OpStore %10 %75 -%76 = OpAccessChain %24 %10 %57 -%77 = OpLoad %4 %76 -%78 = OpFNegate %4 %77 -OpStore %76 %78 -%79 = OpCompositeExtract %5 %74 1 -OpStore %12 %79 -%80 = OpCompositeExtract %4 %74 2 -OpStore %14 %80 -%81 = OpCompositeExtract %6 %74 3 -OpStore %16 %81 -%82 = OpCompositeExtract %7 %74 4 -OpStore %18 %82 -%83 = OpCompositeExtract %3 %74 5 -OpStore %20 %83 -%84 = OpCompositeExtract %4 %74 6 -OpStore %21 %84 -%85 = OpCompositeExtract %4 %74 7 -OpStore %22 %85 +%60 = OpVariable %61 Function %62 +OpStore %27 %29 +OpBranch %63 +%63 = OpLabel +%66 = OpAccessChain %64 %60 %65 +OpStore %66 %36 +%69 = OpAccessChain %67 %60 %68 +OpStore %69 %37 +%71 = OpAccessChain %67 %60 %70 +OpStore %71 %38 +%73 = OpAccessChain %67 %60 %72 +OpStore %73 %39 +%76 = OpAccessChain %74 %60 %75 +OpStore %76 %40 +%79 = OpAccessChain %77 %60 %78 +OpStore %79 %43 +%82 = OpAccessChain %80 %60 %81 +OpStore %82 %47 +%84 = OpAccessChain %80 %60 %83 +OpStore %84 %51 +%85 = OpAccessChain %64 %60 %37 +OpStore %85 %56 +%86 = OpAccessChain %74 
%60 %38 +OpStore %86 %57 +%87 = OpAccessChain %74 %60 %39 +OpStore %87 %58 +%89 = OpAccessChain %74 %60 %88 +OpStore %89 %59 +%90 = OpLoad %8 %60 +%91 = OpCompositeExtract %4 %90 0 +OpStore %10 %91 +%92 = OpAccessChain %28 %10 %68 +%93 = OpLoad %3 %92 +%94 = OpFNegate %3 %93 +OpStore %92 %94 +%95 = OpCompositeExtract %5 %90 1 +OpStore %12 %95 +%96 = OpCompositeExtract %5 %90 2 +OpStore %14 %96 +%97 = OpCompositeExtract %5 %90 3 +OpStore %15 %97 +%98 = OpCompositeExtract %3 %90 4 +OpStore %16 %98 +%99 = OpCompositeExtract %6 %90 5 +OpStore %18 %99 +%100 = OpCompositeExtract %7 %90 6 +OpStore %20 %100 +%101 = OpCompositeExtract %7 %90 7 +OpStore %22 %101 +%102 = OpCompositeExtract %4 %90 8 +OpStore %23 %102 +%103 = OpCompositeExtract %3 %90 9 +OpStore %24 %103 +%104 = OpCompositeExtract %3 %90 10 +OpStore %25 %104 +%105 = OpCompositeExtract %3 %90 11 +OpStore %26 %105 OpReturn OpFunctionEnd -%109 = OpFunction %2 None %27 -%86 = OpLabel -%90 = OpLoad %3 %88 -%93 = OpLoad %5 %91 -%96 = OpLoad %4 %94 -%99 = OpLoad %6 %97 -%102 = OpLoad %7 %100 -%104 = OpLoad %3 %103 -%106 = OpLoad %4 %105 -%108 = OpLoad %4 %107 -%87 = OpCompositeConstruct %8 %90 %93 %96 %99 %102 %104 %106 %108 -OpBranch %110 -%110 = OpLabel +%137 = OpFunction %2 None %31 +%106 = OpLabel +%110 = OpLoad %4 %108 +%113 = OpLoad %5 %111 +%115 = OpLoad %5 %114 +%117 = OpLoad %5 %116 +%120 = OpLoad %3 %118 +%123 = OpLoad %6 %121 +%126 = OpLoad %7 %124 +%128 = OpLoad %7 %127 +%130 = OpLoad %4 %129 +%132 = OpLoad %3 %131 +%134 = OpLoad %3 %133 +%136 = OpLoad %3 %135 +%107 = OpCompositeConstruct %8 %110 %113 %115 %117 %120 %123 %126 %128 %130 %132 %134 %136 +OpBranch %138 +%138 = OpLabel OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/interpolate_compat.spvasm b/naga/tests/out/spv/interpolate_compat.spvasm new file mode 100644 index 0000000000..90f7237fd1 --- /dev/null +++ b/naga/tests/out/spv/interpolate_compat.spvasm @@ -0,0 +1,263 @@ +; SPIR-V +; Version: 1.0 +; Generator: 
rspirv +; Bound: 133 +OpCapability Shader +OpCapability SampleRateShading +%1 = OpExtInstImport "GLSL.std.450" +OpMemoryModel Logical GLSL450 +OpEntryPoint Vertex %29 "vert_main" %10 %12 %14 %15 %17 %19 %21 %22 %23 %24 %25 %26 +OpEntryPoint Fragment %131 "frag_main" %104 %107 %110 %112 %115 %118 %121 %123 %125 %127 %129 +OpExecutionMode %131 OriginUpperLeft +OpMemberName %8 0 "position" +OpMemberName %8 1 "_flat" +OpMemberName %8 2 "flat_either" +OpMemberName %8 3 "_linear" +OpMemberName %8 4 "linear_centroid" +OpMemberName %8 5 "linear_sample" +OpMemberName %8 6 "linear_center" +OpMemberName %8 7 "perspective" +OpMemberName %8 8 "perspective_centroid" +OpMemberName %8 9 "perspective_sample" +OpMemberName %8 10 "perspective_center" +OpName %8 "FragmentInput" +OpName %10 "position" +OpName %12 "_flat" +OpName %14 "flat_either" +OpName %15 "_linear" +OpName %17 "linear_centroid" +OpName %19 "linear_sample" +OpName %21 "linear_center" +OpName %22 "perspective" +OpName %23 "perspective_centroid" +OpName %24 "perspective_sample" +OpName %25 "perspective_center" +OpName %29 "vert_main" +OpName %58 "out" +OpName %104 "position" +OpName %107 "_flat" +OpName %110 "flat_either" +OpName %112 "_linear" +OpName %115 "linear_centroid" +OpName %118 "linear_sample" +OpName %121 "linear_center" +OpName %123 "perspective" +OpName %125 "perspective_centroid" +OpName %127 "perspective_sample" +OpName %129 "perspective_center" +OpName %131 "frag_main" +OpMemberDecorate %8 0 Offset 0 +OpMemberDecorate %8 1 Offset 16 +OpMemberDecorate %8 2 Offset 20 +OpMemberDecorate %8 3 Offset 24 +OpMemberDecorate %8 4 Offset 32 +OpMemberDecorate %8 5 Offset 48 +OpMemberDecorate %8 6 Offset 64 +OpMemberDecorate %8 7 Offset 80 +OpMemberDecorate %8 8 Offset 96 +OpMemberDecorate %8 9 Offset 100 +OpMemberDecorate %8 10 Offset 104 +OpDecorate %10 BuiltIn Position +OpDecorate %12 Location 0 +OpDecorate %12 Flat +OpDecorate %14 Location 2 +OpDecorate %14 Flat +OpDecorate %15 Location 3 +OpDecorate %15 
NoPerspective +OpDecorate %17 Location 4 +OpDecorate %17 NoPerspective +OpDecorate %17 Centroid +OpDecorate %19 Location 6 +OpDecorate %19 NoPerspective +OpDecorate %19 Sample +OpDecorate %21 Location 7 +OpDecorate %21 NoPerspective +OpDecorate %22 Location 8 +OpDecorate %23 Location 9 +OpDecorate %23 Centroid +OpDecorate %24 Location 10 +OpDecorate %24 Sample +OpDecorate %25 Location 11 +OpDecorate %26 BuiltIn PointSize +OpDecorate %104 BuiltIn FragCoord +OpDecorate %107 Location 0 +OpDecorate %107 Flat +OpDecorate %110 Location 2 +OpDecorate %110 Flat +OpDecorate %112 Location 3 +OpDecorate %112 NoPerspective +OpDecorate %115 Location 4 +OpDecorate %115 NoPerspective +OpDecorate %115 Centroid +OpDecorate %118 Location 6 +OpDecorate %118 NoPerspective +OpDecorate %118 Sample +OpDecorate %121 Location 7 +OpDecorate %121 NoPerspective +OpDecorate %123 Location 8 +OpDecorate %125 Location 9 +OpDecorate %125 Centroid +OpDecorate %127 Location 10 +OpDecorate %127 Sample +OpDecorate %129 Location 11 +%2 = OpTypeVoid +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeInt 32 0 +%6 = OpTypeVector %3 2 +%7 = OpTypeVector %3 3 +%8 = OpTypeStruct %4 %5 %5 %3 %6 %7 %7 %4 %3 %3 %3 +%11 = OpTypePointer Output %4 +%10 = OpVariable %11 Output +%13 = OpTypePointer Output %5 +%12 = OpVariable %13 Output +%14 = OpVariable %13 Output +%16 = OpTypePointer Output %3 +%15 = OpVariable %16 Output +%18 = OpTypePointer Output %6 +%17 = OpVariable %18 Output +%20 = OpTypePointer Output %7 +%19 = OpVariable %20 Output +%21 = OpVariable %20 Output +%22 = OpVariable %11 Output +%23 = OpVariable %16 Output +%24 = OpVariable %16 Output +%25 = OpVariable %16 Output +%27 = OpTypePointer Output %3 +%26 = OpVariable %27 Output +%28 = OpConstant %3 1.0 +%30 = OpTypeFunction %2 +%31 = OpConstant %3 2.0 +%32 = OpConstant %3 4.0 +%33 = OpConstant %3 5.0 +%34 = OpConstant %3 6.0 +%35 = OpConstantComposite %4 %31 %32 %33 %34 +%36 = OpConstant %5 8 +%37 = OpConstant %5 10 +%38 = OpConstant %3 27.0 
+%39 = OpConstant %3 64.0 +%40 = OpConstant %3 125.0 +%41 = OpConstantComposite %6 %39 %40 +%42 = OpConstant %3 216.0 +%43 = OpConstant %3 343.0 +%44 = OpConstant %3 512.0 +%45 = OpConstantComposite %7 %42 %43 %44 +%46 = OpConstant %3 255.0 +%47 = OpConstant %3 511.0 +%48 = OpConstant %3 1024.0 +%49 = OpConstantComposite %7 %46 %47 %48 +%50 = OpConstant %3 729.0 +%51 = OpConstant %3 1000.0 +%52 = OpConstant %3 1331.0 +%53 = OpConstant %3 1728.0 +%54 = OpConstantComposite %4 %50 %51 %52 %53 +%55 = OpConstant %3 2197.0 +%56 = OpConstant %3 2744.0 +%57 = OpConstant %3 2812.0 +%59 = OpTypePointer Function %8 +%60 = OpConstantNull %8 +%62 = OpTypePointer Function %4 +%63 = OpConstant %5 0 +%65 = OpTypePointer Function %5 +%66 = OpConstant %5 1 +%68 = OpConstant %5 2 +%70 = OpTypePointer Function %3 +%71 = OpConstant %5 3 +%73 = OpTypePointer Function %6 +%74 = OpConstant %5 4 +%76 = OpTypePointer Function %7 +%77 = OpConstant %5 5 +%79 = OpConstant %5 6 +%81 = OpConstant %5 7 +%84 = OpConstant %5 9 +%105 = OpTypePointer Input %4 +%104 = OpVariable %105 Input +%108 = OpTypePointer Input %5 +%107 = OpVariable %108 Input +%110 = OpVariable %108 Input +%113 = OpTypePointer Input %3 +%112 = OpVariable %113 Input +%116 = OpTypePointer Input %6 +%115 = OpVariable %116 Input +%119 = OpTypePointer Input %7 +%118 = OpVariable %119 Input +%121 = OpVariable %119 Input +%123 = OpVariable %105 Input +%125 = OpVariable %113 Input +%127 = OpVariable %113 Input +%129 = OpVariable %113 Input +%29 = OpFunction %2 None %30 +%9 = OpLabel +%58 = OpVariable %59 Function %60 +OpStore %26 %28 +OpBranch %61 +%61 = OpLabel +%64 = OpAccessChain %62 %58 %63 +OpStore %64 %35 +%67 = OpAccessChain %65 %58 %66 +OpStore %67 %36 +%69 = OpAccessChain %65 %58 %68 +OpStore %69 %37 +%72 = OpAccessChain %70 %58 %71 +OpStore %72 %38 +%75 = OpAccessChain %73 %58 %74 +OpStore %75 %41 +%78 = OpAccessChain %76 %58 %77 +OpStore %78 %45 +%80 = OpAccessChain %76 %58 %79 +OpStore %80 %49 +%82 = OpAccessChain %62 %58 
%81 +OpStore %82 %54 +%83 = OpAccessChain %70 %58 %36 +OpStore %83 %55 +%85 = OpAccessChain %70 %58 %84 +OpStore %85 %56 +%86 = OpAccessChain %70 %58 %37 +OpStore %86 %57 +%87 = OpLoad %8 %58 +%88 = OpCompositeExtract %4 %87 0 +OpStore %10 %88 +%89 = OpAccessChain %27 %10 %66 +%90 = OpLoad %3 %89 +%91 = OpFNegate %3 %90 +OpStore %89 %91 +%92 = OpCompositeExtract %5 %87 1 +OpStore %12 %92 +%93 = OpCompositeExtract %5 %87 2 +OpStore %14 %93 +%94 = OpCompositeExtract %3 %87 3 +OpStore %15 %94 +%95 = OpCompositeExtract %6 %87 4 +OpStore %17 %95 +%96 = OpCompositeExtract %7 %87 5 +OpStore %19 %96 +%97 = OpCompositeExtract %7 %87 6 +OpStore %21 %97 +%98 = OpCompositeExtract %4 %87 7 +OpStore %22 %98 +%99 = OpCompositeExtract %3 %87 8 +OpStore %23 %99 +%100 = OpCompositeExtract %3 %87 9 +OpStore %24 %100 +%101 = OpCompositeExtract %3 %87 10 +OpStore %25 %101 +OpReturn +OpFunctionEnd +%131 = OpFunction %2 None %30 +%102 = OpLabel +%106 = OpLoad %4 %104 +%109 = OpLoad %5 %107 +%111 = OpLoad %5 %110 +%114 = OpLoad %3 %112 +%117 = OpLoad %6 %115 +%120 = OpLoad %7 %118 +%122 = OpLoad %7 %121 +%124 = OpLoad %4 %123 +%126 = OpLoad %3 %125 +%128 = OpLoad %3 %127 +%130 = OpLoad %3 %129 +%103 = OpCompositeConstruct %8 %106 %109 %111 %114 %117 %120 %122 %124 %126 %128 %130 +OpBranch %132 +%132 = OpLabel +OpReturn +OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/math-functions.spvasm b/naga/tests/out/spv/math-functions.spvasm index 366857f91f..47efe2a6fe 100644 --- a/naga/tests/out/spv/math-functions.spvasm +++ b/naga/tests/out/spv/math-functions.spvasm @@ -18,27 +18,27 @@ OpMemberDecorate %14 1 Offset 4 OpMemberDecorate %15 0 Offset 0 OpMemberDecorate %15 1 Offset 16 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 %6 = OpTypeInt 32 1 %5 = OpTypeVector %6 4 %7 = OpTypeVector %6 2 %9 = OpTypeInt 32 0 %8 = OpTypeVector %9 2 -%10 = OpTypeVector %4 2 -%11 = OpTypeStruct %4 %4 +%10 = OpTypeVector %3 2 +%11 
= OpTypeStruct %3 %3 %12 = OpTypeStruct %10 %10 -%13 = OpTypeStruct %3 %3 -%14 = OpTypeStruct %4 %6 -%15 = OpTypeStruct %3 %5 +%13 = OpTypeStruct %4 %4 +%14 = OpTypeStruct %3 %6 +%15 = OpTypeStruct %4 %5 %18 = OpTypeFunction %2 -%19 = OpConstant %4 1.0 -%20 = OpConstant %4 0.0 -%21 = OpConstantComposite %3 %20 %20 %20 %20 +%19 = OpConstant %3 1.0 +%20 = OpConstant %3 0.0 +%21 = OpConstantComposite %4 %20 %20 %20 %20 %22 = OpConstant %6 -1 %23 = OpConstantComposite %5 %22 %22 %22 %22 -%24 = OpConstant %4 -1.0 -%25 = OpConstantComposite %3 %24 %24 %24 %24 +%24 = OpConstant %3 -1.0 +%25 = OpConstantComposite %4 %24 %24 %24 %24 %26 = OpConstantNull %7 %27 = OpConstant %9 4294967295 %28 = OpConstantComposite %7 %22 %22 @@ -53,26 +53,26 @@ OpMemberDecorate %15 1 Offset 16 %37 = OpConstant %9 31 %38 = OpConstantComposite %8 %37 %37 %39 = OpConstant %6 2 -%40 = OpConstant %4 2.0 +%40 = OpConstant %3 2.0 %41 = OpConstantComposite %10 %19 %40 %42 = OpConstant %6 3 %43 = OpConstant %6 4 %44 = OpConstantComposite %7 %42 %43 -%45 = OpConstant %4 1.5 +%45 = OpConstant %3 1.5 %46 = OpConstantComposite %10 %45 %45 -%47 = OpConstantComposite %3 %45 %45 %45 %45 -%54 = OpConstantComposite %3 %19 %19 %19 %19 +%47 = OpConstantComposite %4 %45 %45 %45 %45 +%54 = OpConstantComposite %4 %19 %19 %19 %19 %57 = OpConstantNull %6 %17 = OpFunction %2 None %18 %16 = OpLabel OpBranch %48 %48 = OpLabel -%49 = OpExtInst %4 %1 Degrees %19 -%50 = OpExtInst %4 %1 Radians %19 -%51 = OpExtInst %3 %1 Degrees %21 -%52 = OpExtInst %3 %1 Radians %21 -%53 = OpExtInst %3 %1 FClamp %21 %21 %54 -%55 = OpExtInst %3 %1 Refract %21 %21 %19 +%49 = OpExtInst %3 %1 Degrees %19 +%50 = OpExtInst %3 %1 Radians %19 +%51 = OpExtInst %4 %1 Degrees %21 +%52 = OpExtInst %4 %1 Radians %21 +%53 = OpExtInst %4 %1 FClamp %21 %21 %54 +%55 = OpExtInst %4 %1 Refract %21 %21 %19 %58 = OpCompositeExtract %6 %26 0 %59 = OpCompositeExtract %6 %26 0 %60 = OpIMul %6 %58 %59 @@ -81,23 +81,23 @@ OpBranch %48 %63 = OpCompositeExtract %6 
%26 1 %64 = OpIMul %6 %62 %63 %56 = OpIAdd %6 %61 %64 -%65 = OpExtInst %4 %1 Ldexp %19 %39 +%65 = OpExtInst %3 %1 Ldexp %19 %39 %66 = OpExtInst %10 %1 Ldexp %41 %44 %67 = OpExtInst %11 %1 ModfStruct %45 %68 = OpExtInst %11 %1 ModfStruct %45 -%69 = OpCompositeExtract %4 %68 0 +%69 = OpCompositeExtract %3 %68 0 %70 = OpExtInst %11 %1 ModfStruct %45 -%71 = OpCompositeExtract %4 %70 1 +%71 = OpCompositeExtract %3 %70 1 %72 = OpExtInst %12 %1 ModfStruct %46 %73 = OpExtInst %13 %1 ModfStruct %47 -%74 = OpCompositeExtract %3 %73 1 -%75 = OpCompositeExtract %4 %74 0 +%74 = OpCompositeExtract %4 %73 1 +%75 = OpCompositeExtract %3 %74 0 %76 = OpExtInst %12 %1 ModfStruct %46 %77 = OpCompositeExtract %10 %76 0 -%78 = OpCompositeExtract %4 %77 1 +%78 = OpCompositeExtract %3 %77 1 %79 = OpExtInst %14 %1 FrexpStruct %45 %80 = OpExtInst %14 %1 FrexpStruct %45 -%81 = OpCompositeExtract %4 %80 0 +%81 = OpCompositeExtract %3 %80 0 %82 = OpExtInst %14 %1 FrexpStruct %45 %83 = OpCompositeExtract %6 %82 1 %84 = OpExtInst %15 %1 FrexpStruct %47 diff --git a/naga/tests/out/spv/operators.spvasm b/naga/tests/out/spv/operators.spvasm index 974623bc70..a59c2e5558 100644 --- a/naga/tests/out/spv/operators.spvasm +++ b/naga/tests/out/spv/operators.spvasm @@ -9,47 +9,47 @@ OpEntryPoint GLCompute %374 "main" %371 OpExecutionMode %374 LocalSize 1 1 1 OpDecorate %371 BuiltIn WorkgroupId %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 -%6 = OpTypeInt 32 1 -%5 = OpTypeVector %6 4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 +%5 = OpTypeInt 32 1 +%6 = OpTypeVector %5 4 %8 = OpTypeBool %7 = OpTypeVector %8 4 -%9 = OpTypeVector %4 2 -%10 = OpTypeVector %4 3 +%9 = OpTypeVector %3 2 +%10 = OpTypeVector %3 3 %11 = OpTypeMatrix %10 3 %12 = OpTypeMatrix %10 4 -%13 = OpTypeMatrix %3 3 -%14 = OpTypeVector %6 3 +%13 = OpTypeMatrix %4 3 +%14 = OpTypeVector %5 3 %16 = OpTypeInt 32 0 %15 = OpTypeVector %16 3 -%17 = OpConstant %4 1.0 -%18 = OpConstantComposite %3 %17 %17 %17 %17 -%19 = OpConstant %4 
0.0 -%20 = OpConstantComposite %3 %19 %19 %19 %19 -%21 = OpConstant %4 0.5 -%22 = OpConstantComposite %3 %21 %21 %21 %21 -%23 = OpConstant %6 1 -%24 = OpConstantComposite %5 %23 %23 %23 %23 -%27 = OpTypeFunction %3 +%17 = OpConstant %3 1.0 +%18 = OpConstantComposite %4 %17 %17 %17 %17 +%19 = OpConstant %3 0.0 +%20 = OpConstantComposite %4 %19 %19 %19 %19 +%21 = OpConstant %3 0.5 +%22 = OpConstantComposite %4 %21 %21 %21 %21 +%23 = OpConstant %5 1 +%24 = OpConstantComposite %6 %23 %23 %23 %23 +%27 = OpTypeFunction %4 %28 = OpConstantTrue %8 -%29 = OpConstant %6 0 +%29 = OpConstant %5 0 %30 = OpConstantFalse %8 %31 = OpConstantComposite %7 %30 %30 %30 %30 -%32 = OpConstant %4 0.1 -%33 = OpConstantComposite %5 %29 %29 %29 %29 -%57 = OpTypeFunction %3 %4 %6 -%58 = OpConstant %4 2.0 +%32 = OpConstant %3 0.1 +%33 = OpConstantComposite %6 %29 %29 %29 %29 +%57 = OpTypeFunction %4 %3 %5 +%58 = OpConstant %3 2.0 %59 = OpConstantComposite %9 %58 %58 -%60 = OpConstant %4 4.0 +%60 = OpConstant %3 4.0 %61 = OpConstantComposite %9 %60 %60 -%62 = OpConstant %4 8.0 +%62 = OpConstant %3 8.0 %63 = OpConstantComposite %9 %62 %62 -%64 = OpConstant %6 2 -%65 = OpConstantComposite %5 %64 %64 %64 %64 +%64 = OpConstant %5 2 +%65 = OpConstantComposite %6 %64 %64 %64 %64 %78 = OpTypeFunction %9 %79 = OpConstantComposite %9 %17 %17 -%80 = OpConstant %4 3.0 +%80 = OpConstant %3 3.0 %81 = OpConstantComposite %9 %80 %80 %83 = OpTypePointer Function %9 %95 = OpTypeFunction %10 %10 @@ -65,13 +65,13 @@ OpDecorate %371 BuiltIn WorkgroupId %110 = OpConstantComposite %7 %30 %30 %30 %30 %122 = OpConstant %16 1 %123 = OpConstant %16 2 -%124 = OpTypeVector %6 2 +%124 = OpTypeVector %5 2 %125 = OpConstantComposite %124 %23 %23 %126 = OpConstantComposite %124 %64 %64 %127 = OpConstantComposite %15 %123 %123 %123 %128 = OpConstantComposite %15 %122 %122 %122 -%129 = OpConstantComposite %3 %58 %58 %58 %58 -%130 = OpConstantComposite %3 %17 %17 %17 %17 +%129 = OpConstantComposite %4 %58 %58 %58 %58 +%130 = 
OpConstantComposite %4 %17 %17 %17 %17 %131 = OpTypeVector %16 2 %132 = OpConstantComposite %131 %123 %123 %133 = OpConstantComposite %131 %122 %122 @@ -80,40 +80,40 @@ OpDecorate %371 BuiltIn WorkgroupId %136 = OpConstantComposite %10 %58 %58 %58 %137 = OpConstantNull %13 %301 = OpConstantNull %14 -%303 = OpTypePointer Function %6 -%304 = OpConstantNull %6 +%303 = OpTypePointer Function %5 +%304 = OpConstantNull %5 %306 = OpTypePointer Function %14 -%334 = OpTypePointer Function %6 +%334 = OpTypePointer Function %5 %372 = OpTypePointer Input %15 %371 = OpVariable %372 Input %375 = OpConstantComposite %10 %17 %17 %17 -%26 = OpFunction %3 None %27 +%26 = OpFunction %4 None %27 %25 = OpLabel OpBranch %34 %34 = OpLabel -%35 = OpSelect %6 %28 %23 %29 +%35 = OpSelect %5 %28 %23 %29 %37 = OpCompositeConstruct %7 %28 %28 %28 %28 -%36 = OpSelect %3 %37 %18 %20 -%38 = OpSelect %3 %31 %20 %18 -%39 = OpExtInst %3 %1 FMix %20 %18 %22 -%41 = OpCompositeConstruct %3 %32 %32 %32 %32 -%40 = OpExtInst %3 %1 FMix %20 %18 %41 -%42 = OpBitcast %4 %23 -%43 = OpBitcast %3 %24 -%44 = OpCompositeConstruct %5 %35 %35 %35 %35 -%45 = OpIAdd %5 %44 %33 -%46 = OpConvertSToF %3 %45 -%47 = OpFAdd %3 %46 %36 -%48 = OpFAdd %3 %47 %39 -%49 = OpFAdd %3 %48 %40 -%50 = OpCompositeConstruct %3 %42 %42 %42 %42 -%51 = OpFAdd %3 %49 %50 -%52 = OpFAdd %3 %51 %43 +%36 = OpSelect %4 %37 %18 %20 +%38 = OpSelect %4 %31 %20 %18 +%39 = OpExtInst %4 %1 FMix %20 %18 %22 +%41 = OpCompositeConstruct %4 %32 %32 %32 %32 +%40 = OpExtInst %4 %1 FMix %20 %18 %41 +%42 = OpBitcast %3 %23 +%43 = OpBitcast %4 %24 +%44 = OpCompositeConstruct %6 %35 %35 %35 %35 +%45 = OpIAdd %6 %44 %33 +%46 = OpConvertSToF %4 %45 +%47 = OpFAdd %4 %46 %36 +%48 = OpFAdd %4 %47 %39 +%49 = OpFAdd %4 %48 %40 +%50 = OpCompositeConstruct %4 %42 %42 %42 %42 +%51 = OpFAdd %4 %49 %50 +%52 = OpFAdd %4 %51 %43 OpReturnValue %52 OpFunctionEnd -%56 = OpFunction %3 None %57 -%54 = OpFunctionParameter %4 -%55 = OpFunctionParameter %6 +%56 = OpFunction %4 None 
%57 +%54 = OpFunctionParameter %3 +%55 = OpFunctionParameter %5 %53 = OpLabel OpBranch %66 %66 = OpLabel @@ -121,11 +121,11 @@ OpBranch %66 %68 = OpFAdd %9 %59 %67 %69 = OpFSub %9 %68 %61 %70 = OpFDiv %9 %69 %63 -%71 = OpCompositeConstruct %5 %55 %55 %55 %55 -%72 = OpSRem %5 %71 %65 -%73 = OpVectorShuffle %3 %70 %70 0 1 0 1 -%74 = OpConvertSToF %3 %72 -%75 = OpFAdd %3 %73 %74 +%71 = OpCompositeConstruct %6 %55 %55 %55 %55 +%72 = OpSRem %6 %71 %65 +%73 = OpVectorShuffle %4 %70 %70 0 1 0 1 +%74 = OpConvertSToF %4 %72 +%75 = OpFAdd %4 %73 %74 OpReturnValue %75 OpFunctionEnd %77 = OpFunction %9 None %78 @@ -172,39 +172,39 @@ OpFunctionEnd %120 = OpLabel OpBranch %138 %138 = OpLabel -%139 = OpFNegate %4 %17 +%139 = OpFNegate %3 %17 %140 = OpSNegate %124 %125 %141 = OpFNegate %9 %79 -%142 = OpIAdd %6 %64 %23 +%142 = OpIAdd %5 %64 %23 %143 = OpIAdd %16 %123 %122 -%144 = OpFAdd %4 %58 %17 +%144 = OpFAdd %3 %58 %17 %145 = OpIAdd %124 %126 %125 %146 = OpIAdd %15 %127 %128 -%147 = OpFAdd %3 %129 %130 -%148 = OpISub %6 %64 %23 +%147 = OpFAdd %4 %129 %130 +%148 = OpISub %5 %64 %23 %149 = OpISub %16 %123 %122 -%150 = OpFSub %4 %58 %17 +%150 = OpFSub %3 %58 %17 %151 = OpISub %124 %126 %125 %152 = OpISub %15 %127 %128 -%153 = OpFSub %3 %129 %130 -%154 = OpIMul %6 %64 %23 +%153 = OpFSub %4 %129 %130 +%154 = OpIMul %5 %64 %23 %155 = OpIMul %16 %123 %122 -%156 = OpFMul %4 %58 %17 +%156 = OpFMul %3 %58 %17 %157 = OpIMul %124 %126 %125 %158 = OpIMul %15 %127 %128 -%159 = OpFMul %3 %129 %130 -%160 = OpSDiv %6 %64 %23 +%159 = OpFMul %4 %129 %130 +%160 = OpSDiv %5 %64 %23 %161 = OpUDiv %16 %123 %122 -%162 = OpFDiv %4 %58 %17 +%162 = OpFDiv %3 %58 %17 %163 = OpSDiv %124 %126 %125 %164 = OpUDiv %15 %127 %128 -%165 = OpFDiv %3 %129 %130 -%166 = OpSRem %6 %64 %23 +%165 = OpFDiv %4 %129 %130 +%166 = OpSRem %5 %64 %23 %167 = OpUMod %16 %123 %122 -%168 = OpFRem %4 %58 %17 +%168 = OpFRem %3 %58 %17 %169 = OpSRem %124 %126 %125 %170 = OpUMod %15 %127 %128 -%171 = OpFRem %3 %129 %130 +%171 = OpFRem 
%4 %129 %130 OpBranch %172 %172 = OpLabel %174 = OpIAdd %124 %126 %125 @@ -266,7 +266,7 @@ OpBranch %173 %228 = OpMatrixTimesScalar %11 %134 %17 %229 = OpMatrixTimesScalar %11 %134 %58 %230 = OpMatrixTimesVector %10 %135 %130 -%231 = OpVectorTimesMatrix %3 %136 %135 +%231 = OpVectorTimesMatrix %4 %136 %135 %232 = OpMatrixTimesMatrix %11 %135 %137 OpReturn OpFunctionEnd @@ -274,27 +274,27 @@ OpFunctionEnd %233 = OpLabel OpBranch %235 %235 = OpLabel -%236 = OpNot %6 %23 +%236 = OpNot %5 %23 %237 = OpNot %16 %122 %238 = OpNot %124 %125 %239 = OpNot %15 %128 -%240 = OpBitwiseOr %6 %64 %23 +%240 = OpBitwiseOr %5 %64 %23 %241 = OpBitwiseOr %16 %123 %122 %242 = OpBitwiseOr %124 %126 %125 %243 = OpBitwiseOr %15 %127 %128 -%244 = OpBitwiseAnd %6 %64 %23 +%244 = OpBitwiseAnd %5 %64 %23 %245 = OpBitwiseAnd %16 %123 %122 %246 = OpBitwiseAnd %124 %126 %125 %247 = OpBitwiseAnd %15 %127 %128 -%248 = OpBitwiseXor %6 %64 %23 +%248 = OpBitwiseXor %5 %64 %23 %249 = OpBitwiseXor %16 %123 %122 %250 = OpBitwiseXor %124 %126 %125 %251 = OpBitwiseXor %15 %127 %128 -%252 = OpShiftLeftLogical %6 %64 %122 +%252 = OpShiftLeftLogical %5 %64 %122 %253 = OpShiftLeftLogical %16 %123 %122 %254 = OpShiftLeftLogical %124 %126 %133 %255 = OpShiftLeftLogical %15 %127 %128 -%256 = OpShiftRightArithmetic %6 %64 %122 +%256 = OpShiftRightArithmetic %5 %64 %122 %257 = OpShiftRightLogical %16 %123 %122 %258 = OpShiftRightArithmetic %124 %126 %133 %259 = OpShiftRightLogical %15 %127 %128 @@ -349,52 +349,52 @@ OpFunctionEnd OpBranch %307 %307 = OpLabel OpStore %302 %23 -%308 = OpLoad %6 %302 -%309 = OpIAdd %6 %308 %23 +%308 = OpLoad %5 %302 +%309 = OpIAdd %5 %308 %23 OpStore %302 %309 -%310 = OpLoad %6 %302 -%311 = OpISub %6 %310 %23 +%310 = OpLoad %5 %302 +%311 = OpISub %5 %310 %23 OpStore %302 %311 -%312 = OpLoad %6 %302 -%313 = OpLoad %6 %302 -%314 = OpIMul %6 %313 %312 +%312 = OpLoad %5 %302 +%313 = OpLoad %5 %302 +%314 = OpIMul %5 %313 %312 OpStore %302 %314 -%315 = OpLoad %6 %302 -%316 = OpLoad %6 %302 
-%317 = OpSDiv %6 %316 %315 +%315 = OpLoad %5 %302 +%316 = OpLoad %5 %302 +%317 = OpSDiv %5 %316 %315 OpStore %302 %317 -%318 = OpLoad %6 %302 -%319 = OpSRem %6 %318 %23 +%318 = OpLoad %5 %302 +%319 = OpSRem %5 %318 %23 OpStore %302 %319 -%320 = OpLoad %6 %302 -%321 = OpBitwiseAnd %6 %320 %29 +%320 = OpLoad %5 %302 +%321 = OpBitwiseAnd %5 %320 %29 OpStore %302 %321 -%322 = OpLoad %6 %302 -%323 = OpBitwiseOr %6 %322 %29 +%322 = OpLoad %5 %302 +%323 = OpBitwiseOr %5 %322 %29 OpStore %302 %323 -%324 = OpLoad %6 %302 -%325 = OpBitwiseXor %6 %324 %29 +%324 = OpLoad %5 %302 +%325 = OpBitwiseXor %5 %324 %29 OpStore %302 %325 -%326 = OpLoad %6 %302 -%327 = OpShiftLeftLogical %6 %326 %123 +%326 = OpLoad %5 %302 +%327 = OpShiftLeftLogical %5 %326 %123 OpStore %302 %327 -%328 = OpLoad %6 %302 -%329 = OpShiftRightArithmetic %6 %328 %122 +%328 = OpLoad %5 %302 +%329 = OpShiftRightArithmetic %5 %328 %122 OpStore %302 %329 -%330 = OpLoad %6 %302 -%331 = OpIAdd %6 %330 %23 +%330 = OpLoad %5 %302 +%331 = OpIAdd %5 %330 %23 OpStore %302 %331 -%332 = OpLoad %6 %302 -%333 = OpISub %6 %332 %23 +%332 = OpLoad %5 %302 +%333 = OpISub %5 %332 %23 OpStore %302 %333 %335 = OpAccessChain %334 %305 %23 -%336 = OpLoad %6 %335 -%337 = OpIAdd %6 %336 %23 +%336 = OpLoad %5 %335 +%337 = OpIAdd %5 %336 %23 %338 = OpAccessChain %334 %305 %23 OpStore %338 %337 %339 = OpAccessChain %334 %305 %23 -%340 = OpLoad %6 %339 -%341 = OpISub %6 %340 %23 +%340 = OpLoad %5 %339 +%341 = OpISub %5 %340 %23 %342 = OpAccessChain %334 %305 %23 OpStore %342 %341 OpReturn @@ -403,30 +403,30 @@ OpFunctionEnd %343 = OpLabel OpBranch %345 %345 = OpLabel -%346 = OpSNegate %6 %23 -%347 = OpSNegate %6 %23 -%348 = OpSNegate %6 %347 -%349 = OpSNegate %6 %23 -%350 = OpSNegate %6 %349 -%351 = OpSNegate %6 %23 -%352 = OpSNegate %6 %351 -%353 = OpSNegate %6 %23 -%354 = OpSNegate %6 %353 -%355 = OpSNegate %6 %354 -%356 = OpSNegate %6 %23 -%357 = OpSNegate %6 %356 -%358 = OpSNegate %6 %357 -%359 = OpSNegate %6 %358 -%360 = OpSNegate 
%6 %23 -%361 = OpSNegate %6 %360 -%362 = OpSNegate %6 %361 -%363 = OpSNegate %6 %362 -%364 = OpSNegate %6 %363 -%365 = OpSNegate %6 %23 -%366 = OpSNegate %6 %365 -%367 = OpSNegate %6 %366 -%368 = OpSNegate %6 %367 -%369 = OpSNegate %6 %368 +%346 = OpSNegate %5 %23 +%347 = OpSNegate %5 %23 +%348 = OpSNegate %5 %347 +%349 = OpSNegate %5 %23 +%350 = OpSNegate %5 %349 +%351 = OpSNegate %5 %23 +%352 = OpSNegate %5 %351 +%353 = OpSNegate %5 %23 +%354 = OpSNegate %5 %353 +%355 = OpSNegate %5 %354 +%356 = OpSNegate %5 %23 +%357 = OpSNegate %5 %356 +%358 = OpSNegate %5 %357 +%359 = OpSNegate %5 %358 +%360 = OpSNegate %5 %23 +%361 = OpSNegate %5 %360 +%362 = OpSNegate %5 %361 +%363 = OpSNegate %5 %362 +%364 = OpSNegate %5 %363 +%365 = OpSNegate %5 %23 +%366 = OpSNegate %5 %365 +%367 = OpSNegate %5 %366 +%368 = OpSNegate %5 %367 +%369 = OpSNegate %5 %368 OpReturn OpFunctionEnd %374 = OpFunction %2 None %104 @@ -434,12 +434,12 @@ OpFunctionEnd %373 = OpLoad %15 %371 OpBranch %376 %376 = OpLabel -%377 = OpFunctionCall %3 %26 +%377 = OpFunctionCall %4 %26 %378 = OpCompositeExtract %16 %373 0 -%379 = OpConvertUToF %4 %378 +%379 = OpConvertUToF %3 %378 %380 = OpCompositeExtract %16 %373 1 -%381 = OpBitcast %6 %380 -%382 = OpFunctionCall %3 %56 %379 %381 +%381 = OpBitcast %5 %380 +%382 = OpFunctionCall %4 %56 %379 %381 %383 = OpFunctionCall %10 %94 %375 %384 = OpFunctionCall %2 %103 %385 = OpFunctionCall %2 %121 diff --git a/naga/tests/out/spv/padding.spvasm b/naga/tests/out/spv/padding.spvasm index aae9f2cb74..b7b21f17ed 100644 --- a/naga/tests/out/spv/padding.spvasm +++ b/naga/tests/out/spv/padding.spvasm @@ -45,17 +45,17 @@ OpDecorate %21 Block OpMemberDecorate %21 0 Offset 0 OpDecorate %24 BuiltIn Position %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 3 -%5 = OpTypeStruct %3 -%6 = OpTypeStruct %5 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 3 +%5 = OpTypeStruct %4 +%6 = OpTypeStruct %5 %3 %9 = OpTypeInt 32 0 %8 = OpConstant %9 2 -%7 = OpTypeArray %3 %8 -%10 = 
OpTypeStruct %7 %4 -%11 = OpTypeMatrix %3 4 -%12 = OpTypeStruct %11 %4 -%13 = OpTypeVector %4 4 +%7 = OpTypeArray %4 %8 +%10 = OpTypeStruct %7 %3 +%11 = OpTypeMatrix %4 4 +%12 = OpTypeStruct %11 %3 +%13 = OpTypeVector %3 4 %15 = OpTypeStruct %6 %16 = OpTypePointer Uniform %15 %14 = OpVariable %16 Uniform @@ -72,9 +72,9 @@ OpDecorate %24 BuiltIn Position %29 = OpConstant %9 0 %31 = OpTypePointer Uniform %10 %33 = OpTypePointer Uniform %12 -%35 = OpConstant %4 1.0 +%35 = OpConstant %3 1.0 %36 = OpConstantComposite %13 %35 %35 %35 %35 -%38 = OpTypePointer Uniform %4 +%38 = OpTypePointer Uniform %3 %39 = OpConstant %9 1 %26 = OpFunction %2 None %27 %23 = OpLabel @@ -84,13 +84,13 @@ OpDecorate %24 BuiltIn Position OpBranch %37 %37 = OpLabel %40 = OpAccessChain %38 %30 %39 -%41 = OpLoad %4 %40 +%41 = OpLoad %3 %40 %42 = OpVectorTimesScalar %13 %36 %41 %43 = OpAccessChain %38 %32 %39 -%44 = OpLoad %4 %43 +%44 = OpLoad %3 %43 %45 = OpVectorTimesScalar %13 %42 %44 %46 = OpAccessChain %38 %34 %39 -%47 = OpLoad %4 %46 +%47 = OpLoad %3 %46 %48 = OpVectorTimesScalar %13 %45 %47 OpStore %24 %48 OpReturn diff --git a/naga/tests/out/spv/pointers.spvasm b/naga/tests/out/spv/pointers.spvasm index ae42aed2e0..07658a21a1 100644 --- a/naga/tests/out/spv/pointers.spvasm +++ b/naga/tests/out/spv/pointers.spvasm @@ -24,20 +24,20 @@ OpDecorate %7 Block OpDecorate %8 DescriptorSet 0 OpDecorate %8 Binding 0 %2 = OpTypeVoid -%4 = OpTypeInt 32 1 -%3 = OpTypeVector %4 2 +%3 = OpTypeInt 32 1 +%4 = OpTypeVector %3 2 %5 = OpTypeInt 32 0 %6 = OpTypeRuntimeArray %5 %7 = OpTypeStruct %6 %9 = OpTypePointer StorageBuffer %7 %8 = OpVariable %9 StorageBuffer %12 = OpTypeFunction %2 -%13 = OpConstant %4 10 -%15 = OpTypePointer Function %3 -%16 = OpConstantNull %3 -%18 = OpTypePointer Function %4 +%13 = OpConstant %3 10 +%15 = OpTypePointer Function %4 +%16 = OpConstantNull %4 +%18 = OpTypePointer Function %3 %19 = OpConstant %5 0 -%25 = OpTypeFunction %2 %4 %5 +%25 = OpTypeFunction %2 %3 %5 %27 = 
OpTypePointer StorageBuffer %6 %28 = OpTypePointer StorageBuffer %5 %11 = OpFunction %2 None %12 @@ -50,7 +50,7 @@ OpStore %20 %13 OpReturn OpFunctionEnd %24 = OpFunction %2 None %25 -%22 = OpFunctionParameter %4 +%22 = OpFunctionParameter %3 %23 = OpFunctionParameter %5 %21 = OpLabel OpBranch %26 @@ -63,7 +63,7 @@ OpStore %32 %31 OpReturn OpFunctionEnd %36 = OpFunction %2 None %25 -%34 = OpFunctionParameter %4 +%34 = OpFunctionParameter %3 %35 = OpFunctionParameter %5 %33 = OpLabel OpBranch %37 diff --git a/naga/tests/out/spv/policy-mix.spvasm b/naga/tests/out/spv/policy-mix.spvasm index a10ff1121f..f517777987 100644 --- a/naga/tests/out/spv/policy-mix.spvasm +++ b/naga/tests/out/spv/policy-mix.spvasm @@ -41,24 +41,24 @@ OpMemberDecorate %25 0 Offset 0 OpDecorate %27 DescriptorSet 0 OpDecorate %27 Binding 2 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 4 %7 = OpTypeInt 32 0 %6 = OpConstant %7 10 -%5 = OpTypeArray %3 %6 +%5 = OpTypeArray %4 %6 %8 = OpTypeStruct %5 %10 = OpConstant %7 20 -%9 = OpTypeArray %3 %10 +%9 = OpTypeArray %4 %10 %11 = OpTypeStruct %9 -%12 = OpTypeImage %4 2D 0 1 0 1 Unknown +%12 = OpTypeImage %3 2D 0 1 0 1 Unknown %14 = OpConstant %7 30 -%13 = OpTypeArray %4 %14 +%13 = OpTypeArray %3 %14 %16 = OpConstant %7 40 -%15 = OpTypeArray %4 %16 -%18 = OpTypeInt 32 1 -%17 = OpTypeVector %18 2 +%15 = OpTypeArray %3 %16 +%17 = OpTypeInt 32 1 +%18 = OpTypeVector %17 2 %20 = OpConstant %7 2 -%19 = OpTypeArray %3 %20 +%19 = OpTypeArray %4 %20 %22 = OpTypeStruct %8 %23 = OpTypePointer StorageBuffer %22 %21 = OpVariable %23 StorageBuffer @@ -72,35 +72,35 @@ OpDecorate %27 Binding 2 %32 = OpTypePointer Private %15 %33 = OpConstantNull %15 %31 = OpVariable %32 Private %33 -%39 = OpTypeFunction %3 %17 %18 %18 +%39 = OpTypeFunction %4 %18 %17 %17 %40 = OpTypePointer StorageBuffer %8 %41 = OpConstant %7 0 %43 = OpTypePointer Uniform %11 -%46 = OpConstant %4 0.707 -%47 = OpConstant %4 0.0 -%48 = OpConstant 
%4 1.0 -%49 = OpConstantComposite %3 %46 %47 %47 %48 -%50 = OpConstantComposite %3 %47 %46 %47 %48 +%46 = OpConstant %3 0.707 +%47 = OpConstant %3 0.0 +%48 = OpConstant %3 1.0 +%49 = OpConstantComposite %4 %46 %47 %47 %48 +%50 = OpConstantComposite %4 %47 %46 %47 %48 %51 = OpConstantComposite %19 %49 %50 %53 = OpTypePointer Function %19 %55 = OpTypePointer StorageBuffer %5 -%56 = OpTypePointer StorageBuffer %3 +%56 = OpTypePointer StorageBuffer %4 %59 = OpTypePointer Uniform %9 -%60 = OpTypePointer Uniform %3 -%64 = OpTypeVector %18 3 +%60 = OpTypePointer Uniform %4 +%64 = OpTypeVector %17 3 %66 = OpTypeBool -%67 = OpConstantNull %3 +%67 = OpConstantNull %4 %73 = OpTypeVector %66 3 -%80 = OpTypePointer Workgroup %4 +%80 = OpTypePointer Workgroup %3 %81 = OpConstant %7 29 -%87 = OpTypePointer Private %4 +%87 = OpTypePointer Private %3 %88 = OpConstant %7 39 -%94 = OpTypePointer Function %3 +%94 = OpTypePointer Function %4 %95 = OpConstant %7 1 -%38 = OpFunction %3 None %39 -%35 = OpFunctionParameter %17 -%36 = OpFunctionParameter %18 -%37 = OpFunctionParameter %18 +%38 = OpFunction %4 None %39 +%35 = OpFunctionParameter %18 +%36 = OpFunctionParameter %17 +%37 = OpFunctionParameter %17 %34 = OpLabel %52 = OpVariable %53 Function %51 %42 = OpAccessChain %40 %21 %41 @@ -109,12 +109,12 @@ OpDecorate %27 Binding 2 OpBranch %54 %54 = OpLabel %57 = OpAccessChain %56 %42 %41 %36 -%58 = OpLoad %3 %57 +%58 = OpLoad %4 %57 %61 = OpAccessChain %60 %44 %41 %36 -%62 = OpLoad %3 %61 -%63 = OpFAdd %3 %58 %62 +%62 = OpLoad %4 %61 +%63 = OpFAdd %4 %58 %62 %65 = OpCompositeConstruct %64 %35 %36 -%68 = OpImageQueryLevels %18 %45 +%68 = OpImageQueryLevels %17 %45 %69 = OpULessThan %66 %37 %68 OpSelectionMerge %70 None OpBranchConditional %69 %71 %70 @@ -124,24 +124,24 @@ OpBranchConditional %69 %71 %70 %75 = OpAll %66 %74 OpBranchConditional %75 %76 %70 %76 = OpLabel -%77 = OpImageFetch %3 %45 %65 Lod %37 +%77 = OpImageFetch %4 %45 %65 Lod %37 OpBranch %70 %70 = OpLabel -%78 = OpPhi %3 
%67 %54 %67 %71 %77 %76 -%79 = OpFAdd %3 %63 %78 +%78 = OpPhi %4 %67 %54 %67 %71 %77 %76 +%79 = OpFAdd %4 %63 %78 %82 = OpExtInst %7 %1 UMin %36 %81 %83 = OpAccessChain %80 %29 %82 -%84 = OpLoad %4 %83 -%85 = OpCompositeConstruct %3 %84 %84 %84 %84 -%86 = OpFAdd %3 %79 %85 +%84 = OpLoad %3 %83 +%85 = OpCompositeConstruct %4 %84 %84 %84 %84 +%86 = OpFAdd %4 %79 %85 %89 = OpExtInst %7 %1 UMin %36 %88 %90 = OpAccessChain %87 %31 %89 -%91 = OpLoad %4 %90 -%92 = OpCompositeConstruct %3 %91 %91 %91 %91 -%93 = OpFAdd %3 %86 %92 +%91 = OpLoad %3 %90 +%92 = OpCompositeConstruct %4 %91 %91 %91 %91 +%93 = OpFAdd %4 %86 %92 %96 = OpExtInst %7 %1 UMin %36 %95 %97 = OpAccessChain %94 %52 %96 -%98 = OpLoad %3 %97 -%99 = OpFAdd %3 %93 %98 +%98 = OpLoad %4 %97 +%99 = OpFAdd %4 %93 %98 OpReturnValue %99 OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/ray-query.spvasm b/naga/tests/out/spv/ray-query.spvasm index 328c820fea..8b784f2fa0 100644 --- a/naga/tests/out/spv/ray-query.spvasm +++ b/naga/tests/out/spv/ray-query.spvasm @@ -39,44 +39,44 @@ OpDecorate %17 Binding 1 OpDecorate %18 Block OpMemberDecorate %18 0 Offset 0 %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 3 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 3 %5 = OpTypeAccelerationStructureNV %6 = OpTypeInt 32 0 -%7 = OpTypeVector %4 2 +%7 = OpTypeVector %3 2 %8 = OpTypeBool -%9 = OpTypeMatrix %3 4 -%10 = OpTypeStruct %6 %4 %6 %6 %6 %6 %6 %7 %8 %9 %9 +%9 = OpTypeMatrix %4 4 +%10 = OpTypeStruct %6 %3 %6 %6 %6 %6 %6 %7 %8 %9 %9 %11 = OpTypeRayQueryKHR -%12 = OpTypeStruct %6 %6 %4 %4 %3 %3 -%13 = OpTypeStruct %6 %3 -%14 = OpTypeVector %4 4 +%12 = OpTypeStruct %6 %6 %3 %3 %4 %4 +%13 = OpTypeStruct %6 %4 +%14 = OpTypeVector %3 4 %16 = OpTypePointer UniformConstant %5 %15 = OpVariable %16 UniformConstant %18 = OpTypeStruct %13 %19 = OpTypePointer StorageBuffer %18 %17 = OpVariable %19 StorageBuffer -%26 = OpTypeFunction %10 %3 %3 %16 +%26 = OpTypeFunction %10 %4 %4 %16 %27 = OpConstant %6 4 %28 = 
OpConstant %6 255 -%29 = OpConstant %4 0.1 -%30 = OpConstant %4 100.0 +%29 = OpConstant %3 0.1 +%30 = OpConstant %3 100.0 %32 = OpTypePointer Function %11 %50 = OpConstant %6 1 -%67 = OpTypeFunction %3 %3 %10 -%68 = OpConstant %4 1.0 -%69 = OpConstant %4 2.4 -%70 = OpConstant %4 0.0 +%67 = OpTypeFunction %4 %4 %10 +%68 = OpConstant %3 1.0 +%69 = OpConstant %3 2.4 +%70 = OpConstant %3 0.0 %85 = OpTypeFunction %2 %87 = OpTypePointer StorageBuffer %13 %88 = OpConstant %6 0 -%90 = OpConstantComposite %3 %70 %70 %70 -%91 = OpConstantComposite %3 %70 %68 %70 +%90 = OpConstantComposite %4 %70 %70 %70 +%91 = OpConstantComposite %4 %70 %68 %70 %94 = OpTypePointer StorageBuffer %6 -%99 = OpTypePointer StorageBuffer %3 +%99 = OpTypePointer StorageBuffer %4 %25 = OpFunction %10 None %26 -%21 = OpFunctionParameter %3 -%22 = OpFunctionParameter %3 +%21 = OpFunctionParameter %4 +%22 = OpFunctionParameter %4 %23 = OpFunctionParameter %16 %20 = OpLabel %31 = OpVariable %32 Function @@ -86,10 +86,10 @@ OpBranch %33 %34 = OpCompositeConstruct %12 %27 %28 %29 %30 %21 %22 %35 = OpCompositeExtract %6 %34 0 %36 = OpCompositeExtract %6 %34 1 -%37 = OpCompositeExtract %4 %34 2 -%38 = OpCompositeExtract %4 %34 3 -%39 = OpCompositeExtract %3 %34 4 -%40 = OpCompositeExtract %3 %34 5 +%37 = OpCompositeExtract %3 %34 2 +%38 = OpCompositeExtract %3 %34 3 +%39 = OpCompositeExtract %4 %34 4 +%40 = OpCompositeExtract %4 %34 5 OpRayQueryInitializeKHR %31 %24 %35 %36 %39 %37 %40 %38 OpBranch %41 %41 = OpLabel @@ -116,7 +116,7 @@ OpBranch %41 %54 = OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR %6 %31 %50 %55 = OpRayQueryGetIntersectionGeometryIndexKHR %6 %31 %50 %56 = OpRayQueryGetIntersectionPrimitiveIndexKHR %6 %31 %50 -%57 = OpRayQueryGetIntersectionTKHR %4 %31 %50 +%57 = OpRayQueryGetIntersectionTKHR %3 %31 %50 %58 = OpRayQueryGetIntersectionBarycentricsKHR %7 %31 %50 %59 = OpRayQueryGetIntersectionFrontFaceKHR %8 %31 %50 %60 = OpRayQueryGetIntersectionObjectToWorldKHR %9 %31 
%50 @@ -124,23 +124,23 @@ OpBranch %41 %62 = OpCompositeConstruct %10 %51 %57 %52 %53 %54 %55 %56 %58 %59 %60 %61 OpReturnValue %62 OpFunctionEnd -%66 = OpFunction %3 None %67 -%64 = OpFunctionParameter %3 +%66 = OpFunction %4 None %67 +%64 = OpFunctionParameter %4 %65 = OpFunctionParameter %10 %63 = OpLabel OpBranch %71 %71 = OpLabel %72 = OpCompositeExtract %9 %65 10 %73 = OpCompositeConstruct %14 %64 %68 -%74 = OpMatrixTimesVector %3 %72 %73 +%74 = OpMatrixTimesVector %4 %72 %73 %75 = OpVectorShuffle %7 %74 %74 0 1 %76 = OpExtInst %7 %1 Normalize %75 %77 = OpVectorTimesScalar %7 %76 %69 %78 = OpCompositeExtract %9 %65 9 %79 = OpCompositeConstruct %14 %77 %70 %68 -%80 = OpMatrixTimesVector %3 %78 %79 -%81 = OpFSub %3 %64 %80 -%82 = OpExtInst %3 %1 Normalize %81 +%80 = OpMatrixTimesVector %4 %78 %79 +%81 = OpFSub %4 %64 %80 +%82 = OpExtInst %4 %1 Normalize %81 OpReturnValue %82 OpFunctionEnd %84 = OpFunction %2 None %85 @@ -155,9 +155,9 @@ OpBranch %92 %97 = OpSelect %6 %96 %50 %88 %98 = OpAccessChain %94 %89 %88 OpStore %98 %97 -%100 = OpCompositeExtract %4 %93 1 -%101 = OpVectorTimesScalar %3 %91 %100 -%102 = OpFunctionCall %3 %66 %101 %93 +%100 = OpCompositeExtract %3 %93 1 +%101 = OpVectorTimesScalar %4 %91 %100 +%102 = OpFunctionCall %4 %66 %101 %93 %103 = OpAccessChain %99 %89 %50 OpStore %103 %102 OpReturn diff --git a/naga/tests/out/spv/shadow.spvasm b/naga/tests/out/spv/shadow.spvasm index 55bfa4fec0..d64bb2336b 100644 --- a/naga/tests/out/spv/shadow.spvasm +++ b/naga/tests/out/spv/shadow.spvasm @@ -108,26 +108,26 @@ OpDecorate %205 Location 0 OpDecorate %207 Location 1 OpDecorate %209 Location 0 %2 = OpTypeVoid -%5 = OpTypeFloat 32 -%4 = OpTypeVector %5 4 -%3 = OpTypeMatrix %4 4 -%7 = OpTypeInt 32 0 -%6 = OpTypeVector %7 4 -%8 = OpTypeStruct %3 %6 -%9 = OpTypeStruct %3 %4 -%10 = OpTypeVector %5 3 -%11 = OpTypeStruct %4 %10 %4 +%3 = OpTypeFloat 32 +%5 = OpTypeVector %3 4 +%4 = OpTypeMatrix %5 4 +%6 = OpTypeInt 32 0 +%7 = OpTypeVector %6 4 +%8 = 
OpTypeStruct %4 %7 +%9 = OpTypeStruct %4 %5 +%10 = OpTypeVector %3 3 +%11 = OpTypeStruct %5 %10 %5 %13 = OpTypeInt 32 1 %12 = OpTypeVector %13 4 %14 = OpTypeMatrix %10 3 -%15 = OpTypeStruct %3 %4 %4 +%15 = OpTypeStruct %4 %5 %5 %16 = OpTypeRuntimeArray %15 -%18 = OpConstant %7 10 +%18 = OpConstant %6 10 %17 = OpTypeArray %15 %18 -%19 = OpTypeImage %5 2D 1 1 0 1 Unknown +%19 = OpTypeImage %3 2D 1 1 0 1 Unknown %20 = OpTypeSampler -%21 = OpTypeVector %5 2 -%22 = OpConstant %5 0.05 +%21 = OpTypeVector %3 2 +%22 = OpConstant %3 0.05 %23 = OpConstantComposite %10 %22 %22 %22 %25 = OpTypeStruct %8 %26 = OpTypePointer Uniform %25 @@ -145,11 +145,11 @@ OpDecorate %209 Location 0 %36 = OpVariable %37 UniformConstant %39 = OpTypePointer UniformConstant %20 %38 = OpVariable %39 UniformConstant -%44 = OpTypeFunction %5 %7 %4 -%47 = OpConstant %5 0.0 -%48 = OpConstant %5 1.0 -%49 = OpConstant %5 0.5 -%50 = OpConstant %5 -0.5 +%44 = OpTypeFunction %3 %6 %5 +%47 = OpConstant %3 0.0 +%48 = OpConstant %3 1.0 +%49 = OpConstant %3 0.5 +%50 = OpConstant %3 -0.5 %51 = OpConstantComposite %21 %49 %50 %52 = OpConstantComposite %21 %49 %49 %55 = OpTypeBool @@ -157,70 +157,70 @@ OpDecorate %209 Location 0 %75 = OpTypePointer Input %12 %74 = OpVariable %75 Input %77 = OpVariable %75 Input -%80 = OpTypePointer Output %4 +%80 = OpTypePointer Output %5 %79 = OpVariable %80 Output %82 = OpTypePointer Output %10 %81 = OpVariable %82 Output %83 = OpVariable %80 Output %85 = OpTypeFunction %2 %86 = OpTypePointer Uniform %8 -%87 = OpConstant %7 0 +%87 = OpConstant %6 0 %89 = OpTypePointer Uniform %9 %92 = OpTypePointer Function %11 %93 = OpConstantNull %11 -%95 = OpTypePointer Uniform %3 +%95 = OpTypePointer Uniform %4 %102 = OpTypePointer Function %10 %110 = OpTypeVector %13 3 -%114 = OpConstant %7 1 -%116 = OpTypePointer Function %4 -%117 = OpConstant %7 2 -%125 = OpTypePointer Output %5 -%134 = OpTypePointer Input %4 +%114 = OpConstant %6 1 +%116 = OpTypePointer Function %5 +%117 = OpConstant %6 
2 +%125 = OpTypePointer Output %3 +%134 = OpTypePointer Input %5 %133 = OpVariable %134 Input %137 = OpTypePointer Input %10 %136 = OpVariable %137 Input %139 = OpVariable %134 Input %141 = OpVariable %80 Output %145 = OpTypePointer StorageBuffer %16 -%151 = OpTypePointer Function %7 -%160 = OpTypePointer Uniform %6 -%161 = OpTypePointer Uniform %7 +%151 = OpTypePointer Function %6 +%160 = OpTypePointer Uniform %7 +%161 = OpTypePointer Uniform %6 %171 = OpTypePointer StorageBuffer %15 -%197 = OpTypePointer Uniform %4 +%197 = OpTypePointer Uniform %5 %203 = OpVariable %134 Input %205 = OpVariable %137 Input %207 = OpVariable %134 Input %209 = OpVariable %80 Output %213 = OpTypePointer Uniform %17 %236 = OpTypePointer Uniform %15 -%43 = OpFunction %5 None %44 -%41 = OpFunctionParameter %7 -%42 = OpFunctionParameter %4 +%43 = OpFunction %3 None %44 +%41 = OpFunctionParameter %6 +%42 = OpFunctionParameter %5 %40 = OpLabel %45 = OpLoad %19 %36 %46 = OpLoad %20 %38 OpBranch %53 %53 = OpLabel -%54 = OpCompositeExtract %5 %42 3 +%54 = OpCompositeExtract %3 %42 3 %56 = OpFOrdLessThanEqual %55 %54 %47 OpSelectionMerge %57 None OpBranchConditional %56 %58 %57 %58 = OpLabel OpReturnValue %48 %57 = OpLabel -%59 = OpCompositeExtract %5 %42 3 -%60 = OpFDiv %5 %48 %59 +%59 = OpCompositeExtract %3 %42 3 +%60 = OpFDiv %3 %48 %59 %61 = OpVectorShuffle %21 %42 %42 0 1 %62 = OpFMul %21 %61 %51 %63 = OpVectorTimesScalar %21 %62 %60 %64 = OpFAdd %21 %63 %52 %65 = OpBitcast %13 %41 -%66 = OpCompositeExtract %5 %42 2 -%67 = OpFMul %5 %66 %60 -%69 = OpConvertSToF %5 %65 +%66 = OpCompositeExtract %3 %42 2 +%67 = OpFMul %3 %66 %60 +%69 = OpConvertSToF %3 %65 %70 = OpCompositeConstruct %10 %64 %69 %71 = OpSampledImage %68 %45 %46 -%72 = OpImageSampleDrefExplicitLod %5 %71 %70 %67 Lod %47 +%72 = OpImageSampleDrefExplicitLod %3 %71 %70 %67 Lod %47 OpReturnValue %72 OpFunctionEnd %84 = OpFunction %2 None %85 @@ -233,16 +233,16 @@ OpFunctionEnd OpBranch %94 %94 = OpLabel %96 = OpAccessChain %95 
%90 %87 -%97 = OpLoad %3 %96 +%97 = OpLoad %4 %96 %98 = OpAccessChain %95 %90 %87 -%99 = OpLoad %3 %98 -%100 = OpConvertSToF %4 %76 -%101 = OpMatrixTimesVector %4 %99 %100 -%103 = OpCompositeExtract %4 %97 0 +%99 = OpLoad %4 %98 +%100 = OpConvertSToF %5 %76 +%101 = OpMatrixTimesVector %5 %99 %100 +%103 = OpCompositeExtract %5 %97 0 %104 = OpVectorShuffle %10 %103 %103 0 1 2 -%105 = OpCompositeExtract %4 %97 1 +%105 = OpCompositeExtract %5 %97 1 %106 = OpVectorShuffle %10 %105 %105 0 1 2 -%107 = OpCompositeExtract %4 %97 2 +%107 = OpCompositeExtract %5 %97 2 %108 = OpVectorShuffle %10 %107 %107 0 1 2 %109 = OpCompositeConstruct %14 %104 %106 %108 %111 = OpVectorShuffle %110 %78 %78 0 1 2 @@ -253,20 +253,20 @@ OpStore %115 %113 %118 = OpAccessChain %116 %91 %117 OpStore %118 %101 %119 = OpAccessChain %95 %88 %87 -%120 = OpLoad %3 %119 -%121 = OpMatrixTimesVector %4 %120 %101 +%120 = OpLoad %4 %119 +%121 = OpMatrixTimesVector %5 %120 %101 %122 = OpAccessChain %116 %91 %87 OpStore %122 %121 %123 = OpLoad %11 %91 -%124 = OpCompositeExtract %4 %123 0 +%124 = OpCompositeExtract %5 %123 0 OpStore %79 %124 %126 = OpAccessChain %125 %79 %114 -%127 = OpLoad %5 %126 -%128 = OpFNegate %5 %127 +%127 = OpLoad %3 %126 +%128 = OpFNegate %3 %127 OpStore %126 %128 %129 = OpCompositeExtract %10 %123 1 OpStore %81 %129 -%130 = OpCompositeExtract %4 %123 2 +%130 = OpCompositeExtract %5 %123 2 OpStore %83 %130 OpReturn OpFunctionEnd @@ -274,9 +274,9 @@ OpFunctionEnd %131 = OpLabel %149 = OpVariable %102 Function %23 %150 = OpVariable %151 Function %87 -%135 = OpLoad %4 %133 +%135 = OpLoad %5 %133 %138 = OpLoad %10 %136 -%140 = OpLoad %4 %139 +%140 = OpLoad %5 %139 %132 = OpCompositeConstruct %11 %135 %138 %140 %143 = OpAccessChain %86 %24 %87 %144 = OpAccessChain %89 %27 %87 @@ -292,10 +292,10 @@ OpBranch %155 OpLoopMerge %156 %158 None OpBranch %157 %157 = OpLabel -%159 = OpLoad %7 %150 +%159 = OpLoad %6 %150 %162 = OpAccessChain %161 %143 %114 %87 -%163 = OpLoad %7 %162 -%164 = 
OpExtInst %7 %1 UMin %163 %18 +%163 = OpLoad %6 %162 +%164 = OpExtInst %6 %1 UMin %163 %18 %165 = OpULessThan %55 %159 %164 OpSelectionMerge %166 None OpBranchConditional %165 %166 %167 @@ -304,24 +304,24 @@ OpBranch %156 %166 = OpLabel OpBranch %168 %168 = OpLabel -%170 = OpLoad %7 %150 +%170 = OpLoad %6 %150 %172 = OpAccessChain %171 %146 %170 %173 = OpLoad %15 %172 -%174 = OpLoad %7 %150 -%175 = OpCompositeExtract %3 %173 0 -%176 = OpCompositeExtract %4 %132 2 -%177 = OpMatrixTimesVector %4 %175 %176 -%178 = OpFunctionCall %5 %43 %174 %177 -%179 = OpCompositeExtract %4 %173 1 +%174 = OpLoad %6 %150 +%175 = OpCompositeExtract %4 %173 0 +%176 = OpCompositeExtract %5 %132 2 +%177 = OpMatrixTimesVector %5 %175 %176 +%178 = OpFunctionCall %3 %43 %174 %177 +%179 = OpCompositeExtract %5 %173 1 %180 = OpVectorShuffle %10 %179 %179 0 1 2 -%181 = OpCompositeExtract %4 %132 2 +%181 = OpCompositeExtract %5 %132 2 %182 = OpVectorShuffle %10 %181 %181 0 1 2 %183 = OpFSub %10 %180 %182 %184 = OpExtInst %10 %1 Normalize %183 -%185 = OpDot %5 %154 %184 -%186 = OpExtInst %5 %1 FMax %47 %185 -%187 = OpFMul %5 %178 %186 -%188 = OpCompositeExtract %4 %173 2 +%185 = OpDot %3 %154 %184 +%186 = OpExtInst %3 %1 FMax %47 %185 +%187 = OpFMul %3 %178 %186 +%188 = OpCompositeExtract %5 %173 2 %189 = OpVectorShuffle %10 %188 %188 0 1 2 %190 = OpVectorTimesScalar %10 %189 %187 %191 = OpLoad %10 %149 @@ -331,16 +331,16 @@ OpBranch %169 %169 = OpLabel OpBranch %158 %158 = OpLabel -%193 = OpLoad %7 %150 -%194 = OpIAdd %7 %193 %114 +%193 = OpLoad %6 %150 +%194 = OpIAdd %6 %193 %114 OpStore %150 %194 OpBranch %155 %156 = OpLabel %195 = OpLoad %10 %149 -%196 = OpCompositeConstruct %4 %195 %48 +%196 = OpCompositeConstruct %5 %195 %48 %198 = OpAccessChain %197 %144 %114 -%199 = OpLoad %4 %198 -%200 = OpFMul %4 %196 %199 +%199 = OpLoad %5 %198 +%200 = OpFMul %5 %196 %199 OpStore %141 %200 OpReturn OpFunctionEnd @@ -348,9 +348,9 @@ OpFunctionEnd %201 = OpLabel %217 = OpVariable %102 Function %23 %218 = 
OpVariable %151 Function %87 -%204 = OpLoad %4 %203 +%204 = OpLoad %5 %203 %206 = OpLoad %10 %205 -%208 = OpLoad %4 %207 +%208 = OpLoad %5 %207 %202 = OpCompositeConstruct %11 %204 %206 %208 %211 = OpAccessChain %86 %24 %87 %212 = OpAccessChain %89 %27 %87 @@ -366,10 +366,10 @@ OpBranch %222 OpLoopMerge %223 %225 None OpBranch %224 %224 = OpLabel -%226 = OpLoad %7 %218 +%226 = OpLoad %6 %218 %227 = OpAccessChain %161 %211 %114 %87 -%228 = OpLoad %7 %227 -%229 = OpExtInst %7 %1 UMin %228 %18 +%228 = OpLoad %6 %227 +%229 = OpExtInst %6 %1 UMin %228 %18 %230 = OpULessThan %55 %226 %229 OpSelectionMerge %231 None OpBranchConditional %230 %231 %232 @@ -378,24 +378,24 @@ OpBranch %223 %231 = OpLabel OpBranch %233 %233 = OpLabel -%235 = OpLoad %7 %218 +%235 = OpLoad %6 %218 %237 = OpAccessChain %236 %214 %235 %238 = OpLoad %15 %237 -%239 = OpLoad %7 %218 -%240 = OpCompositeExtract %3 %238 0 -%241 = OpCompositeExtract %4 %202 2 -%242 = OpMatrixTimesVector %4 %240 %241 -%243 = OpFunctionCall %5 %43 %239 %242 -%244 = OpCompositeExtract %4 %238 1 +%239 = OpLoad %6 %218 +%240 = OpCompositeExtract %4 %238 0 +%241 = OpCompositeExtract %5 %202 2 +%242 = OpMatrixTimesVector %5 %240 %241 +%243 = OpFunctionCall %3 %43 %239 %242 +%244 = OpCompositeExtract %5 %238 1 %245 = OpVectorShuffle %10 %244 %244 0 1 2 -%246 = OpCompositeExtract %4 %202 2 +%246 = OpCompositeExtract %5 %202 2 %247 = OpVectorShuffle %10 %246 %246 0 1 2 %248 = OpFSub %10 %245 %247 %249 = OpExtInst %10 %1 Normalize %248 -%250 = OpDot %5 %221 %249 -%251 = OpExtInst %5 %1 FMax %47 %250 -%252 = OpFMul %5 %243 %251 -%253 = OpCompositeExtract %4 %238 2 +%250 = OpDot %3 %221 %249 +%251 = OpExtInst %3 %1 FMax %47 %250 +%252 = OpFMul %3 %243 %251 +%253 = OpCompositeExtract %5 %238 2 %254 = OpVectorShuffle %10 %253 %253 0 1 2 %255 = OpVectorTimesScalar %10 %254 %252 %256 = OpLoad %10 %217 @@ -405,16 +405,16 @@ OpBranch %234 %234 = OpLabel OpBranch %225 %225 = OpLabel -%258 = OpLoad %7 %218 -%259 = OpIAdd %7 %258 %114 +%258 = 
OpLoad %6 %218 +%259 = OpIAdd %6 %258 %114 OpStore %218 %259 OpBranch %222 %223 = OpLabel %260 = OpLoad %10 %217 -%261 = OpCompositeConstruct %4 %260 %48 +%261 = OpCompositeConstruct %5 %260 %48 %262 = OpAccessChain %197 %212 %114 -%263 = OpLoad %4 %262 -%264 = OpFMul %4 %261 %263 +%263 = OpLoad %5 %262 +%264 = OpFMul %5 %261 %263 OpStore %209 %264 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/spv/struct-layout.spvasm b/naga/tests/out/spv/struct-layout.spvasm index fccaef5269..42899de9b5 100644 --- a/naga/tests/out/spv/struct-layout.spvasm +++ b/naga/tests/out/spv/struct-layout.spvasm @@ -52,11 +52,11 @@ OpDecorate %73 Location 1 OpDecorate %75 Location 2 OpDecorate %77 BuiltIn Position %2 = OpTypeVoid -%4 = OpTypeFloat 32 -%3 = OpTypeVector %4 3 -%5 = OpTypeStruct %3 %4 -%6 = OpTypeVector %4 4 -%7 = OpTypeStruct %4 %3 %4 +%3 = OpTypeFloat 32 +%4 = OpTypeVector %3 3 +%5 = OpTypeStruct %4 %3 +%6 = OpTypeVector %3 4 +%7 = OpTypeStruct %3 %4 %3 %9 = OpTypeStruct %5 %10 = OpTypePointer Uniform %9 %8 = OpVariable %10 Uniform @@ -69,14 +69,14 @@ OpDecorate %77 BuiltIn Position %18 = OpTypeStruct %7 %19 = OpTypePointer StorageBuffer %18 %17 = OpVariable %19 StorageBuffer -%23 = OpTypePointer Input %3 +%23 = OpTypePointer Input %4 %22 = OpVariable %23 Input -%26 = OpTypePointer Input %4 +%26 = OpTypePointer Input %3 %25 = OpVariable %26 Input %29 = OpTypePointer Output %6 %28 = OpVariable %29 Output %31 = OpTypeFunction %2 -%32 = OpConstant %4 0.0 +%32 = OpConstant %3 0.0 %33 = OpConstantComposite %6 %32 %32 %32 %32 %37 = OpVariable %23 Input %39 = OpVariable %26 Input @@ -101,8 +101,8 @@ OpDecorate %77 BuiltIn Position %88 = OpConstantNull %7 %30 = OpFunction %2 None %31 %20 = OpLabel -%24 = OpLoad %3 %22 -%27 = OpLoad %4 %25 +%24 = OpLoad %4 %22 +%27 = OpLoad %3 %25 %21 = OpCompositeConstruct %5 %24 %27 OpBranch %34 %34 = OpLabel @@ -111,8 +111,8 @@ OpReturn OpFunctionEnd %42 = OpFunction %2 None %31 %35 = OpLabel -%38 = OpLoad %3 %37 
-%40 = OpLoad %4 %39 +%38 = OpLoad %4 %37 +%40 = OpLoad %3 %39 %36 = OpCompositeConstruct %5 %38 %40 OpBranch %43 %43 = OpLabel @@ -134,9 +134,9 @@ OpReturn OpFunctionEnd %67 = OpFunction %2 None %31 %58 = OpLabel -%61 = OpLoad %4 %60 -%63 = OpLoad %3 %62 -%65 = OpLoad %4 %64 +%61 = OpLoad %3 %60 +%63 = OpLoad %4 %62 +%65 = OpLoad %3 %64 %59 = OpCompositeConstruct %7 %61 %63 %65 OpBranch %68 %68 = OpLabel @@ -145,9 +145,9 @@ OpReturn OpFunctionEnd %78 = OpFunction %2 None %31 %69 = OpLabel -%72 = OpLoad %4 %71 -%74 = OpLoad %3 %73 -%76 = OpLoad %4 %75 +%72 = OpLoad %3 %71 +%74 = OpLoad %4 %73 +%76 = OpLoad %3 %75 %70 = OpCompositeConstruct %7 %72 %74 %76 OpBranch %79 %79 = OpLabel diff --git a/naga/tests/out/wgsl/abstract-types-var.wgsl b/naga/tests/out/wgsl/abstract-types-var.wgsl index 0533f19442..596595ba44 100644 --- a/naga/tests/out/wgsl/abstract-types-var.wgsl +++ b/naga/tests/out/wgsl/abstract-types-var.wgsl @@ -22,6 +22,17 @@ var xafpaiai_1: array = array(1i, 2i); var xafpaiaf_1: array = array(1f, 2f); var xafpafai_1: array = array(1f, 2f); var xafpafaf_1: array = array(1f, 2f); +var ivispai: vec2 = vec2(1i); +var ivfspaf: vec2 = vec2(1f); +var ivis_ai: vec2 = vec2(1i); +var ivus_ai: vec2 = vec2(1u); +var ivfs_ai: vec2 = vec2(1f); +var ivfs_af: vec2 = vec2(1f); +var iafafaf: array = array(1f, 2f); +var iafaiai: array = array(1f, 2f); +var iafpafaf: array = array(1f, 2f); +var iafpaiaf: array = array(1f, 2f); +var iafpafai: array = array(1f, 2f); fn all_constant_arguments() { var xvipaiai: vec2 = vec2(42i, 43i); diff --git a/naga/tests/out/wgsl/binding-arrays.wgsl b/naga/tests/out/wgsl/binding-arrays.wgsl index 86bcfc1bff..5bed8ef007 100644 --- a/naga/tests/out/wgsl/binding-arrays.wgsl +++ b/naga/tests/out/wgsl/binding-arrays.wgsl @@ -34,8 +34,8 @@ fn main(fragment_in: FragmentIn) -> @location(0) vec4 { let uniform_index = uni.index; let non_uniform_index = fragment_in.index; - let uv = vec2(0f); - let pix = vec2(0i); + const uv = vec2(0f); + const pix = 
vec2(0i); let _e21 = textureDimensions(texture_array_unbounded[0]); let _e22 = u2_; u2_ = (_e22 + _e21); diff --git a/naga/tests/out/wgsl/const_assert.wgsl b/naga/tests/out/wgsl/const_assert.wgsl new file mode 100644 index 0000000000..fe1c1c02f4 --- /dev/null +++ b/naga/tests/out/wgsl/const_assert.wgsl @@ -0,0 +1,7 @@ +const x: i32 = 1i; +const y: i32 = 2i; + +fn foo() { + return; +} + diff --git a/naga/tests/out/wgsl/constructors.wgsl b/naga/tests/out/wgsl/constructors.wgsl index 0e5eec734a..6d9d7e2f5d 100644 --- a/naga/tests/out/wgsl/constructors.wgsl +++ b/naga/tests/out/wgsl/constructors.wgsl @@ -21,11 +21,11 @@ fn main() { var foo: Foo; foo = Foo(vec4(1f), 1i); - let m0_ = mat2x2(vec2(1f, 0f), vec2(0f, 1f)); - let m1_ = mat4x4(vec4(1f, 0f, 0f, 0f), vec4(0f, 1f, 0f, 0f), vec4(0f, 0f, 1f, 0f), vec4(0f, 0f, 0f, 1f)); - let cit0_ = vec2(0u); - let cit1_ = mat2x2(vec2(0f), vec2(0f)); - let cit2_ = array(0i, 1i, 2i, 3i); - let ic4_ = vec2(0u, 0u); - let ic5_ = mat2x3(vec3(0f, 0f, 0f), vec3(0f, 0f, 0f)); + const m0_ = mat2x2(vec2(1f, 0f), vec2(0f, 1f)); + const m1_ = mat4x4(vec4(1f, 0f, 0f, 0f), vec4(0f, 1f, 0f, 0f), vec4(0f, 0f, 1f, 0f), vec4(0f, 0f, 0f, 1f)); + const cit0_ = vec2(0u); + const cit1_ = mat2x2(vec2(0f), vec2(0f)); + const cit2_ = array(0i, 1i, 2i, 3i); + const ic4_ = vec2(0u, 0u); + const ic5_ = mat2x3(vec3(0f, 0f, 0f), vec3(0f, 0f, 0f)); } diff --git a/naga/tests/out/wgsl/cross.wgsl b/naga/tests/out/wgsl/cross.wgsl new file mode 100644 index 0000000000..2e213aa9c4 --- /dev/null +++ b/naga/tests/out/wgsl/cross.wgsl @@ -0,0 +1,4 @@ +@compute @workgroup_size(1, 1, 1) +fn main() { + let a = cross(vec3(0f, 1f, 2f), vec3(0f, 1f, 2f)); +} diff --git a/naga/tests/out/wgsl/expressions.frag.wgsl b/naga/tests/out/wgsl/expressions.frag.wgsl index 0ba5962ab2..ec53847d5f 100644 --- a/naga/tests/out/wgsl/expressions.frag.wgsl +++ b/naga/tests/out/wgsl/expressions.frag.wgsl @@ -268,12 +268,12 @@ fn testUnaryOpMat(a_16: mat3x3) { let _e3 = a_17; v_8 = -(_e3); let _e5 
= a_17; - let _e7 = vec3(1f); + const _e7 = vec3(1f); let _e9 = (_e5 - mat3x3(_e7, _e7, _e7)); a_17 = _e9; v_8 = _e9; let _e10 = a_17; - let _e12 = vec3(1f); + const _e12 = vec3(1f); a_17 = (_e10 - mat3x3(_e12, _e12, _e12)); v_8 = _e10; return; diff --git a/naga/tests/out/wgsl/functions.wgsl b/naga/tests/out/wgsl/functions.wgsl index 79f000ce22..db7b81b146 100644 --- a/naga/tests/out/wgsl/functions.wgsl +++ b/naga/tests/out/wgsl/functions.wgsl @@ -1,16 +1,16 @@ fn test_fma() -> vec2 { - let a = vec2(2f, 2f); - let b = vec2(0.5f, 0.5f); - let c = vec2(0.5f, 0.5f); + const a = vec2(2f, 2f); + const b = vec2(0.5f, 0.5f); + const c = vec2(0.5f, 0.5f); return fma(a, b, c); } fn test_integer_dot_product() -> i32 { - let a_2_ = vec2(1i); - let b_2_ = vec2(1i); + const a_2_ = vec2(1i); + const b_2_ = vec2(1i); let c_2_ = dot(a_2_, b_2_); - let a_3_ = vec3(1u); - let b_3_ = vec3(1u); + const a_3_ = vec3(1u); + const b_3_ = vec3(1u); let c_3_ = dot(a_3_, b_3_); let c_4_ = dot(vec4(4i), vec4(2i)); return c_4_; diff --git a/naga/tests/out/wgsl/image.wgsl b/naga/tests/out/wgsl/image.wgsl index 008b4c20c1..a680e70aba 100644 --- a/naga/tests/out/wgsl/image.wgsl +++ b/naga/tests/out/wgsl/image.wgsl @@ -110,8 +110,8 @@ fn levels_queries() -> @builtin(position) vec4 { fn texture_sample() -> @location(0) vec4 { var a: vec4; - let tc = vec2(0.5f); - let tc3_ = vec3(0.5f); + const tc = vec2(0.5f); + const tc3_ = vec3(0.5f); let _e9 = textureSample(image_1d, sampler_reg, tc.x); let _e10 = a; a = (_e10 + _e9); @@ -186,8 +186,8 @@ fn texture_sample() -> @location(0) vec4 { fn texture_sample_comparison() -> @location(0) f32 { var a_1: f32; - let tc_1 = vec2(0.5f); - let tc3_1 = vec3(0.5f); + const tc_1 = vec2(0.5f); + const tc3_1 = vec3(0.5f); let _e8 = textureSampleCompare(image_2d_depth, sampler_cmp, tc_1, 0.5f); let _e9 = a_1; a_1 = (_e9 + _e8); @@ -218,7 +218,7 @@ fn texture_sample_comparison() -> @location(0) f32 { @fragment fn gather() -> @location(0) vec4 { - let tc_2 = vec2(0.5f); 
+ const tc_2 = vec2(0.5f); let s2d = textureGather(1, image_2d, sampler_reg, tc_2); let s2d_offset = textureGather(3, image_2d, sampler_reg, tc_2, vec2(3i, 1i)); let s2d_depth = textureGatherCompare(image_2d_depth, sampler_cmp, tc_2, 0.5f); @@ -231,7 +231,7 @@ fn gather() -> @location(0) vec4 { @fragment fn depth_no_comparison() -> @location(0) vec4 { - let tc_3 = vec2(0.5f); + const tc_3 = vec2(0.5f); let s2d_1 = textureSample(image_2d_depth, sampler_reg, tc_3); let s2d_gather = textureGather(image_2d_depth, sampler_reg, tc_3); return (vec4(s2d_1) + s2d_gather); diff --git a/naga/tests/out/wgsl/interpolate.wgsl b/naga/tests/out/wgsl/interpolate.wgsl index 402e60cef5..adb90568e4 100644 --- a/naga/tests/out/wgsl/interpolate.wgsl +++ b/naga/tests/out/wgsl/interpolate.wgsl @@ -1,12 +1,16 @@ struct FragmentInput { @builtin(position) position: vec4, @location(0) @interpolate(flat) _flat: u32, - @location(1) @interpolate(linear) _linear: f32, - @location(2) @interpolate(linear, centroid) linear_centroid: vec2, - @location(3) @interpolate(linear, sample) linear_sample: vec3, - @location(4) perspective: vec4, - @location(5) @interpolate(perspective, centroid) perspective_centroid: f32, - @location(6) @interpolate(perspective, sample) perspective_sample: f32, + @location(1) @interpolate(flat, first) flat_first: u32, + @location(2) @interpolate(flat, either) flat_either: u32, + @location(3) @interpolate(linear) _linear: f32, + @location(4) @interpolate(linear, centroid) linear_centroid: vec2, + @location(6) @interpolate(linear, sample) linear_sample: vec3, + @location(7) @interpolate(linear) linear_center: vec3, + @location(8) perspective: vec4, + @location(9) @interpolate(perspective, centroid) perspective_centroid: f32, + @location(10) @interpolate(perspective, sample) perspective_sample: f32, + @location(11) perspective_center: f32, } @vertex @@ -15,14 +19,18 @@ fn vert_main() -> FragmentInput { out.position = vec4(2f, 4f, 5f, 6f); out._flat = 8u; + out.flat_first = 9u; + 
out.flat_either = 10u; out._linear = 27f; out.linear_centroid = vec2(64f, 125f); out.linear_sample = vec3(216f, 343f, 512f); + out.linear_center = vec3(255f, 511f, 1024f); out.perspective = vec4(729f, 1000f, 1331f, 1728f); out.perspective_centroid = 2197f; out.perspective_sample = 2744f; - let _e30 = out; - return _e30; + out.perspective_center = 2812f; + let _e41 = out; + return _e41; } @fragment diff --git a/naga/tests/out/wgsl/interpolate_compat.wgsl b/naga/tests/out/wgsl/interpolate_compat.wgsl new file mode 100644 index 0000000000..aeaac63547 --- /dev/null +++ b/naga/tests/out/wgsl/interpolate_compat.wgsl @@ -0,0 +1,37 @@ +struct FragmentInput { + @builtin(position) position: vec4, + @location(0) @interpolate(flat) _flat: u32, + @location(2) @interpolate(flat, either) flat_either: u32, + @location(3) @interpolate(linear) _linear: f32, + @location(4) @interpolate(linear, centroid) linear_centroid: vec2, + @location(6) @interpolate(linear, sample) linear_sample: vec3, + @location(7) @interpolate(linear) linear_center: vec3, + @location(8) perspective: vec4, + @location(9) @interpolate(perspective, centroid) perspective_centroid: f32, + @location(10) @interpolate(perspective, sample) perspective_sample: f32, + @location(11) perspective_center: f32, +} + +@vertex +fn vert_main() -> FragmentInput { + var out: FragmentInput; + + out.position = vec4(2f, 4f, 5f, 6f); + out._flat = 8u; + out.flat_either = 10u; + out._linear = 27f; + out.linear_centroid = vec2(64f, 125f); + out.linear_sample = vec3(216f, 343f, 512f); + out.linear_center = vec3(255f, 511f, 1024f); + out.perspective = vec4(729f, 1000f, 1331f, 1728f); + out.perspective_centroid = 2197f; + out.perspective_sample = 2744f; + out.perspective_center = 2812f; + let _e39 = out; + return _e39; +} + +@fragment +fn frag_main(val: FragmentInput) { + return; +} diff --git a/naga/tests/out/wgsl/local-const.wgsl b/naga/tests/out/wgsl/local-const.wgsl new file mode 100644 index 0000000000..587f5a8e54 --- /dev/null +++ 
b/naga/tests/out/wgsl/local-const.wgsl @@ -0,0 +1,11 @@ +const ga: i32 = 4i; +const gb: i32 = 4i; +const gc: u32 = 4u; +const gd: f32 = 4f; +const ge: vec3 = vec3(4i, 4i, 4i); +const gf: f32 = 2f; + +fn const_in_fn() { + const e = vec3(4i, 4i, 4i); +} + diff --git a/naga/tests/out/wgsl/math-functions.wgsl b/naga/tests/out/wgsl/math-functions.wgsl index 2271bb9cb0..732f7acdcf 100644 --- a/naga/tests/out/wgsl/math-functions.wgsl +++ b/naga/tests/out/wgsl/math-functions.wgsl @@ -1,25 +1,25 @@ @fragment fn main() { - let v = vec4(0f); + const v = vec4(0f); let a = degrees(1f); let b = radians(1f); let c = degrees(v); let d = radians(v); let e = saturate(v); let g = refract(v, v, 1f); - let sign_b = vec4(-1i, -1i, -1i, -1i); - let sign_d = vec4(-1f, -1f, -1f, -1f); + const sign_b = vec4(-1i, -1i, -1i, -1i); + const sign_d = vec4(-1f, -1f, -1f, -1f); let const_dot = dot(vec2(), vec2()); - let flb_b = vec2(-1i, -1i); - let flb_c = vec2(0u, 0u); - let ftb_c = vec2(0i, 0i); - let ftb_d = vec2(0u, 0u); - let ctz_e = vec2(32u, 32u); - let ctz_f = vec2(32i, 32i); - let ctz_g = vec2(0u, 0u); - let ctz_h = vec2(0i, 0i); - let clz_c = vec2(0i, 0i); - let clz_d = vec2(31u, 31u); + const flb_b = vec2(-1i, -1i); + const flb_c = vec2(0u, 0u); + const ftb_c = vec2(0i, 0i); + const ftb_d = vec2(0u, 0u); + const ctz_e = vec2(32u, 32u); + const ctz_f = vec2(32i, 32i); + const ctz_g = vec2(0u, 0u); + const ctz_h = vec2(0i, 0i); + const clz_c = vec2(0i, 0i); + const clz_d = vec2(31u, 31u); let lde_a = ldexp(1f, 2i); let lde_b = ldexp(vec2(1f, 2f), vec2(3i, 4i)); let modf_a = modf(1.5f); diff --git a/naga/tests/out/wgsl/operators.wgsl b/naga/tests/out/wgsl/operators.wgsl index dbf39556de..2194a01df1 100644 --- a/naga/tests/out/wgsl/operators.wgsl +++ b/naga/tests/out/wgsl/operators.wgsl @@ -11,7 +11,7 @@ fn builtins() -> vec4 { let m2_ = mix(v_f32_zero, v_f32_one, 0.1f); let b1_ = bitcast(1i); let b2_ = bitcast>(v_i32_one); - let v_i32_zero = vec4(0i, 0i, 0i, 0i); + const v_i32_zero = 
vec4(0i, 0i, 0i, 0i); return (((((vec4((vec4(s1_) + v_i32_zero)) + s2_) + m1_) + m2_) + vec4(b1_)) + b2_); } @@ -40,8 +40,8 @@ fn bool_cast(x: vec3) -> vec3 { } fn logical() { - let neg0_ = !(true); - let neg1_ = !(vec2(true)); + const neg0_ = !(true); + const neg1_ = !(vec2(true)); let or = (true || false); let and = (true && false); let bitwise_or0_ = (true | false); @@ -51,9 +51,9 @@ fn logical() { } fn arithmetic() { - let neg0_1 = -(1f); - let neg1_1 = -(vec2(1i)); - let neg2_ = -(vec2(1f)); + const neg0_1 = -(1f); + const neg1_1 = -(vec2(1i)); + const neg2_ = -(vec2(1f)); let add0_ = (2i + 1i); let add1_ = (2u + 1u); let add2_ = (2f + 1f); @@ -126,10 +126,10 @@ fn arithmetic() { } fn bit() { - let flip0_ = ~(1i); - let flip1_ = ~(1u); - let flip2_ = ~(vec2(1i)); - let flip3_ = ~(vec3(1u)); + const flip0_ = ~(1i); + const flip1_ = ~(1u); + const flip2_ = ~(vec2(1i)); + const flip3_ = ~(vec3(1u)); let or0_ = (2i | 1i); let or1_ = (2u | 1u); let or2_ = (vec2(2i) | vec2(1i)); @@ -230,14 +230,14 @@ fn assignment() { } fn negation_avoids_prefix_decrement() { - let p0_ = -(1i); - let p1_ = -(-(1i)); - let p2_ = -(-(1i)); - let p3_ = -(-(1i)); - let p4_ = -(-(-(1i))); - let p5_ = -(-(-(-(1i)))); - let p6_ = -(-(-(-(-(1i))))); - let p7_ = -(-(-(-(-(1i))))); + const p0_ = -(1i); + const p1_ = -(-(1i)); + const p2_ = -(-(1i)); + const p3_ = -(-(1i)); + const p4_ = -(-(-(1i))); + const p5_ = -(-(-(-(1i)))); + const p6_ = -(-(-(-(-(1i))))); + const p7_ = -(-(-(-(-(1i))))); } @compute @workgroup_size(1, 1, 1) diff --git a/naga/tests/out/wgsl/prepostfix.frag.wgsl b/naga/tests/out/wgsl/prepostfix.frag.wgsl index d2c59a0dd9..15916303bb 100644 --- a/naga/tests/out/wgsl/prepostfix.frag.wgsl +++ b/naga/tests/out/wgsl/prepostfix.frag.wgsl @@ -21,11 +21,11 @@ fn main_1() { vec = _e21; vec_target = _e21; let _e32 = mat; - let _e34 = vec3(1f); + const _e34 = vec3(1f); mat = (_e32 + mat4x3(_e34, _e34, _e34, _e34)); mat_target = _e32; let _e37 = mat; - let _e39 = vec3(1f); + const 
_e39 = vec3(1f); let _e41 = (_e37 - mat4x3(_e39, _e39, _e39, _e39)); mat = _e41; mat_target = _e41; diff --git a/naga/tests/out/wgsl/shadow.wgsl b/naga/tests/out/wgsl/shadow.wgsl index e9d5bbf1be..8b198d2ed1 100644 --- a/naga/tests/out/wgsl/shadow.wgsl +++ b/naga/tests/out/wgsl/shadow.wgsl @@ -40,7 +40,7 @@ fn fetch_shadow(light_id: u32, homogeneous_coords: vec4) -> f32 { if (homogeneous_coords.w <= 0f) { return 1f; } - let flip_correction = vec2(0.5f, -0.5f); + const flip_correction = vec2(0.5f, -0.5f); let proj_correction = (1f / homogeneous_coords.w); let light_local = (((homogeneous_coords.xy * flip_correction) * proj_correction) + vec2(0.5f, 0.5f)); let _e24 = textureSampleCompareLevel(t_shadow, sampler_shadow, light_local, i32(light_id), (homogeneous_coords.z * proj_correction)); diff --git a/naga/tests/out/wgsl/type-alias.wgsl b/naga/tests/out/wgsl/type-alias.wgsl index fe3cf79037..13bfcba82c 100644 --- a/naga/tests/out/wgsl/type-alias.wgsl +++ b/naga/tests/out/wgsl/type-alias.wgsl @@ -1,10 +1,10 @@ fn main() { - let a = vec3(0f, 0f, 0f); - let c = vec3(0f); - let b = vec3(vec2(0f), 0f); - let d = vec3(vec2(0f), 0f); - let e = vec3(d); - let f = mat2x2(vec2(1f, 2f), vec2(3f, 4f)); - let g = mat3x3(a, a, a); + const a = vec3(0f, 0f, 0f); + const c = vec3(0f); + const b = vec3(vec2(0f), 0f); + const d = vec3(vec2(0f), 0f); + const e = vec3(d); + const f = mat2x2(vec2(1f, 2f), vec2(3f, 4f)); + const g = mat3x3(a, a, a); } diff --git a/naga/tests/snapshots.rs b/naga/tests/snapshots.rs index f75a343a4b..14a15b383b 100644 --- a/naga/tests/snapshots.rs +++ b/naga/tests/snapshots.rs @@ -14,8 +14,12 @@ const BASE_DIR_OUT: &str = "tests/out"; bitflags::bitflags! { #[derive(Clone, Copy)] struct Targets: u32 { + /// A serialization of the `naga::Module`, in RON format. const IR = 1; + + /// A serialization of the `naga::valid::ModuleInfo`, in RON format. 
const ANALYSIS = 1 << 1; + const SPIRV = 1 << 2; const METAL = 1 << 3; const GLSL = 1 << 4; @@ -354,6 +358,10 @@ fn check_targets( let debug_info = source_code.map(|code| naga::back::spv::DebugInfo { source_code: code, file_name: name.as_ref(), + // wgpu#6266: we technically know all the information here to + // produce the valid language but it's not too important for + // validation purposes + language: naga::back::spv::SourceLanguage::Unknown, }); if targets.contains(Targets::SPIRV) { @@ -745,6 +753,10 @@ fn convert_wgsl() { ("functions-webgl", Targets::GLSL), ( "interpolate", + Targets::SPIRV | Targets::METAL | Targets::HLSL | Targets::WGSL, + ), + ( + "interpolate_compat", Targets::SPIRV | Targets::METAL | Targets::GLSL | Targets::HLSL | Targets::WGSL, ), ( @@ -822,6 +834,7 @@ fn convert_wgsl() { "use-gl-ext-over-grad-workaround-if-instructed", Targets::GLSL, ), + ("local-const", Targets::IR | Targets::WGSL), ( "math-functions", Targets::SPIRV | Targets::METAL | Targets::GLSL | Targets::HLSL | Targets::WGSL, @@ -867,6 +880,7 @@ fn convert_wgsl() { "const-exprs", Targets::SPIRV | Targets::METAL | Targets::GLSL | Targets::HLSL | Targets::WGSL, ), + ("const_assert", Targets::WGSL | Targets::IR), ("separate-entry-points", Targets::SPIRV | Targets::GLSL), ( "struct-layout", @@ -914,6 +928,10 @@ fn convert_wgsl() { Targets::IR | Targets::SPIRV | Targets::METAL, ), ("vertex-pulling-transform", Targets::METAL), + ( + "cross", + Targets::SPIRV | Targets::METAL | Targets::GLSL | Targets::HLSL | Targets::WGSL, + ), ]; for &(name, targets) in inputs.iter() { diff --git a/naga/tests/validation.rs b/naga/tests/validation.rs index f64b408841..f20ae688b8 100644 --- a/naga/tests/validation.rs +++ b/naga/tests/validation.rs @@ -260,3 +260,239 @@ fn emit_workgroup_uniform_load_result() { variant(true).expect("module should validate"); assert!(variant(false).is_err()); } + +#[cfg(feature = "wgsl-in")] +#[test] +fn bad_cross_builtin_args() { + // NOTE: Things we expect to actually 
compile are in the `cross` snapshot test. + let cases = [ + ( + "vec2(0., 1.)", + "\ +error: Entry point main at Compute is invalid + ┌─ wgsl:3:13 + │ +3 │ let a = cross(vec2(0., 1.), vec2(0., 1.)); + │ ^^^^^ naga::Expression [6] + │ + = Expression [6] is invalid + = Argument [0] to Cross as expression [2] has an invalid type. + +", + ), + ( + "vec4(0., 1., 2., 3.)", + "\ +error: Entry point main at Compute is invalid + ┌─ wgsl:3:13 + │ +3 │ let a = cross(vec4(0., 1., 2., 3.), vec4(0., 1., 2., 3.)); + │ ^^^^^ naga::Expression [10] + │ + = Expression [10] is invalid + = Argument [0] to Cross as expression [4] has an invalid type. + +", + ), + ]; + + for (invalid_arg, expected_err) in cases { + let source = format!( + "\ +@compute @workgroup_size(1) +fn main() {{ + let a = cross({invalid_arg}, {invalid_arg}); +}} +" + ); + let module = naga::front::wgsl::parse_str(&source).unwrap(); + let err = valid::Validator::new(Default::default(), valid::Capabilities::all()) + .validate_no_overrides(&module) + .expect_err("module should be invalid"); + assert_eq!(err.emit_to_string(&source), expected_err); + } +} + +#[cfg(feature = "wgsl-in")] +#[test] +fn incompatible_interpolation_and_sampling_types() { + use dummy_interpolation_shader::DummyInterpolationShader; + + // NOTE: Things we expect to actually compile are in the `interpolate` snapshot test. 
+ use itertools::Itertools; + + let invalid_shader_module = |interpolation_and_sampling| { + let (interpolation, sampling) = interpolation_and_sampling; + + let valid = matches!( + (interpolation, sampling), + (_, None) + | ( + naga::Interpolation::Perspective | naga::Interpolation::Linear, + Some( + naga::Sampling::Center | naga::Sampling::Centroid | naga::Sampling::Sample + ), + ) + | ( + naga::Interpolation::Flat, + Some(naga::Sampling::First | naga::Sampling::Either) + ) + ); + + if valid { + None + } else { + let DummyInterpolationShader { + source, + module, + interpolate_attr, + entry_point: _, + } = DummyInterpolationShader::new(interpolation, sampling); + Some(( + source, + module, + interpolation, + sampling.expect("default interpolation sampling should be valid"), + interpolate_attr, + )) + } + }; + + let invalid_cases = [ + naga::Interpolation::Flat, + naga::Interpolation::Linear, + naga::Interpolation::Perspective, + ] + .into_iter() + .cartesian_product( + [ + naga::Sampling::Either, + naga::Sampling::First, + naga::Sampling::Sample, + naga::Sampling::Center, + naga::Sampling::Centroid, + ] + .into_iter() + .map(Some) + .chain([None]), + ) + .filter_map(invalid_shader_module); + + for (invalid_source, invalid_module, interpolation, sampling, interpolate_attr) in invalid_cases + { + let err = valid::Validator::new(Default::default(), valid::Capabilities::all()) + .validate_no_overrides(&invalid_module) + .expect_err(&format!( + "module should be invalid for {interpolate_attr:?}" + )); + assert!(dbg!(err.emit_to_string(&invalid_source)).contains(&dbg!( + naga::valid::VaryingError::InvalidInterpolationSamplingCombination { + interpolation, + sampling, + } + .to_string() + )),); + } +} + +#[cfg(all(feature = "wgsl-in", feature = "glsl-out"))] +#[test] +fn no_flat_first_in_glsl() { + use dummy_interpolation_shader::DummyInterpolationShader; + + let DummyInterpolationShader { + source: _, + module, + interpolate_attr, + entry_point, + } = 
DummyInterpolationShader::new(naga::Interpolation::Flat, Some(naga::Sampling::First)); + + let mut validator = naga::valid::Validator::new(Default::default(), Default::default()); + let module_info = validator.validate(&module).unwrap(); + + let options = Default::default(); + let pipeline_options = naga::back::glsl::PipelineOptions { + shader_stage: naga::ShaderStage::Fragment, + entry_point: entry_point.to_owned(), + multiview: None, + }; + let mut glsl_writer = naga::back::glsl::Writer::new( + String::new(), + &module, + &module_info, + &options, + &pipeline_options, + Default::default(), + ) + .unwrap(); + + let err = glsl_writer.write().expect_err(&format!( + "`{interpolate_attr}` should fail backend validation" + )); + + assert!(matches!( + err, + naga::back::glsl::Error::FirstSamplingNotSupported + )); +} + +#[cfg(all(test, feature = "wgsl-in"))] +mod dummy_interpolation_shader { + pub struct DummyInterpolationShader { + pub source: String, + pub module: naga::Module, + pub interpolate_attr: String, + pub entry_point: &'static str, + } + + impl DummyInterpolationShader { + pub fn new(interpolation: naga::Interpolation, sampling: Option) -> Self { + // NOTE: If you have to add variants below, make sure to add them to the + // `cartesian_product`'d combinations in tests around here! 
+ let interpolation_str = match interpolation { + naga::Interpolation::Flat => "flat", + naga::Interpolation::Linear => "linear", + naga::Interpolation::Perspective => "perspective", + }; + let sampling_str = match sampling { + None => String::new(), + Some(sampling) => format!( + ", {}", + match sampling { + naga::Sampling::First => "first", + naga::Sampling::Either => "either", + naga::Sampling::Center => "center", + naga::Sampling::Centroid => "centroid", + naga::Sampling::Sample => "sample", + } + ), + }; + let member_type = match interpolation { + naga::Interpolation::Perspective | naga::Interpolation::Linear => "f32", + naga::Interpolation::Flat => "u32", + }; + + let interpolate_attr = format!("@interpolate({interpolation_str}{sampling_str})"); + let source = format!( + "\ + struct VertexOutput {{ + @location(0) {interpolate_attr} member: {member_type}, +}} + +@fragment +fn main(input: VertexOutput) {{ + // ... +}} +" + ); + let module = naga::front::wgsl::parse_str(&source).unwrap(); + + Self { + source, + module, + interpolate_attr, + entry_point: "main", + } + } + } +} diff --git a/naga/tests/wgsl_errors.rs b/naga/tests/wgsl_errors.rs index d6d1710f77..2d91ba01cf 100644 --- a/naga/tests/wgsl_errors.rs +++ b/naga/tests/wgsl_errors.rs @@ -363,13 +363,13 @@ fn unknown_ident() { fn unknown_scalar_type() { check( r#" - const a: vec2; + const a = vec2(); "#, - r#"error: unknown scalar type: 'something' - ┌─ wgsl:2:27 + r#"error: unknown scalar type: 'vec2f' + ┌─ wgsl:2:28 │ -2 │ const a: vec2; - │ ^^^^^^^^^ unknown scalar type +2 │ const a = vec2(); + │ ^^^^^ unknown scalar type │ = note: Valid scalar types are f32, f64, i32, u32, bool @@ -833,13 +833,13 @@ fn matrix_with_bad_type() { check( r#" fn main() { - let m: mat3x3; + var m: mat3x3; } "#, r#"error: matrix scalar type must be floating-point, but found `i32` ┌─ wgsl:3:31 │ -3 │ let m: mat3x3; +3 │ var m: mat3x3; │ ^^^ must be floating-point (e.g. 
`f32`) "#, @@ -2277,3 +2277,166 @@ fn too_many_unclosed_loops() { .join() .unwrap() } + +#[test] +fn local_const_wrong_type() { + check( + " + fn f() { + const c: i32 = 5u; + } + ", + r###"error: the type of `c` is expected to be `i32`, but got `u32` + ┌─ wgsl:3:19 + │ +3 │ const c: i32 = 5u; + │ ^ definition of `c` + +"###, + ); +} + +#[test] +fn local_const_from_let() { + check( + " + fn f() { + let a = 5; + const c = a; + } + ", + r###"error: this operation is not supported in a const context + ┌─ wgsl:4:23 + │ +4 │ const c = a; + │ ^ operation not supported here + +"###, + ); +} + +#[test] +fn local_const_from_var() { + check( + " + fn f() { + var a = 5; + const c = a; + } + ", + r###"error: this operation is not supported in a const context + ┌─ wgsl:4:23 + │ +4 │ const c = a; + │ ^ operation not supported here + +"###, + ); +} + +#[test] +fn local_const_from_override() { + check( + " + override o: i32; + fn f() { + const c = o; + } + ", + r###"error: Unexpected override-expression + ┌─ wgsl:4:23 + │ +4 │ const c = o; + │ ^ see msg + +"###, + ); +} + +#[test] +fn local_const_from_global_var() { + check( + " + var v: i32; + fn f() { + const c = v; + } + ", + r###"error: Unexpected runtime-expression + ┌─ wgsl:4:23 + │ +4 │ const c = v; + │ ^ see msg + +"###, + ); +} + +#[test] +fn only_one_swizzle_type() { + check( + " + const ok1 = vec2(0.0, 0.0).xy; + const ok2 = vec2(0.0, 0.0).rg; + const err = vec2(0.0, 0.0).xg; + ", + r###"error: invalid field accessor `xg` + ┌─ wgsl:4:36 + │ +4 │ const err = vec2(0.0, 0.0).xg; + │ ^^ invalid accessor + +"###, + ); +} + +#[test] +fn const_assert_must_be_const() { + check( + " + fn foo() { + let a = 5; + const_assert a != 0; + } + ", + r###"error: this operation is not supported in a const context + ┌─ wgsl:4:26 + │ +4 │ const_assert a != 0; + │ ^ operation not supported here + +"###, + ); +} + +#[test] +fn const_assert_must_be_bool() { + check( + " + const_assert(5); // 5 is not bool + ", + r###"error: must be a 
const-expression that resolves to a bool + ┌─ wgsl:2:26 + │ +2 │ const_assert(5); // 5 is not bool + │ ^ must resolve to bool + +"###, + ); +} + +#[test] +fn const_assert_failed() { + check( + " + const_assert(false); + ", + r###"error: const_assert failure + ┌─ wgsl:2:26 + │ +2 │ const_assert(false); + │ ^^^^^ evaluates to false + +"###, + ); +} diff --git a/player/src/bin/play.rs b/player/src/bin/play.rs index 4726fe63a7..558eb194ba 100644 --- a/player/src/bin/play.rs +++ b/player/src/bin/play.rs @@ -56,7 +56,7 @@ fn main() { global.instance_create_surface( window.display_handle().unwrap().into(), window.window_handle().unwrap().into(), - Some(wgc::id::Id::zip(0, 1, wgt::Backend::Empty)), + Some(wgc::id::Id::zip(0, 1)), ) } .unwrap(); @@ -74,22 +74,23 @@ fn main() { #[cfg(not(feature = "winit"))] compatible_surface: None, }, - wgc::instance::AdapterInputs::IdSet(&[wgc::id::AdapterId::zip(0, 0, backend)]), + wgt::Backends::from(backend), + Some(wgc::id::AdapterId::zip(0, 1)), ) .expect("Unable to find an adapter for selected backend"); - let info = global.adapter_get_info(adapter).unwrap(); + let info = global.adapter_get_info(adapter); log::info!("Picked '{}'", info.name); - let device_id = wgc::id::Id::zip(1, 0, backend); - let queue_id = wgc::id::Id::zip(1, 0, backend); - let (_, _, error) = global.adapter_request_device( + let device_id = wgc::id::Id::zip(0, 1); + let queue_id = wgc::id::Id::zip(0, 1); + let res = global.adapter_request_device( adapter, &desc, None, Some(device_id), Some(queue_id), ); - if let Some(e) = error { + if let Err(e) = res { panic!("{:?}", e); } (device_id, queue_id) diff --git a/player/src/lib.rs b/player/src/lib.rs index 8ea4e775bd..241c190960 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -352,11 +352,7 @@ impl GlobalPlay for wgc::global::Global { let (encoder, error) = self.device_create_command_encoder( device, &wgt::CommandEncoderDescriptor { label: None }, - Some( - comb_manager - .process(device.backend()) - 
.into_command_encoder_id(), - ), + Some(comb_manager.process().into_command_encoder_id()), ); if let Some(e) = error { panic!("{e}"); diff --git a/player/tests/data/bind-group.ron b/player/tests/data/bind-group.ron index a53a77b16f..80a5d18ba7 100644 --- a/player/tests/data/bind-group.ron +++ b/player/tests/data/bind-group.ron @@ -2,13 +2,13 @@ features: [], expectations: [], //not crash! actions: [ - CreateBuffer(Id(0, 1, Empty), ( + CreateBuffer(Id(0, 1), ( label: None, size: 16, usage: 64, mapped_at_creation: false, )), - CreateBindGroupLayout(Id(0, 1, Empty), ( + CreateBindGroupLayout(Id(0, 1), ( label: None, entries: [ ( @@ -20,29 +20,29 @@ ), ], )), - CreateBindGroup(Id(0, 1, Empty), ( + CreateBindGroup(Id(0, 1), ( label: None, - layout: Id(0, 1, Empty), + layout: Id(0, 1), entries: [ ( binding: 0, resource: Buffer(( - buffer_id: Id(0, 1, Empty), + buffer_id: Id(0, 1), offset: 0, size: None, )), ) ], )), - CreatePipelineLayout(Id(0, 1, Empty), ( + CreatePipelineLayout(Id(0, 1), ( label: Some("empty"), bind_group_layouts: [ - Id(0, 1, Empty), + Id(0, 1), ], push_constant_ranges: [], )), CreateShaderModule( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, flags: (bits: 3), @@ -50,12 +50,12 @@ data: "empty.wgsl", ), CreateComputePipeline( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, - layout: Some(Id(0, 1, Empty)), + layout: Some(Id(0, 1)), stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, zero_initialize_workgroup_memory: true, @@ -70,9 +70,9 @@ SetBindGroup( index: 0, num_dynamic_offsets: 0, - bind_group_id: Id(0, 1, Empty), + bind_group_id: Some(Id(0, 1)), ), - SetPipeline(Id(0, 1, Empty)), + SetPipeline(Id(0, 1)), ], dynamic_offsets: [], string_data: [], diff --git a/player/tests/data/buffer-copy.ron b/player/tests/data/buffer-copy.ron index 5c66f2c019..0cce3b1f69 100644 --- a/player/tests/data/buffer-copy.ron +++ b/player/tests/data/buffer-copy.ron @@ -10,7 +10,7 @@ ], actions: [ 
CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("dummy"), size: 16, @@ -19,7 +19,7 @@ ), ), WriteBuffer( - id: Id(0, 1, Empty), + id: Id(0, 1), data: "data1.bin", range: ( start: 0, diff --git a/player/tests/data/clear-buffer-texture.ron b/player/tests/data/clear-buffer-texture.ron index 7b25fa42c5..4b7548d2fc 100644 --- a/player/tests/data/clear-buffer-texture.ron +++ b/player/tests/data/clear-buffer-texture.ron @@ -20,7 +20,7 @@ ) ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1), ( label: Some("Output Texture"), size: ( width: 64, @@ -36,7 +36,7 @@ // First fill the texture to ensure it wasn't just zero initialized or "happened" to be zero. WriteTexture( to: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), @@ -52,7 +52,7 @@ ), ), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Output Buffer"), size: 16384, @@ -62,7 +62,7 @@ ), CreateBuffer( - Id(1, 1, Empty), + Id(1, 1), ( label: Some("Buffer to be cleared"), size: 16, @@ -72,7 +72,7 @@ ), // Make sure there is something in the buffer, otherwise it might be just zero init! 
WriteBuffer( - id: Id(1, 1, Empty), + id: Id(1, 1), data: "data1.bin", range: ( start: 0, @@ -82,7 +82,7 @@ ), Submit(1, [ ClearTexture( - dst: Id(0, 1, Empty), + dst: Id(0, 1), subresource_range: ImageSubresourceRange( aspect: all, baseMipLevel: 0, @@ -93,12 +93,12 @@ ), CopyTextureToBuffer( src: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), dst: ( - buffer: Id(0, 1, Empty), + buffer: Id(0, 1), layout: ( offset: 0, bytes_per_row: Some(256), @@ -112,7 +112,7 @@ ), // Partial clear to prove ClearBuffer( - dst: Id(1, 1, Empty), + dst: Id(1, 1), offset: 4, size: Some(8), ) diff --git a/player/tests/data/pipeline-statistics-query.ron b/player/tests/data/pipeline-statistics-query.ron index 8a6e4239b9..17ef08b1bb 100644 --- a/player/tests/data/pipeline-statistics-query.ron +++ b/player/tests/data/pipeline-statistics-query.ron @@ -9,13 +9,13 @@ ), ], actions: [ - CreatePipelineLayout(Id(0, 1, Empty), ( + CreatePipelineLayout(Id(0, 1), ( label: Some("empty"), bind_group_layouts: [], push_constant_ranges: [], )), CreateShaderModule( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, flags: (bits: 3), @@ -23,12 +23,12 @@ data: "empty.wgsl", ), CreateComputePipeline( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, - layout: Some(Id(0, 1, Empty)), + layout: Some(Id(0, 1)), stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, zero_initialize_workgroup_memory: true, @@ -37,7 +37,7 @@ ), ), CreateQuerySet( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: Some("Compute Invocation QuerySet"), count: 2, @@ -45,7 +45,7 @@ ), ), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Compute Invocation Result Buffer"), size: 16, @@ -57,9 +57,9 @@ RunComputePass( base: ( commands: [ - SetPipeline(Id(0, 1, Empty)), + SetPipeline(Id(0, 1)), BeginPipelineStatisticsQuery( - query_set_id: Id(0, 1, Empty), + query_set_id: Id(0, 1), query_index: 0, ), Dispatch((2, 3, 7,)), @@ -71,10 +71,10 
@@ ), ), ResolveQuerySet( - query_set_id: Id(0, 1, Empty), + query_set_id: Id(0, 1), start_query: 0, query_count: 1, - destination: Id(0, 1, Empty), + destination: Id(0, 1), destination_offset: 0, ) ]), diff --git a/player/tests/data/quad.ron b/player/tests/data/quad.ron index aad576c42b..a954cb597f 100644 --- a/player/tests/data/quad.ron +++ b/player/tests/data/quad.ron @@ -10,14 +10,14 @@ ], actions: [ CreateShaderModule( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, flags: (bits: 3), ), data: "quad.wgsl", ), - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1), ( label: Some("Output Texture"), size: ( width: 64, @@ -31,12 +31,12 @@ view_formats: [], )), CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), + id: Id(0, 1), + parent_id: Id(0, 1), desc: (), ), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Output Buffer"), size: 16384, @@ -44,19 +44,19 @@ mapped_at_creation: false, ), ), - CreatePipelineLayout(Id(0, 1, Empty), ( + CreatePipelineLayout(Id(0, 1), ( label: None, bind_group_layouts: [], push_constant_ranges: [], )), CreateRenderPipeline( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, - layout: Some(Id(0, 1, Empty)), + layout: Some(Id(0, 1)), vertex: ( stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, zero_initialize_workgroup_memory: true, @@ -66,7 +66,7 @@ ), fragment: Some(( stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, zero_initialize_workgroup_memory: true, @@ -84,7 +84,7 @@ RunRenderPass( base: ( commands: [ - SetPipeline(Id(0, 1, Empty)), + SetPipeline(Id(0, 1)), Draw( vertex_count: 3, instance_count: 1, @@ -98,7 +98,7 @@ ), target_colors: [ Some(( - view: Id(0, 1, Empty), + view: Id(0, 1), resolve_target: None, channel: ( load_op: clear, @@ -117,12 +117,12 @@ ), CopyTextureToBuffer( src: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), dst: ( - buffer: Id(0, 1, 
Empty), + buffer: Id(0, 1), layout: ( offset: 0, bytes_per_row: Some(256), diff --git a/player/tests/data/zero-init-buffer.ron b/player/tests/data/zero-init-buffer.ron index b13786e262..c4cf25f659 100644 --- a/player/tests/data/zero-init-buffer.ron +++ b/player/tests/data/zero-init-buffer.ron @@ -39,7 +39,7 @@ ], actions: [ CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("mapped_at_creation: false, with MAP_WRITE"), size: 16, @@ -48,7 +48,7 @@ ), ), CreateBuffer( - Id(1, 1, Empty), + Id(1, 1), ( label: Some("mapped_at_creation: false, without MAP_WRITE"), size: 16, @@ -57,7 +57,7 @@ ), ), CreateBuffer( - Id(2, 1, Empty), + Id(2, 1), ( label: Some("partially written"), size: 24, @@ -66,7 +66,7 @@ ), ), WriteBuffer( - id: Id(2, 1, Empty), + id: Id(2, 1), data: "data1.bin", range: ( start: 4, @@ -75,20 +75,20 @@ queued: true, ), CreateShaderModule( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, flags: (bits: 3), ), data: "zero-init-buffer-for-binding.wgsl", ), - CreateBuffer(Id(3, 1, Empty), ( + CreateBuffer(Id(3, 1), ( label: Some("used in binding"), size: 16, usage: 129, // STORAGE + MAP_READ mapped_at_creation: false, )), - CreateBindGroupLayout(Id(0, 1, Empty), ( + CreateBindGroupLayout(Id(0, 1), ( label: None, entries: [ ( @@ -105,34 +105,34 @@ ), ], )), - CreateBindGroup(Id(0, 1, Empty), ( + CreateBindGroup(Id(0, 1), ( label: None, - layout: Id(0, 1, Empty), + layout: Id(0, 1), entries: [ ( binding: 0, resource: Buffer(( - buffer_id: Id(3, 1, Empty), + buffer_id: Id(3, 1), offset: 0, size: Some(16), )), ), ], )), - CreatePipelineLayout(Id(0, 1, Empty), ( + CreatePipelineLayout(Id(0, 1), ( label: None, bind_group_layouts: [ - Id(0, 1, Empty), + Id(0, 1), ], push_constant_ranges: [], )), CreateComputePipeline( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, - layout: Some(Id(0, 1, Empty)), + layout: Some(Id(0, 1)), stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, 
zero_initialize_workgroup_memory: true, @@ -145,11 +145,11 @@ base: ( label: None, commands: [ - SetPipeline(Id(0, 1, Empty)), + SetPipeline(Id(0, 1)), SetBindGroup( index: 0, num_dynamic_offsets: 0, - bind_group_id: Id(0, 1, Empty), + bind_group_id: Some(Id(0, 1)), ), Dispatch((4, 1, 1)), ], diff --git a/player/tests/data/zero-init-texture-binding.ron b/player/tests/data/zero-init-texture-binding.ron index ba4951c198..48415f43c5 100644 --- a/player/tests/data/zero-init-texture-binding.ron +++ b/player/tests/data/zero-init-texture-binding.ron @@ -17,7 +17,7 @@ // MISSING: Partial views ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1), ( label: Some("Sampled Texture"), size: ( width: 64, @@ -31,12 +31,12 @@ view_formats: [], )), CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), + id: Id(0, 1), + parent_id: Id(0, 1), desc: (), ), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Sampled Texture Buffer"), size: 16384, @@ -44,7 +44,7 @@ mapped_at_creation: false, ), ), - CreateTexture(Id(1, 1, Empty), ( + CreateTexture(Id(1, 1), ( label: Some("Storage Texture"), size: ( width: 64, @@ -58,12 +58,12 @@ view_formats: [], )), CreateTextureView( - id: Id(1, 1, Empty), - parent_id: Id(1, 1, Empty), + id: Id(1, 1), + parent_id: Id(1, 1), desc: (), ), CreateBuffer( - Id(1, 1, Empty), + Id(1, 1), ( label: Some("Storage Texture Buffer"), size: 16384, @@ -73,7 +73,7 @@ ), - CreateBindGroupLayout(Id(0, 1, Empty), ( + CreateBindGroupLayout(Id(0, 1), ( label: None, entries: [ ( @@ -98,29 +98,29 @@ ), ], )), - CreateBindGroup(Id(0, 1, Empty), ( + CreateBindGroup(Id(0, 1), ( label: None, - layout: Id(0, 1, Empty), + layout: Id(0, 1), entries: [ ( binding: 0, - resource: TextureView(Id(0, 1, Empty)), + resource: TextureView(Id(0, 1)), ), ( binding: 1, - resource: TextureView(Id(1, 1, Empty)), + resource: TextureView(Id(1, 1)), ), ], )), - CreatePipelineLayout(Id(0, 1, Empty), ( + CreatePipelineLayout(Id(0, 1), ( label: None, 
bind_group_layouts: [ - Id(0, 1, Empty), + Id(0, 1), ], push_constant_ranges: [], )), CreateShaderModule( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, flags: (bits: 3), @@ -128,12 +128,12 @@ data: "zero-init-texture-binding.wgsl", ), CreateComputePipeline( - id: Id(0, 1, Empty), + id: Id(0, 1), desc: ( label: None, - layout: Some(Id(0, 1, Empty)), + layout: Some(Id(0, 1)), stage: ( - module: Id(0, 1, Empty), + module: Id(0, 1), entry_point: None, constants: {}, zero_initialize_workgroup_memory: true, @@ -146,11 +146,11 @@ RunComputePass( base: ( commands: [ - SetPipeline(Id(0, 1, Empty)), + SetPipeline(Id(0, 1)), SetBindGroup( index: 0, num_dynamic_offsets: 0, - bind_group_id: Id(0, 1, Empty), + bind_group_id: Some(Id(0, 1)), ), Dispatch((4, 1, 1)), ], @@ -161,12 +161,12 @@ ), CopyTextureToBuffer( src: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), dst: ( - buffer: Id(0, 1, Empty), + buffer: Id(0, 1), layout: ( offset: 0, bytes_per_row: Some(256), @@ -180,12 +180,12 @@ ), CopyTextureToBuffer( src: ( - texture: Id(1, 1, Empty), + texture: Id(1, 1), mip_level: 0, array_layer: 0, ), dst: ( - buffer: Id(1, 1, Empty), + buffer: Id(1, 1), layout: ( offset: 0, bytes_per_row: Some(256), diff --git a/player/tests/data/zero-init-texture-copytobuffer.ron b/player/tests/data/zero-init-texture-copytobuffer.ron index 599ddbd67d..eae95aaae4 100644 --- a/player/tests/data/zero-init-texture-copytobuffer.ron +++ b/player/tests/data/zero-init-texture-copytobuffer.ron @@ -10,7 +10,7 @@ // MISSING: Partial copies ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1), ( label: Some("Copy To Buffer Texture"), size: ( width: 64, @@ -24,7 +24,7 @@ view_formats: [], )), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Copy to Buffer Buffer"), size: 16384, @@ -35,12 +35,12 @@ Submit(1, [ CopyTextureToBuffer( src: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), dst: ( - 
buffer: Id(0, 1, Empty), + buffer: Id(0, 1), layout: ( offset: 0, bytes_per_row: Some(256), diff --git a/player/tests/data/zero-init-texture-rendertarget.ron b/player/tests/data/zero-init-texture-rendertarget.ron index ec844fe073..adbb869625 100644 --- a/player/tests/data/zero-init-texture-rendertarget.ron +++ b/player/tests/data/zero-init-texture-rendertarget.ron @@ -10,7 +10,7 @@ // MISSING: Partial view. ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1), ( label: Some("Render Target Texture"), size: ( width: 64, @@ -24,12 +24,12 @@ view_formats: [], )), CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), + id: Id(0, 1), + parent_id: Id(0, 1), desc: (), ), CreateBuffer( - Id(0, 1, Empty), + Id(0, 1), ( label: Some("Render Target Buffer"), size: 16384, @@ -48,7 +48,7 @@ ), target_colors: [ Some(( - view: Id(0, 1, Empty), + view: Id(0, 1), resolve_target: None, channel: ( load_op: load, @@ -64,12 +64,12 @@ ), CopyTextureToBuffer( src: ( - texture: Id(0, 1, Empty), + texture: Id(0, 1), mip_level: 0, array_layer: 0, ), dst: ( - buffer: Id(0, 1, Empty), + buffer: Id(0, 1), layout: ( offset: 0, bytes_per_row: Some(256), diff --git a/player/tests/test.rs b/player/tests/test.rs index ee8e2ecc0d..ec96f54469 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -14,6 +14,7 @@ use player::GlobalPlay; use std::{ fs::{read_to_string, File}, io::{Read, Seek, SeekFrom}, + mem::size_of, path::{Path, PathBuf}, slice, }; @@ -35,7 +36,7 @@ impl ExpectedData { fn len(&self) -> usize { match self { ExpectedData::Raw(vec) => vec.len(), - ExpectedData::U64(vec) => vec.len() * std::mem::size_of::(), + ExpectedData::U64(vec) => vec.len() * size_of::(), ExpectedData::File(_, size) => *size, } } @@ -104,10 +105,9 @@ impl Test<'_> { adapter: wgc::id::AdapterId, test_num: u32, ) { - let backend = adapter.backend(); - let device_id = wgc::id::Id::zip(test_num, 0, backend); - let queue_id = wgc::id::Id::zip(test_num, 0, backend); - let (_, 
_, error) = global.adapter_request_device( + let device_id = wgc::id::Id::zip(test_num, 1); + let queue_id = wgc::id::Id::zip(test_num, 1); + let res = global.adapter_request_device( adapter, &wgt::DeviceDescriptor { label: None, @@ -119,7 +119,7 @@ impl Test<'_> { Some(device_id), Some(queue_id), ); - if let Some(e) = error { + if let Err(e) = res { panic!("{:?}", e); } @@ -136,7 +136,7 @@ impl Test<'_> { } println!("\t\t\tMapping..."); for expect in &self.expectations { - let buffer = wgc::id::Id::zip(expect.buffer.index, expect.buffer.epoch, backend); + let buffer = wgc::id::Id::zip(expect.buffer.index, expect.buffer.epoch); global .buffer_map_async( buffer, @@ -159,7 +159,7 @@ impl Test<'_> { for expect in self.expectations { println!("\t\t\tChecking {}", expect.name); - let buffer = wgc::id::Id::zip(expect.buffer.index, expect.buffer.epoch, backend); + let buffer = wgc::id::Id::zip(expect.buffer.index, expect.buffer.epoch); let (ptr, size) = global .buffer_get_mapped_range( buffer, @@ -236,17 +236,18 @@ impl Corpus { force_fallback_adapter: false, compatible_surface: None, }, - wgc::instance::AdapterInputs::IdSet(&[wgc::id::Id::zip(0, 0, backend)]), + wgt::Backends::from(backend), + Some(wgc::id::Id::zip(0, 1)), ) { Ok(adapter) => adapter, Err(_) => continue, }; println!("\tBackend {:?}", backend); - let supported_features = global.adapter_features(adapter).unwrap(); - let downlevel_caps = global.adapter_downlevel_capabilities(adapter).unwrap(); + let supported_features = global.adapter_features(adapter); + let downlevel_caps = global.adapter_downlevel_capabilities(adapter); - let test = Test::load(dir.join(test_path), adapter.backend()); + let test = Test::load(dir.join(test_path), backend); if !supported_features.contains(test.features) { println!( "\t\tSkipped due to missing features {:?}", diff --git a/tests/src/image.rs b/tests/src/image.rs index e72d3ee442..602d93c4e2 100644 --- a/tests/src/image.rs +++ b/tests/src/image.rs @@ -377,7 +377,7 @@ fn 
copy_via_compute( let mut pass = encoder.begin_compute_pass(&ComputePassDescriptor::default()); pass.set_pipeline(&pipeline_copy); - pass.set_bind_group(0, &bg, &[]); + pass.set_bind_group(0, Some(&bg), &[]); pass.dispatch_workgroups(1, 1, 1); } diff --git a/tests/src/init.rs b/tests/src/init.rs index 140bb202fc..3644655bec 100644 --- a/tests/src/init.rs +++ b/tests/src/init.rs @@ -1,6 +1,8 @@ use wgpu::{Adapter, Device, Instance, Queue}; use wgt::{Backends, Features, Limits}; +use crate::report::AdapterReport; + /// Initialize the logger for the test runner. pub fn init_logger() { // We don't actually care if it fails @@ -11,7 +13,7 @@ pub fn init_logger() { } /// Initialize a wgpu instance with the options from the environment. -pub fn initialize_instance(force_fxc: bool) -> Instance { +pub fn initialize_instance(backends: wgpu::Backends, force_fxc: bool) -> Instance { // We ignore `WGPU_BACKEND` for now, merely using test filtering to only run a single backend's tests. // // We can potentially work support back into the test runner in the future, but as the adapters are matched up @@ -23,9 +25,9 @@ pub fn initialize_instance(force_fxc: bool) -> Instance { // To "disable" webgpu regardless, we do this by removing the webgpu backend whenever we see // the webgl feature. let backends = if cfg!(feature = "webgl") { - Backends::all() - Backends::BROWSER_WEBGPU + backends - wgpu::Backends::BROWSER_WEBGPU } else { - Backends::all() + backends }; // Some tests need to be able to force demote to FXC, to specifically test workarounds for FXC // behavior. @@ -43,12 +45,16 @@ pub fn initialize_instance(force_fxc: bool) -> Instance { }) } -/// Initialize a wgpu adapter, taking the `n`th adapter from the instance. +/// Initialize a wgpu adapter, using the given adapter report to match the adapter. 
pub async fn initialize_adapter( - adapter_index: usize, + adapter_report: Option<&AdapterReport>, force_fxc: bool, ) -> (Instance, Adapter, Option) { - let instance = initialize_instance(force_fxc); + let backends = adapter_report + .map(|report| Backends::from(report.info.backend)) + .unwrap_or_default(); + + let instance = initialize_instance(backends, force_fxc); #[allow(unused_variables)] let surface: Option; let surface_guard: Option; @@ -82,13 +88,24 @@ pub async fn initialize_adapter( cfg_if::cfg_if! { if #[cfg(not(target_arch = "wasm32"))] { - let adapter_iter = instance.enumerate_adapters(wgpu::Backends::all()); - let adapter_count = adapter_iter.len(); + let adapter_iter = instance.enumerate_adapters(backends); let adapter = adapter_iter.into_iter() - .nth(adapter_index) - .unwrap_or_else(|| panic!("Tried to get index {adapter_index} adapter, but adapter list was only {adapter_count} long. Is .gpuconfig out of date?")); + // If we have a report, we only want to match the adapter with the same info. + // + // If we don't have a report, we just take the first adapter. + .find(|adapter| if let Some(adapter_report) = adapter_report { + adapter.get_info() == adapter_report.info + } else { + true + }); + let Some(adapter) = adapter else { + panic!( + "Could not find adapter with info {:#?} in {:#?}", + adapter_report.map(|r| &r.info), + instance.enumerate_adapters(backends).into_iter().map(|a| a.get_info()).collect::>(), + ); + }; } else { - assert_eq!(adapter_index, 0); let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions { compatible_surface: surface.as_ref(), ..Default::default() diff --git a/tests/src/lib.rs b/tests/src/lib.rs index fcc1615875..89f7e91c6e 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -54,10 +54,16 @@ pub fn fail( } /// Run some code in an error scope and assert that validation succeeds. 
+#[track_caller] pub fn valid(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T { device.push_error_scope(wgpu::ErrorFilter::Validation); let result = callback(); - assert!(pollster::block_on(device.pop_error_scope()).is_none()); + if let Some(error) = pollster::block_on(device.pop_error_scope()) { + panic!( + "`valid` block at {} encountered wgpu error:\n{error}", + std::panic::Location::caller() + ); + } result } diff --git a/tests/src/native.rs b/tests/src/native.rs index 3d328b4ff1..afad3d46dc 100644 --- a/tests/src/native.rs +++ b/tests/src/native.rs @@ -19,15 +19,16 @@ struct NativeTest { } impl NativeTest { + /// Adapter index is only used for naming the test, the adapters are matched based on the adapter info. fn from_configuration( config: GpuTestConfiguration, - adapter: &AdapterReport, + adapter_report: AdapterReport, adapter_index: usize, ) -> Self { - let backend = adapter.info.backend; - let device_name = &adapter.info.name; + let backend = adapter_report.info.backend; + let device_name = &adapter_report.info.name; - let test_info = TestInfo::from_configuration(&config, adapter); + let test_info = TestInfo::from_configuration(&config, &adapter_report); let full_name = format!( "[{running_msg}] [{backend:?}/{device_name}/{adapter_index}] {base_name}", @@ -50,10 +51,12 @@ impl NativeTest { let env_value = if metal_validation { "1" } else { "0" }; std::env::set_var("MTL_DEBUG_LAYER", env_value); - // Metal Shader Validation is entirely broken in the paravirtualized CI environment. - // std::env::set_var("MTL_SHADER_VALIDATION", env_value); + if std::env::var("GITHUB_ACTIONS").as_deref() != Ok("true") { + // Metal Shader Validation is entirely broken in the paravirtualized CI environment. 
+ std::env::set_var("MTL_SHADER_VALIDATION", env_value); + } - execute_test(config, Some(test_info), adapter_index).await; + execute_test(Some(&adapter_report), config, Some(test_info)).await; }), } } @@ -83,16 +86,24 @@ pub fn main() -> MainResult { &std::fs::read_to_string(format!("{}/../.gpuconfig", env!("CARGO_MANIFEST_DIR"))) .context("Failed to read .gpuconfig, did you run the tests via `cargo xtask test`?")? }; - let report = GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?; + let mut report = + GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?; + + // Filter out the adapters that are not part of WGPU_BACKEND. + let wgpu_backends = wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()); + report + .devices + .retain(|report| wgpu_backends.contains(wgpu::Backends::from(report.info.backend))); let mut test_guard = TEST_LIST.lock(); + // Iterate through all the tests. Creating a test per adapter. execute_native(test_guard.drain(..).flat_map(|test| { report .devices .iter() .enumerate() - .map(move |(adapter_index, adapter)| { - NativeTest::from_configuration(test.clone(), adapter, adapter_index) + .map(move |(adapter_index, adapter_report)| { + NativeTest::from_configuration(test.clone(), adapter_report.clone(), adapter_index) }) })); diff --git a/tests/src/report.rs b/tests/src/report.rs index 42633e72ac..b26bdbfaf3 100644 --- a/tests/src/report.rs +++ b/tests/src/report.rs @@ -25,8 +25,8 @@ impl GpuReport { /// A single report of the capabilities of an Adapter. /// /// Must be synchronized with the definition on wgpu-info/src/report.rs. 
-#[derive(Deserialize)] -pub(crate) struct AdapterReport { +#[derive(Deserialize, Clone)] +pub struct AdapterReport { pub info: AdapterInfo, pub features: Features, pub limits: Limits, diff --git a/tests/src/run.rs b/tests/src/run.rs index 303c4c24af..5fb15c4c3d 100644 --- a/tests/src/run.rs +++ b/tests/src/run.rs @@ -24,14 +24,14 @@ pub struct TestingContext { pub queue: Queue, } -/// Execute the given test configuration with the given adapter index. +/// Execute the given test configuration with the given adapter report. /// /// If test_info is specified, will use the information whether to skip the test. /// If it is not, we'll create the test info from the adapter itself. pub async fn execute_test( + adapter_report: Option<&AdapterReport>, config: GpuTestConfiguration, test_info: Option, - adapter_index: usize, ) { // If we get information externally, skip based on that information before we do anything. if let Some(TestInfo { skip: true, .. }) = test_info { @@ -43,7 +43,7 @@ pub async fn execute_test( let _test_guard = isolation::OneTestPerProcessGuard::new(); let (instance, adapter, _surface_guard) = - initialize_adapter(adapter_index, config.params.force_fxc).await; + initialize_adapter(adapter_report, config.params.force_fxc).await; let adapter_info = adapter.get_info(); let adapter_downlevel_capabilities = adapter.get_downlevel_capabilities(); diff --git a/tests/tests/bgra8unorm_storage.rs b/tests/tests/bgra8unorm_storage.rs index 0859473b2f..fa8310c7ea 100644 --- a/tests/tests/bgra8unorm_storage.rs +++ b/tests/tests/bgra8unorm_storage.rs @@ -110,7 +110,7 @@ static BGRA8_UNORM_STORAGE: GpuTestConfiguration = GpuTestConfiguration::new() timestamp_writes: None, }); - pass.set_bind_group(0, &bg, &[]); + pass.set_bind_group(0, Some(&bg), &[]); pass.set_pipeline(&pipeline); pass.dispatch_workgroups(256, 256, 1); } diff --git a/tests/tests/bind_group_layout_dedup.rs b/tests/tests/bind_group_layout_dedup.rs index 32f71b89d7..f81360288f 100644 --- 
a/tests/tests/bind_group_layout_dedup.rs +++ b/tests/tests/bind_group_layout_dedup.rs @@ -31,129 +31,77 @@ static BIND_GROUP_LAYOUT_DEDUPLICATION: GpuTestConfiguration = GpuTestConfigurat .run_async(bgl_dedupe); async fn bgl_dedupe(ctx: TestingContext) { - let entries_1 = &[]; - - let entries_2 = &[ENTRY]; - - // Block so we can force all resource to die. - { - let bgl_1a = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); - - let bgl_2 = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_2, - }); - - let bgl_1b = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); - - let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + let entries = &[]; + + let bgl_1a = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: None, - layout: &bgl_1a, - entries: &[], + entries, }); - let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + let bgl_1b = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: None, - layout: &bgl_1b, - entries: &[], + entries, }); - let pipeline_layout = ctx - .device - .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: None, - bind_group_layouts: &[&bgl_1b], - push_constant_ranges: &[], - }); - - let module = ctx - .device - .create_shader_module(wgpu::ShaderModuleDescriptor { - label: None, - source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), - }); - - let desc = wgpu::ComputePipelineDescriptor { - label: None, - layout: Some(&pipeline_layout), - module: &module, - entry_point: Some("no_resources"), - compilation_options: Default::default(), - cache: None, - }; + let bg_1a = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bgl_1a, + entries: &[], + }); - let pipeline = ctx.device.create_compute_pipeline(&desc); + 
let bg_1b = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bgl_1b, + entries: &[], + }); - let mut encoder = ctx.device.create_command_encoder(&Default::default()); + let pipeline_layout = ctx + .device + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&bgl_1b], + push_constant_ranges: &[], + }); - let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { + let module = ctx + .device + .create_shader_module(wgpu::ShaderModuleDescriptor { label: None, - timestamp_writes: None, + source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), }); - pass.set_bind_group(0, &bg_1b, &[]); - pass.set_pipeline(&pipeline); - pass.dispatch_workgroups(1, 1, 1); + let desc = wgpu::ComputePipelineDescriptor { + label: None, + layout: Some(&pipeline_layout), + module: &module, + entry_point: Some("no_resources"), + compilation_options: Default::default(), + cache: None, + }; - pass.set_bind_group(0, &bg_1a, &[]); - pass.dispatch_workgroups(1, 1, 1); + let pipeline = ctx.device.create_compute_pipeline(&desc); - drop(pass); + let mut encoder = ctx.device.create_command_encoder(&Default::default()); - ctx.queue.submit(Some(encoder.finish())); + let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { + label: None, + timestamp_writes: None, + }); - // Abuse the fact that global_id is really just the bitpacked ids when targeting wgpu-core. 
- if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { - let bgl_1a_idx = bgl_1a.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_1a_idx, 0); - let bgl_2_idx = bgl_2.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_2_idx, 1); - let bgl_1b_idx = bgl_1b.global_id().inner() & 0xFFFF_FFFF; - assert_eq!(bgl_1b_idx, 2); - } - } - - ctx.async_poll(wgpu::Maintain::wait()) - .await - .panic_on_timeout(); - - if ctx.adapter_info.backend != wgt::Backend::BrowserWebGpu { - // Indices are made reusable as soon as the handle is dropped so we keep them around - // for the duration of the loop. - let mut bgls = Vec::new(); - let mut indices = Vec::new(); - // Now all of the BGL ids should be dead, so we should get the same ids again. - for _ in 0..=2 { - let test_bgl = ctx - .device - .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: entries_1, - }); - - let test_bgl_idx = test_bgl.global_id().inner() & 0xFFFF_FFFF; - bgls.push(test_bgl); - indices.push(test_bgl_idx); - } - // We don't guarantee that the IDs will appear in the same order. Sort them - // and check that they all appear exactly once. 
- indices.sort(); - for (i, index) in indices.iter().enumerate() { - assert_eq!(*index, i as u64); - } - } + pass.set_bind_group(0, Some(&bg_1b), &[]); + pass.set_pipeline(&pipeline); + pass.dispatch_workgroups(1, 1, 1); + + pass.set_bind_group(0, Some(&bg_1a), &[]); + pass.dispatch_workgroups(1, 1, 1); + + drop(pass); + + ctx.queue.submit(Some(encoder.finish())); } #[gpu_test] @@ -231,7 +179,7 @@ fn bgl_dedupe_with_dropped_user_handle(ctx: TestingContext) { timestamp_writes: None, }); - pass.set_bind_group(0, &bg, &[]); + pass.set_bind_group(0, Some(&bg), &[]); pass.set_pipeline(&pipeline); pass.dispatch_workgroups(1, 1, 1); @@ -302,10 +250,10 @@ fn get_derived_bgl(ctx: TestingContext) { pass.set_pipeline(&pipeline); - pass.set_bind_group(0, &bg1, &[]); + pass.set_bind_group(0, Some(&bg1), &[]); pass.dispatch_workgroups(1, 1, 1); - pass.set_bind_group(0, &bg2, &[]); + pass.set_bind_group(0, Some(&bg2), &[]); pass.dispatch_workgroups(1, 1, 1); drop(pass); @@ -365,7 +313,7 @@ fn separate_pipelines_have_incompatible_derived_bgls(ctx: TestingContext) { pass.set_pipeline(&pipeline1); // We use the wrong bind group for this pipeline here. This should fail. 
- pass.set_bind_group(0, &bg2, &[]); + pass.set_bind_group(0, Some(&bg2), &[]); pass.dispatch_workgroups(1, 1, 1); fail( @@ -437,7 +385,7 @@ fn derived_bgls_incompatible_with_regular_bgls(ctx: TestingContext) { pass.set_pipeline(&pipeline); - pass.set_bind_group(0, &bg, &[]); + pass.set_bind_group(0, Some(&bg), &[]); pass.dispatch_workgroups(1, 1, 1); fail( @@ -528,8 +476,8 @@ fn bgl_dedupe_derived(ctx: TestingContext) { timestamp_writes: None, }); pass.set_pipeline(&pipeline); - pass.set_bind_group(0, &bind_group_0, &[]); - pass.set_bind_group(1, &bind_group_1, &[]); + pass.set_bind_group(0, Some(&bind_group_0), &[]); + pass.set_bind_group(1, Some(&bind_group_1), &[]); pass.dispatch_workgroups(1, 1, 1); drop(pass); diff --git a/tests/tests/buffer.rs b/tests/tests/buffer.rs index b3a48f178a..f76a9fc352 100644 --- a/tests/tests/buffer.rs +++ b/tests/tests/buffer.rs @@ -328,7 +328,7 @@ static MINIMUM_BUFFER_BINDING_SIZE_DISPATCH: GpuTestConfiguration = GpuTestConfi timestamp_writes: None, }); - pass.set_bind_group(0, &bind_group, &[]); + pass.set_bind_group(0, Some(&bind_group), &[]); pass.set_pipeline(&pipeline); pass.dispatch_workgroups(1, 1, 1); diff --git a/tests/tests/clear_texture.rs b/tests/tests/clear_texture.rs index f62e2be219..484681130b 100644 --- a/tests/tests/clear_texture.rs +++ b/tests/tests/clear_texture.rs @@ -26,7 +26,7 @@ static TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT: &[wgpu::TextureFormat] = &[ wgpu::TextureFormat::Bgra8UnormSrgb, wgpu::TextureFormat::Rgb10a2Uint, wgpu::TextureFormat::Rgb10a2Unorm, - wgpu::TextureFormat::Rg11b10UFloat, + wgpu::TextureFormat::Rg11b10Ufloat, wgpu::TextureFormat::Rg32Uint, wgpu::TextureFormat::Rg32Sint, wgpu::TextureFormat::Rg32Float, diff --git a/tests/tests/compute_pass_ownership.rs b/tests/tests/compute_pass_ownership.rs index 80f81f4d81..6b5bad8cf7 100644 --- a/tests/tests/compute_pass_ownership.rs +++ b/tests/tests/compute_pass_ownership.rs @@ -1,7 +1,7 @@ //! 
Tests that compute passes take ownership of resources that are associated with. //! I.e. once a resource is passed in to a compute pass, it can be dropped. -use std::num::NonZeroU64; +use std::{mem::size_of, num::NonZeroU64}; use wgpu::util::DeviceExt as _; use wgpu_test::{gpu_test, valid, GpuTestConfiguration, TestParameters, TestingContext}; @@ -45,7 +45,7 @@ async fn compute_pass_resource_ownership(ctx: TestingContext) { { let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default()); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.dispatch_workgroups_indirect(&indirect_buffer, 0); // Now drop all resources we set. Then do a device poll to make sure the resources are really not dropped too early, no matter what. @@ -95,7 +95,7 @@ async fn compute_pass_query_set_ownership_pipeline_statistics(ctx: TestingContex { let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default()); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.begin_pipeline_statistics_query(&query_set, 0); cpass.dispatch_workgroups(1, 1, 1); cpass.end_pipeline_statistics_query(); @@ -153,7 +153,7 @@ async fn compute_pass_query_set_ownership_timestamps(ctx: TestingContext) { }), }); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.write_timestamp(&query_set_write_timestamp, 0); cpass.dispatch_workgroups(1, 1, 1); @@ -203,7 +203,7 @@ async fn compute_pass_keep_encoder_alive(ctx: TestingContext) { // Record some draw commands. cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.dispatch_workgroups_indirect(&indirect_buffer, 0); // Dropping the pass will still execute the pass, even though there's no way to submit it. 
@@ -253,7 +253,7 @@ fn resource_setup(ctx: &TestingContext) -> ResourceSetup { source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), }); - let buffer_size = 4 * std::mem::size_of::() as u64; + let buffer_size = 4 * size_of::() as u64; let bgl = ctx .device diff --git a/tests/tests/create_surface_error.rs b/tests/tests/create_surface_error.rs index e3b48cb757..75c8b90201 100644 --- a/tests/tests/create_surface_error.rs +++ b/tests/tests/create_surface_error.rs @@ -6,7 +6,7 @@ #[wasm_bindgen_test::wasm_bindgen_test] fn canvas_get_context_returned_null() { // Not using the normal testing infrastructure because that goes straight to creating the canvas for us. - let instance = wgpu_test::initialize_instance(false); + let instance = wgpu_test::initialize_instance(wgpu::Backends::all(), false); // Create canvas let canvas = wgpu_test::initialize_html_canvas(); diff --git a/tests/tests/device.rs b/tests/tests/device.rs index 0430f097fe..2774bfd524 100644 --- a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -36,32 +36,23 @@ static CROSS_DEVICE_BIND_GROUP_USAGE: GpuTestConfiguration = GpuTestConfiguratio #[gpu_test] static DEVICE_LIFETIME_CHECK: GpuTestConfiguration = GpuTestConfiguration::new() .parameters(TestParameters::default()) - .run_sync(|_| { - use pollster::FutureExt as _; - - let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { - backends: wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()), - dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(), - gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(), - flags: wgpu::InstanceFlags::advanced_debugging().with_env(), - }); - - let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, None) - .block_on() - .expect("failed to create adapter"); - - let (device, queue) = adapter - .request_device(&wgpu::DeviceDescriptor::default(), None) - .block_on() - .expect("failed to create device"); + .run_sync(|ctx| { + 
ctx.instance.poll_all(false); - instance.poll_all(false); + let pre_report = ctx.instance.generate_report().unwrap(); - let pre_report = instance.generate_report().unwrap(); + let TestingContext { + instance, + device, + queue, + .. + } = ctx; drop(queue); drop(device); + let post_report = instance.generate_report().unwrap(); + assert_ne!( pre_report, post_report, "Queue and Device has not been dropped as expected" @@ -72,29 +63,16 @@ static DEVICE_LIFETIME_CHECK: GpuTestConfiguration = GpuTestConfiguration::new() #[gpu_test] static MULTIPLE_DEVICES: GpuTestConfiguration = GpuTestConfiguration::new() .parameters(TestParameters::default()) - .run_sync(|_| { + .run_sync(|ctx| { use pollster::FutureExt as _; - - fn create_device_and_queue() -> (wgpu::Device, wgpu::Queue) { - let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { - backends: wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()), - dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env() - .unwrap_or_default(), - gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(), - flags: wgpu::InstanceFlags::advanced_debugging().with_env(), - }); - - let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, None) - .block_on() - .expect("failed to create adapter"); - - adapter - .request_device(&wgpu::DeviceDescriptor::default(), None) - .block_on() - .expect("failed to create device") - } - - let _ = vec![create_device_and_queue(), create_device_and_queue()]; + ctx.adapter + .request_device(&wgpu::DeviceDescriptor::default(), None) + .block_on() + .expect("failed to create device"); + ctx.adapter + .request_device(&wgpu::DeviceDescriptor::default(), None) + .block_on() + .expect("failed to create device"); }); #[cfg(not(all(target_arch = "wasm32", not(target_os = "emscripten"))))] @@ -109,7 +87,7 @@ static REQUEST_DEVICE_ERROR_MESSAGE_NATIVE: GpuTestConfiguration = async fn request_device_error_message() { // Not using initialize_test() 
because that doesn't let us catch the error // nor .await anything - let (_instance, adapter, _surface_guard) = wgpu_test::initialize_adapter(0, false).await; + let (_instance, adapter, _surface_guard) = wgpu_test::initialize_adapter(None, false).await; let device_error = adapter .request_device( @@ -673,33 +651,6 @@ static DEVICE_DROP_THEN_LOST: GpuTestConfiguration = GpuTestConfiguration::new() ); }); -#[gpu_test] -static DEVICE_INVALID_THEN_SET_LOST_CALLBACK: GpuTestConfiguration = GpuTestConfiguration::new() - .parameters(TestParameters::default().expect_fail(FailureCase::webgl2())) - .run_sync(|ctx| { - // This test checks that when the device is invalid, a subsequent call - // to set the device lost callback will immediately call the callback. - // Invalidating the device is done via a testing-only method. Fails on - // webgl because webgl doesn't implement make_invalid. - - // Make the device invalid. - ctx.device.make_invalid(); - - static WAS_CALLED: AtomicBool = AtomicBool::new(false); - - // Set a LoseDeviceCallback on the device. - let callback = Box::new(|reason, _m| { - WAS_CALLED.store(true, std::sync::atomic::Ordering::SeqCst); - assert_eq!(reason, wgt::DeviceLostReason::DeviceInvalid); - }); - ctx.device.set_device_lost_callback(callback); - - assert!( - WAS_CALLED.load(std::sync::atomic::Ordering::SeqCst), - "Device lost callback should have been called." 
- ); - }); - #[gpu_test] static DEVICE_LOST_REPLACED_CALLBACK: GpuTestConfiguration = GpuTestConfiguration::new() .parameters(TestParameters::default()) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 75de0776e8..84879efda3 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -194,7 +194,7 @@ async fn draw_test_with_reports( }); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bg, &[]); + rpass.set_bind_group(0, Some(&bg), &[]); let global_report = ctx.instance.generate_report().unwrap(); let report = global_report.hub_report(); diff --git a/tests/tests/nv12_texture/mod.rs b/tests/tests/nv12_texture/mod.rs index 6ded163a3a..d6af8496f7 100644 --- a/tests/tests/nv12_texture/mod.rs +++ b/tests/tests/nv12_texture/mod.rs @@ -115,7 +115,7 @@ static NV12_TEXTURE_CREATION_SAMPLING: GpuTestConfiguration = GpuTestConfigurati occlusion_query_set: None, }); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.draw(0..4, 0..1); drop(rpass); ctx.queue.submit(Some(encoder.finish())); diff --git a/tests/tests/occlusion_query/mod.rs b/tests/tests/occlusion_query/mod.rs index a9b1f12649..20c7fff82b 100644 --- a/tests/tests/occlusion_query/mod.rs +++ b/tests/tests/occlusion_query/mod.rs @@ -1,4 +1,4 @@ -use std::borrow::Cow; +use std::{borrow::Cow, mem::size_of}; use wgpu_test::{gpu_test, FailureCase, GpuTestConfiguration, TestParameters}; #[gpu_test] @@ -100,7 +100,7 @@ static OCCLUSION_QUERY: GpuTestConfiguration = GpuTestConfiguration::new() // Resolve query set to buffer let query_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { label: Some("Query buffer"), - size: std::mem::size_of::() as u64 * 3, + size: size_of::() as u64 * 3, usage: wgpu::BufferUsages::QUERY_RESOLVE | wgpu::BufferUsages::COPY_SRC, mapped_at_creation: false, }); diff --git a/tests/tests/partially_bounded_arrays/mod.rs b/tests/tests/partially_bounded_arrays/mod.rs index 
195fd88dd4..4e6d6fc097 100644 --- a/tests/tests/partially_bounded_arrays/mod.rs +++ b/tests/tests/partially_bounded_arrays/mod.rs @@ -90,7 +90,7 @@ static PARTIALLY_BOUNDED_ARRAY: GpuTestConfiguration = GpuTestConfiguration::new timestamp_writes: None, }); cpass.set_pipeline(&compute_pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.dispatch_workgroups(1, 1, 1); } diff --git a/tests/tests/pipeline.rs b/tests/tests/pipeline.rs index 4c3888a210..8e9c91e527 100644 --- a/tests/tests/pipeline.rs +++ b/tests/tests/pipeline.rs @@ -1,44 +1,16 @@ -use wgpu_test::{fail, gpu_test, FailureCase, GpuTestConfiguration, TestParameters}; +use wgpu_test::{fail, gpu_test, GpuTestConfiguration, TestParameters}; -// Create an invalid shader and a compute pipeline that uses it -// with a default bindgroup layout, and then ask for that layout. -// Validation should fail, but wgpu should not panic. -#[gpu_test] -static PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration = GpuTestConfiguration::new() - .parameters( - TestParameters::default() - // https://github.com/gfx-rs/wgpu/issues/4167 - .expect_fail(FailureCase::always().panic("Pipeline is invalid")), - ) - .run_sync(|ctx| { - ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); - - fail( - &ctx.device, - || { - let module = ctx - .device - .create_shader_module(wgpu::ShaderModuleDescriptor { - label: None, - source: wgpu::ShaderSource::Wgsl("not valid wgsl".into()), - }); - - let pipeline = - ctx.device - .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { - label: Some("mandelbrot compute pipeline"), - layout: None, - module: &module, - entry_point: Some("doesn't exist"), - compilation_options: Default::default(), - cache: None, - }); +const INVALID_SHADER_DESC: wgpu::ShaderModuleDescriptor = wgpu::ShaderModuleDescriptor { + label: Some("invalid shader"), + source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed("not valid wgsl")), +}; - 
pipeline.get_bind_group_layout(0); - }, - None, - ); - }); +const TRIVIAL_COMPUTE_SHADER_DESC: wgpu::ShaderModuleDescriptor = wgpu::ShaderModuleDescriptor { + label: Some("trivial compute shader"), + source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed( + "@compute @workgroup_size(1) fn main() {}", + )), +}; const TRIVIAL_VERTEX_SHADER_DESC: wgpu::ShaderModuleDescriptor = wgpu::ShaderModuleDescriptor { label: Some("trivial vertex shader"), @@ -47,6 +19,161 @@ const TRIVIAL_VERTEX_SHADER_DESC: wgpu::ShaderModuleDescriptor = wgpu::ShaderMod )), }; +const TRIVIAL_FRAGMENT_SHADER_DESC: wgpu::ShaderModuleDescriptor = wgpu::ShaderModuleDescriptor { + label: Some("trivial fragment shader"), + source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed( + "@fragment fn main() -> @location(0) vec4 { return vec4(0); }", + )), +}; + +// Create an invalid shader and a compute pipeline that uses it +// with a default bindgroup layout, and then ask for that layout. +// Validation should fail, but wgpu should not panic. 
+#[gpu_test] +static COMPUTE_PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration = + GpuTestConfiguration::new() + .parameters(TestParameters::default()) + .run_sync(|ctx| { + ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + + fail( + &ctx.device, + || { + let module = ctx.device.create_shader_module(INVALID_SHADER_DESC); + + let pipeline = + ctx.device + .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { + label: Some("compute pipeline"), + layout: None, + module: &module, + entry_point: Some("doesn't exist"), + compilation_options: Default::default(), + cache: None, + }); + + // https://github.com/gfx-rs/wgpu/issues/4167 this used to panic + pipeline.get_bind_group_layout(0); + }, + Some("Shader 'invalid shader' parsing error"), + ); + }); + +#[gpu_test] +static COMPUTE_PIPELINE_DEFAULT_LAYOUT_BAD_BGL_INDEX: GpuTestConfiguration = + GpuTestConfiguration::new() + .parameters(TestParameters::default().test_features_limits()) + .run_sync(|ctx| { + ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + + fail( + &ctx.device, + || { + let module = ctx.device.create_shader_module(TRIVIAL_COMPUTE_SHADER_DESC); + + let pipeline = + ctx.device + .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { + label: Some("compute pipeline"), + layout: None, + module: &module, + entry_point: Some("main"), + compilation_options: Default::default(), + cache: None, + }); + + pipeline.get_bind_group_layout(0); + }, + Some("Invalid group index 0"), + ); + }); + +#[gpu_test] +static RENDER_PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration = + GpuTestConfiguration::new() + .parameters(TestParameters::default()) + .run_sync(|ctx| { + ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + + fail( + &ctx.device, + || { + let module = ctx.device.create_shader_module(INVALID_SHADER_DESC); + + let pipeline = + ctx.device + .create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("render pipeline"), + layout: None, + vertex: 
wgpu::VertexState { + module: &module, + entry_point: Some("doesn't exist"), + compilation_options: Default::default(), + buffers: &[], + }, + primitive: Default::default(), + depth_stencil: None, + multisample: Default::default(), + fragment: None, + multiview: None, + cache: None, + }); + + pipeline.get_bind_group_layout(0); + }, + Some("Shader 'invalid shader' parsing error"), + ); + }); + +#[gpu_test] +static RENDER_PIPELINE_DEFAULT_LAYOUT_BAD_BGL_INDEX: GpuTestConfiguration = + GpuTestConfiguration::new() + .parameters(TestParameters::default().test_features_limits()) + .run_sync(|ctx| { + ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + + fail( + &ctx.device, + || { + let vs_module = ctx.device.create_shader_module(TRIVIAL_VERTEX_SHADER_DESC); + let fs_module = ctx + .device + .create_shader_module(TRIVIAL_FRAGMENT_SHADER_DESC); + + let pipeline = + ctx.device + .create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("render pipeline"), + layout: None, + vertex: wgpu::VertexState { + module: &vs_module, + entry_point: Some("main"), + compilation_options: Default::default(), + buffers: &[], + }, + primitive: Default::default(), + depth_stencil: None, + multisample: Default::default(), + fragment: Some(wgpu::FragmentState { + module: &fs_module, + entry_point: Some("main"), + compilation_options: Default::default(), + targets: &[Some(wgpu::ColorTargetState { + format: wgpu::TextureFormat::Rgba8Unorm, + blend: None, + write_mask: wgpu::ColorWrites::ALL, + })], + }), + multiview: None, + cache: None, + }); + + pipeline.get_bind_group_layout(0); + }, + Some("Invalid group index 0"), + ); + }); + #[gpu_test] static NO_TARGETLESS_RENDER: GpuTestConfiguration = GpuTestConfiguration::new() .parameters(TestParameters::default()) diff --git a/tests/tests/pipeline_cache.rs b/tests/tests/pipeline_cache.rs index 67e9e68270..c88a871c75 100644 --- a/tests/tests/pipeline_cache.rs +++ b/tests/tests/pipeline_cache.rs @@ -32,7 +32,7 @@ fn shader() -> 
String { r#" @group(0) @binding(0) var output: array; - + @compute @workgroup_size(1) fn main() {{ {body} @@ -167,7 +167,7 @@ async fn validate_pipeline( timestamp_writes: None, }); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, bind_group, &[]); + cpass.set_bind_group(0, Some(bind_group), &[]); cpass.dispatch_workgroups(1, 1, 1); } diff --git a/tests/tests/poll.rs b/tests/tests/poll.rs index 7e99cbcd7d..aeea2617f6 100644 --- a/tests/tests/poll.rs +++ b/tests/tests/poll.rs @@ -46,7 +46,7 @@ fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer { .create_command_encoder(&CommandEncoderDescriptor::default()); let mut cpass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::default()); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); drop(cpass); cmd_buf.finish() diff --git a/tests/tests/push_constants.rs b/tests/tests/push_constants.rs index 905578d533..047fe5c8f2 100644 --- a/tests/tests/push_constants.rs +++ b/tests/tests/push_constants.rs @@ -119,7 +119,7 @@ async fn partial_update_test(ctx: TestingContext) { timestamp_writes: None, }); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); // -- Dispatch 0 -- diff --git a/tests/tests/regression/issue_3349.rs b/tests/tests/regression/issue_3349.rs index 21929bd9b7..b1361722fd 100644 --- a/tests/tests/regression/issue_3349.rs +++ b/tests/tests/regression/issue_3349.rs @@ -163,7 +163,7 @@ async fn multi_stage_data_binding_test(ctx: TestingContext) { }); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bg, &[]); + rpass.set_bind_group(0, Some(&bg), &[]); rpass.set_push_constants( wgpu::ShaderStages::VERTEX_FRAGMENT, 0, diff --git a/tests/tests/render_pass_ownership.rs b/tests/tests/render_pass_ownership.rs index 502375e736..5086d38d91 100644 --- a/tests/tests/render_pass_ownership.rs +++ b/tests/tests/render_pass_ownership.rs @@ -9,7 +9,7 @@ //! 
* rpass.multi_draw_indirect_count //! * rpass.multi_draw_indexed_indirect_count //! -use std::num::NonZeroU64; +use std::{mem::size_of, num::NonZeroU64}; use wgpu::util::DeviceExt as _; use wgpu_test::{gpu_test, valid, GpuTestConfiguration, TestParameters, TestingContext}; @@ -87,7 +87,7 @@ async fn render_pass_resource_ownership(ctx: TestingContext) { drop(depth_stencil_view); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.set_vertex_buffer(0, vertex_buffer.slice(..)); rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32); rpass.begin_occlusion_query(0); @@ -163,7 +163,7 @@ async fn render_pass_query_set_ownership_pipeline_statistics(ctx: TestingContext ..Default::default() }); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.set_vertex_buffer(0, vertex_buffer.slice(..)); rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32); rpass.begin_pipeline_statistics_query(&query_set, 0); @@ -242,7 +242,7 @@ async fn render_pass_query_set_ownership_timestamps(ctx: TestingContext) { rpass.write_timestamp(&query_set_write_timestamp, 0); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.set_vertex_buffer(0, vertex_buffer.slice(..)); rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32); rpass.draw(0..3, 0..1); @@ -305,7 +305,7 @@ async fn render_pass_keep_encoder_alive(ctx: TestingContext) { // Record some a draw command. 
rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.set_vertex_buffer(0, vertex_buffer.slice(..)); rpass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32); rpass.draw(0..3, 0..1); @@ -367,7 +367,7 @@ fn resource_setup(ctx: &TestingContext) -> ResourceSetup { source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), }); - let buffer_size = 4 * std::mem::size_of::() as u64; + let buffer_size = 4 * size_of::() as u64; let bgl = ctx .device @@ -418,7 +418,7 @@ fn resource_setup(ctx: &TestingContext) -> ResourceSetup { let vertex_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { label: Some("vertex_buffer"), usage: wgpu::BufferUsages::VERTEX, - size: std::mem::size_of::() as u64 * vertex_count as u64, + size: size_of::() as u64 * vertex_count as u64, mapped_at_creation: false, }); diff --git a/tests/tests/resource_error.rs b/tests/tests/resource_error.rs index fc7e062f4c..d071053ebd 100644 --- a/tests/tests/resource_error.rs +++ b/tests/tests/resource_error.rs @@ -17,21 +17,16 @@ static BAD_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(| Some("`map` usage can only be combined with the opposite `copy`"), ); - let error = match ctx.adapter_info.backend.to_str() { - "vulkan" | "vk" => "bufferid id(0,1,vk) is invalid", - "dx12" | "d3d12" => "bufferid id(0,1,d3d12) is invalid", - "metal" | "mtl" => "bufferid id(0,1,mtl) is invalid", - "opengl" | "gles" | "gl" => "bufferid id(0,1,gl) is invalid", - "webgpu" => "bufferid id(0,1,webgpu) is invalid", - b => b, - }; - fail( &ctx.device, || buffer.slice(..).map_async(wgpu::MapMode::Write, |_| {}), - Some(error), + Some("Buffer with '' label is invalid"), + ); + fail( + &ctx.device, + || buffer.unmap(), + Some("Buffer with '' label is invalid"), ); - fail(&ctx.device, || buffer.unmap(), Some(error)); valid(&ctx.device, || buffer.destroy()); valid(&ctx.device, || buffer.destroy()); }); @@ -59,21 +54,12 @@ 
static BAD_TEXTURE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync( Some("dimension x is zero"), ); - let error = match ctx.adapter_info.backend.to_str() { - "vulkan" | "vk" => "textureid id(0,1,vk) is invalid", - "dx12" | "d3d12" => "textureid id(0,1,d3d12) is invalid", - "metal" | "mtl" => "textureid id(0,1,mtl) is invalid", - "opengl" | "gles" | "gl" => "textureid id(0,1,gl) is invalid", - "webgpu" => "textureid id(0,1,webgpu) is invalid", - b => b, - }; - fail( &ctx.device, || { let _ = texture.create_view(&wgpu::TextureViewDescriptor::default()); }, - Some(error), + Some("Texture with '' label is invalid"), ); valid(&ctx.device, || texture.destroy()); valid(&ctx.device, || texture.destroy()); diff --git a/tests/tests/shader/mod.rs b/tests/tests/shader/mod.rs index 7d6ed7aaaa..2e7dfd9424 100644 --- a/tests/tests/shader/mod.rs +++ b/tests/tests/shader/mod.rs @@ -349,7 +349,7 @@ async fn shader_input_output_test( timestamp_writes: None, }); cpass.set_pipeline(&pipeline); - cpass.set_bind_group(0, &bg, &[]); + cpass.set_bind_group(0, Some(&bg), &[]); if let InputStorageType::PushConstant = storage_type { cpass.set_push_constants(0, bytemuck::cast_slice(&test.input_values)) diff --git a/tests/tests/shader/zero_init_workgroup_mem.rs b/tests/tests/shader/zero_init_workgroup_mem.rs index beacb4fcc8..084498770e 100644 --- a/tests/tests/shader/zero_init_workgroup_mem.rs +++ b/tests/tests/shader/zero_init_workgroup_mem.rs @@ -119,7 +119,7 @@ static ZERO_INIT_WORKGROUP_MEMORY: GpuTestConfiguration = GpuTestConfiguration:: cpass.set_pipeline(&pipeline_read); for i in 0..NR_OF_DISPATCHES { - cpass.set_bind_group(0, &bg, &[i * BUFFER_BINDING_SIZE]); + cpass.set_bind_group(0, Some(&bg), &[i * BUFFER_BINDING_SIZE]); cpass.dispatch_workgroups(DISPATCH_SIZE.0, DISPATCH_SIZE.1, DISPATCH_SIZE.2); } drop(cpass); diff --git a/tests/tests/shader_view_format/mod.rs b/tests/tests/shader_view_format/mod.rs index b2bc0426eb..052573bc0d 100644 --- 
a/tests/tests/shader_view_format/mod.rs +++ b/tests/tests/shader_view_format/mod.rs @@ -148,7 +148,7 @@ async fn reinterpret( occlusion_query_set: None, }); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bind_group, &[]); + rpass.set_bind_group(0, Some(&bind_group), &[]); rpass.draw(0..3, 0..1); drop(rpass); ctx.queue.submit(Some(encoder.finish())); diff --git a/tests/tests/subgroup_operations/mod.rs b/tests/tests/subgroup_operations/mod.rs index 7696fb78df..ecf8adfd7d 100644 --- a/tests/tests/subgroup_operations/mod.rs +++ b/tests/tests/subgroup_operations/mod.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, num::NonZeroU64}; +use std::{borrow::Cow, mem::size_of, num::NonZeroU64}; use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters}; @@ -35,7 +35,7 @@ static SUBGROUP_OPERATIONS: GpuTestConfiguration = GpuTestConfiguration::new() let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: THREAD_COUNT * std::mem::size_of::() as u64, + size: THREAD_COUNT * size_of::() as u64, usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::COPY_SRC, @@ -50,9 +50,7 @@ static SUBGROUP_OPERATIONS: GpuTestConfiguration = GpuTestConfiguration::new() ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Storage { read_only: false }, has_dynamic_offset: false, - min_binding_size: NonZeroU64::new( - THREAD_COUNT * std::mem::size_of::() as u64, - ), + min_binding_size: NonZeroU64::new(THREAD_COUNT * size_of::() as u64), }, count: None, }], @@ -95,7 +93,7 @@ static SUBGROUP_OPERATIONS: GpuTestConfiguration = GpuTestConfiguration::new() timestamp_writes: None, }); cpass.set_pipeline(&compute_pipeline); - cpass.set_bind_group(0, &bind_group, &[]); + cpass.set_bind_group(0, Some(&bind_group), &[]); cpass.dispatch_workgroups(1, 1, 1); } ctx.queue.submit(Some(encoder.finish())); diff --git a/tests/tests/vertex_formats/mod.rs b/tests/tests/vertex_formats/mod.rs index 60ef177efa..d447ac8f7e 100644 --- 
a/tests/tests/vertex_formats/mod.rs +++ b/tests/tests/vertex_formats/mod.rs @@ -1,6 +1,6 @@ //! Tests that vertex formats pass through to vertex shaders accurately. -use std::num::NonZeroU64; +use std::{mem::size_of_val, num::NonZeroU64}; use wgpu::util::{BufferInitDescriptor, DeviceExt}; @@ -273,7 +273,7 @@ async fn vertex_formats_common(ctx: TestingContext, tests: &[Test<'_>]) { let pipeline = ctx.device.create_render_pipeline(&pipeline_desc); let expected = test.checksums; - let buffer_size = (std::mem::size_of_val(&expected[0]) * expected.len()) as u64; + let buffer_size = (size_of_val(&expected[0]) * expected.len()) as u64; let cpu_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { label: None, size: buffer_size, @@ -315,7 +315,7 @@ async fn vertex_formats_common(ctx: TestingContext, tests: &[Test<'_>]) { rpass.set_vertex_buffer(0, buffer_input.slice(..)); rpass.set_pipeline(&pipeline); - rpass.set_bind_group(0, &bg, &[]); + rpass.set_bind_group(0, Some(&bg), &[]); // Draw three vertices and no instance, which is enough to generate the // checksums. diff --git a/tests/tests/vertex_indices/mod.rs b/tests/tests/vertex_indices/mod.rs index 5a847d0fbb..5c5ce8a202 100644 --- a/tests/tests/vertex_indices/mod.rs +++ b/tests/tests/vertex_indices/mod.rs @@ -3,7 +3,7 @@ //! We need tests for these as the backends use various schemes to work around the lack //! of support for things like `gl_BaseInstance` in shaders. 
-use std::{num::NonZeroU64, ops::Range}; +use std::{mem::size_of_val, num::NonZeroU64, ops::Range}; use itertools::Itertools; use strum::IntoEnumIterator; @@ -341,7 +341,7 @@ async fn vertex_index_common(ctx: TestingContext) { let expected = test.expectation(&ctx); - let buffer_size = (std::mem::size_of_val(&expected[0]) * expected.len()) as u64; + let buffer_size = (size_of_val(&expected[0]) * expected.len()) as u64; let cpu_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { label: None, size: buffer_size, @@ -409,7 +409,7 @@ async fn vertex_index_common(ctx: TestingContext) { render_encoder.set_vertex_buffer(1, identity_buffer.slice(..)); render_encoder.set_index_buffer(identity_buffer.slice(..), wgpu::IndexFormat::Uint32); render_encoder.set_pipeline(pipeline); - render_encoder.set_bind_group(0, &bg, &[]); + render_encoder.set_bind_group(0, Some(&bg), &[]); let draws = test.case.draws(); diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index 22d813c4cb..1b9ce98488 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -57,6 +57,9 @@ serde = ["dep:serde", "wgt/serde", "arrayvec/serde"] ## Enable API tracing. trace = ["dep:ron", "serde", "naga/serialize"] +## Enable lock order observation. 
+observe_locks = ["dep:ron", "serde/serde_derive"] + ## Enable API replaying replay = ["serde", "naga/deserialize"] diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index d8a8b32d2f..2357a8c776 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -6,8 +6,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, pipeline::{ComputePipeline, RenderPipeline}, resource::{ - Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, MissingTextureUsageError, - ResourceErrorIdent, Sampler, TextureView, TrackingData, + Buffer, DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + MissingTextureUsageError, ResourceErrorIdent, Sampler, TextureView, TrackingData, }, resource_log, snatch::{SnatchGuard, Snatchable}, @@ -79,14 +79,6 @@ pub enum CreateBindGroupLayoutError { pub enum CreateBindGroupError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Bind group layout is invalid")] - InvalidLayout, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureViewId {0:?} is invalid")] - InvalidTextureViewId(TextureViewId), - #[error("SamplerId {0:?} is invalid")] - InvalidSamplerId(SamplerId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error( @@ -188,6 +180,8 @@ pub enum CreateBindGroupError { StorageReadNotSupported(wgt::TextureFormat), #[error(transparent)] ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -545,8 +539,6 @@ impl BindGroupLayout { pub enum CreatePipelineLayoutError { #[error(transparent)] Device(#[from] DeviceError), - #[error("BindGroupLayoutId {0:?} is invalid")] - InvalidBindGroupLayoutId(BindGroupLayoutId), #[error( "Push constant at index {index} has range bound {bound} not aligned to {}", wgt::PUSH_CONSTANT_ALIGNMENT @@ -570,6 +562,8 @@ pub 
enum CreatePipelineLayoutError { TooManyBindings(BindingTypeMaxCountError), #[error("Bind group layout count {actual} exceeds device bind group limit {max}")] TooManyGroups { actual: usize, max: usize }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -884,6 +878,16 @@ pub(crate) fn buffer_binding_type_alignment( } } +pub(crate) fn buffer_binding_type_bounds_check_alignment( + alignments: &hal::Alignments, + binding_type: wgt::BufferBindingType, +) -> wgt::BufferAddress { + match binding_type { + wgt::BufferBindingType::Uniform => alignments.uniform_bounds_check_alignment.get(), + wgt::BufferBindingType::Storage { .. } => wgt::COPY_BUFFER_ALIGNMENT, + } +} + #[derive(Debug)] pub struct BindGroup { pub(crate) raw: Snatchable>, @@ -993,10 +997,10 @@ crate::impl_trackable!(BindGroup); #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetBindGroupLayoutError { - #[error("Pipeline is invalid")] - InvalidPipeline, #[error("Invalid group index {0}")] InvalidGroupIndex(u32), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error, Eq, PartialEq)] diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 56f7d551b0..b0d90976dd 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -92,7 +92,10 @@ use crate::{ id, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{PipelineFlags, RenderPipeline, VertexStep}, - resource::{Buffer, DestroyedResourceError, Labeled, ParentDevice, TrackingData}, + resource::{ + Buffer, DestroyedResourceError, Fallible, InvalidResourceError, Labeled, ParentDevice, + TrackingData, + }, resource_log, snatch::SnatchGuard, track::RenderBundleScope, @@ -100,7 +103,7 @@ use crate::{ }; use arrayvec::ArrayVec; -use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range, sync::Arc}; +use std::{borrow::Cow, mem::size_of, num::NonZeroU32, 
ops::Range, sync::Arc}; use thiserror::Error; use super::{ @@ -578,15 +581,20 @@ impl RenderBundleEncoder { fn set_bind_group( state: &mut State, - bind_group_guard: &crate::lock::RwLockReadGuard>, + bind_group_guard: &crate::storage::Storage>, dynamic_offsets: &[u32], index: u32, num_dynamic_offsets: usize, - bind_group_id: id::Id, + bind_group_id: Option>, ) -> Result<(), RenderBundleErrorInner> { - let bind_group = bind_group_guard - .get_owned(bind_group_id) - .map_err(|_| RenderCommandError::InvalidBindGroupId(bind_group_id))?; + if bind_group_id.is_none() { + // TODO: do appropriate cleanup for null bind_group. + return Ok(()); + } + + let bind_group_id = bind_group_id.unwrap(); + + let bind_group = bind_group_guard.get(bind_group_id).get()?; bind_group.same_device(&state.device)?; @@ -623,15 +631,13 @@ fn set_bind_group( fn set_pipeline( state: &mut State, - pipeline_guard: &crate::lock::RwLockReadGuard>, + pipeline_guard: &crate::storage::Storage>, context: &RenderPassContext, is_depth_read_only: bool, is_stencil_read_only: bool, pipeline_id: id::Id, ) -> Result<(), RenderBundleErrorInner> { - let pipeline = pipeline_guard - .get_owned(pipeline_id) - .map_err(|_| RenderCommandError::InvalidPipelineId(pipeline_id))?; + let pipeline = pipeline_guard.get(pipeline_id).get()?; pipeline.same_device(&state.device)?; @@ -666,15 +672,13 @@ fn set_pipeline( fn set_index_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, index_format: wgt::IndexFormat, offset: u64, size: Option, ) -> Result<(), RenderBundleErrorInner> { - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -701,7 +705,7 @@ fn set_index_buffer( fn set_vertex_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, slot: u32, 
buffer_id: id::Id, offset: u64, @@ -716,9 +720,7 @@ fn set_vertex_buffer( .into()); } - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -845,7 +847,7 @@ fn draw_indexed( fn multi_draw_indirect( state: &mut State, dynamic_offsets: &[u32], - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, offset: u64, indexed: bool, @@ -857,9 +859,7 @@ fn multi_draw_indirect( let pipeline = state.pipeline()?; let used_bind_groups = pipeline.used_bind_groups; - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -873,7 +873,7 @@ fn multi_draw_indirect( .buffer_memory_init_actions .extend(buffer.initialization_status.read().create_action( &buffer, - offset..(offset + mem::size_of::() as u64), + offset..(offset + size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, )); @@ -981,12 +981,17 @@ impl RenderBundle { num_dynamic_offsets, bind_group, } => { - let raw_bg = bind_group.try_raw(snatch_guard)?; + let mut bg = None; + if bind_group.is_some() { + let bind_group = bind_group.as_ref().unwrap(); + let raw_bg = bind_group.try_raw(snatch_guard)?; + bg = Some(raw_bg); + } unsafe { raw.set_bind_group( pipeline_layout.as_ref().unwrap().raw(), *index, - raw_bg, + bg, &offsets[..*num_dynamic_offsets], ) }; @@ -1501,7 +1506,7 @@ impl State { let offsets = &contents.dynamic_offsets; return Some(ArcRenderCommand::SetBindGroup { index: i.try_into().unwrap(), - bind_group: contents.bind_group.clone(), + bind_group: Some(contents.bind_group.clone()), num_dynamic_offsets: offsets.end - offsets.start, }); } @@ -1526,6 +1531,8 @@ pub(super) enum RenderBundleErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error(transparent)] Bind(#[from] BindError), + 
#[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderBundleErrorInner @@ -1577,11 +1584,10 @@ pub mod bundle_ffi { /// /// This function is unsafe as there is no guarantee that the given pointer is /// valid for `offset_length` elements. - #[no_mangle] - pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group( + pub unsafe fn wgpu_render_bundle_set_bind_group( bundle: &mut RenderBundleEncoder, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: *const DynamicOffset, offset_length: usize, ) { @@ -1605,8 +1611,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_set_pipeline( + pub fn wgpu_render_bundle_set_pipeline( bundle: &mut RenderBundleEncoder, pipeline_id: id::RenderPipelineId, ) { @@ -1620,8 +1625,7 @@ pub mod bundle_ffi { .push(RenderCommand::SetPipeline(pipeline_id)); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_set_vertex_buffer( + pub fn wgpu_render_bundle_set_vertex_buffer( bundle: &mut RenderBundleEncoder, slot: u32, buffer_id: id::BufferId, @@ -1636,8 +1640,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_set_index_buffer( + pub fn wgpu_render_bundle_set_index_buffer( encoder: &mut RenderBundleEncoder, buffer: id::BufferId, index_format: IndexFormat, @@ -1651,8 +1654,7 @@ pub mod bundle_ffi { /// /// This function is unsafe as there is no guarantee that the given pointer is /// valid for `data` elements. 
- #[no_mangle] - pub unsafe extern "C" fn wgpu_render_bundle_set_push_constants( + pub unsafe fn wgpu_render_bundle_set_push_constants( pass: &mut RenderBundleEncoder, stages: wgt::ShaderStages, offset: u32, @@ -1688,8 +1690,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_draw( + pub fn wgpu_render_bundle_draw( bundle: &mut RenderBundleEncoder, vertex_count: u32, instance_count: u32, @@ -1704,8 +1705,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_draw_indexed( + pub fn wgpu_render_bundle_draw_indexed( bundle: &mut RenderBundleEncoder, index_count: u32, instance_count: u32, @@ -1722,8 +1722,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_draw_indirect( + pub fn wgpu_render_bundle_draw_indirect( bundle: &mut RenderBundleEncoder, buffer_id: id::BufferId, offset: BufferAddress, @@ -1736,8 +1735,7 @@ pub mod bundle_ffi { }); } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_draw_indexed_indirect( + pub fn wgpu_render_bundle_draw_indexed_indirect( bundle: &mut RenderBundleEncoder, buffer_id: id::BufferId, offset: BufferAddress, @@ -1754,16 +1752,14 @@ pub mod bundle_ffi { /// /// This function is unsafe as there is no guarantee that the given `label` /// is a valid null-terminated string. - #[no_mangle] - pub unsafe extern "C" fn wgpu_render_bundle_push_debug_group( + pub unsafe fn wgpu_render_bundle_push_debug_group( _bundle: &mut RenderBundleEncoder, _label: RawString, ) { //TODO } - #[no_mangle] - pub extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) { + pub fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) { //TODO } @@ -1771,8 +1767,7 @@ pub mod bundle_ffi { /// /// This function is unsafe as there is no guarantee that the given `label` /// is a valid null-terminated string. 
- #[no_mangle] - pub unsafe extern "C" fn wgpu_render_bundle_insert_debug_marker( + pub unsafe fn wgpu_render_bundle_insert_debug_marker( _bundle: &mut RenderBundleEncoder, _label: RawString, ) { diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 944dd40af4..3a12a54117 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -11,8 +11,8 @@ use crate::{ id::{BufferId, CommandEncoderId, TextureId}, init_tracker::{MemoryInitKind, TextureInitRange}, resource::{ - DestroyedResourceError, Labeled, MissingBufferUsageError, ParentDevice, ResourceErrorIdent, - Texture, TextureClearMode, + DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + ParentDevice, ResourceErrorIdent, Texture, TextureClearMode, }, snatch::SnatchGuard, track::{TextureSelector, TextureTrackerSetSingle}, @@ -27,10 +27,6 @@ use wgt::{math::align_to, BufferAddress, BufferUsages, ImageSubresourceRange, Te pub enum ClearError { #[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")] MissingClearTextureFeature, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("{0} can not be cleared")] @@ -75,6 +71,8 @@ whereas subesource range specified start {subresource_base_array_layer} and coun Device(#[from] DeviceError), #[error(transparent)] CommandEncoderError(#[from] CommandEncoderError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -90,27 +88,18 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = 
cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let dst_buffer = hub - .buffers - .get(dst) - .map_err(|_| ClearError::InvalidBufferId(dst))?; + let dst_buffer = hub.buffers.get(dst).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -163,7 +152,7 @@ impl Global { // actual hal barrier & operation let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard)); - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_buffers(dst_barrier.as_slice()); cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset); @@ -182,17 +171,11 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -206,10 +189,7 @@ impl Global { return Err(ClearError::MissingClearTextureFeature); } - let dst_texture = hub - .textures - .get(dst) - .map_err(|_| ClearError::InvalidTextureId(dst))?; + let dst_texture = hub.textures.get(dst).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -249,7 +229,7 @@ impl Global { let device = &cmd_buf.device; device.check_is_valid()?; - let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker()?; + let (encoder, tracker) = 
cmd_buf_data.open_encoder_and_tracker(&cmd_buf.device)?; let snatch_guard = device.snatchable_lock.read(); clear_texture( diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 5f23fb7221..5de03917d2 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -17,8 +17,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, MemoryInitKind}, pipeline::ComputePipeline, resource::{ - self, Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, ParentDevice, - Trackable, + self, Buffer, DestroyedResourceError, InvalidResourceError, Labeled, + MissingBufferUsageError, ParentDevice, Trackable, }, snatch::SnatchGuard, track::{ResourceUsageCompatibilityError, Tracker, TrackerIndex, UsageScope}, @@ -29,7 +29,7 @@ use thiserror::Error; use wgt::{BufferAddress, DynamicOffset}; use std::sync::Arc; -use std::{fmt, mem, str}; +use std::{fmt, mem::size_of, str}; use super::{bind::BinderError, memory_init::CommandBufferTextureMemoryActions}; @@ -132,14 +132,8 @@ pub enum ComputePassErrorInner { Encoder(#[from] CommandEncoderError), #[error("Parent encoder is invalid")] InvalidParentEncoder, - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, - #[error("ComputePipelineId {0:?} is invalid")] - InvalidPipelineId(id::ComputePipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")] @@ -148,8 +142,6 @@ pub enum ComputePassErrorInner { end_offset: u64, buffer_size: u64, }, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] ResourceUsageCompatibility(#[from] 
ResourceUsageCompatibilityError), #[error(transparent)] @@ -176,6 +168,8 @@ pub enum ComputePassErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered when performing a compute pass. @@ -298,22 +292,21 @@ impl Global { let make_err = |e, arc_desc| (ComputePass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let Ok(query_set) = hub.query_sets.get(tw.query_set) else { - return make_err( - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set), - arc_desc, - ); + let query_set = match hub.query_sets.get(tw.query_set).get() { + Ok(query_set) => query_set, + Err(e) => return make_err(e.into(), arc_desc), }; Some(ArcPassTimestampWrites { @@ -328,25 +321,8 @@ impl Global { (ComputePass::new(Some(cmd_buf), arc_desc), None) } - pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { - let scope = PassErrorScope::Pass; - - let cmd_buf = pass - .parent - .as_ref() - .ok_or(ComputePassErrorInner::InvalidParentEncoder) - .map_pass_err(scope)?; - - cmd_buf.unlock_encoder().map_pass_err(scope)?; - - let base = pass - .base - .take() - .ok_or(ComputePassErrorInner::PassEnded) - .map_pass_err(scope)?; - self.compute_pass_end_impl(cmd_buf, base, pass.timestamp_writes.take()) - } - + /// Note that this differs from 
[`Self::compute_pass_end`], it will + /// create a new pass, replay the commands and end the pass. #[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn compute_pass_end_with_unresolved_commands( @@ -355,19 +331,16 @@ impl Global { base: BasePass, timestamp_writes: Option<&PassTimestampWrites>, ) -> Result<(), ComputePassError> { - let hub = &self.hub; - let scope = PassErrorScope::Pass; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(scope), - }; - cmd_buf.check_recording().map_pass_err(scope)?; + let pass_scope = PassErrorScope::Pass; #[cfg(feature = "trace")] { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunComputePass { base: BasePass { @@ -382,50 +355,61 @@ impl Global { } } - let commands = - super::ComputeCommand::resolve_compute_command_ids(&self.hub, &base.commands)?; - - let timestamp_writes = if let Some(tw) = timestamp_writes { - Some(ArcPassTimestampWrites { - query_set: hub - .query_sets - .get(tw.query_set) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(tw.query_set)) - .map_pass_err(scope)?, - beginning_of_pass_write_index: tw.beginning_of_pass_write_index, - end_of_pass_write_index: tw.end_of_pass_write_index, - }) - } else { - None + let BasePass { + label, + commands, + dynamic_offsets, + string_data, + push_constant_data, + } = base; + + let (mut compute_pass, encoder_error) = self.command_encoder_create_compute_pass( + encoder_id, + &ComputePassDescriptor { + label: label.as_deref().map(std::borrow::Cow::Borrowed), + timestamp_writes, + }, + ); + if let Some(err) = encoder_error { + 
return Err(ComputePassError { + scope: pass_scope, + inner: err.into(), + }); }; - self.compute_pass_end_impl( - &cmd_buf, - BasePass { - label: base.label, - commands, - dynamic_offsets: base.dynamic_offsets, - string_data: base.string_data, - push_constant_data: base.push_constant_data, - }, - timestamp_writes, - ) + compute_pass.base = Some(BasePass { + label, + commands: super::ComputeCommand::resolve_compute_command_ids(&self.hub, &commands)?, + dynamic_offsets, + string_data, + push_constant_data, + }); + + self.compute_pass_end(&mut compute_pass) } - fn compute_pass_end_impl( - &self, - cmd_buf: &CommandBuffer, - base: BasePass, - mut timestamp_writes: Option, - ) -> Result<(), ComputePassError> { + pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { profiling::scope!("CommandEncoder::run_compute_pass"); let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(ComputePassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + + let base = pass + .base + .take() + .ok_or(ComputePassErrorInner::PassEnded) + .map_pass_err(pass_scope)?; + let device = &cmd_buf.device; device.check_is_valid().map_pass_err(pass_scope)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; @@ -433,10 +417,10 @@ impl Global { // We automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, // we need to make sure to close the previous one. 
- encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // will be reset to true if recording is done without errors *status = CommandEncoderStatus::Error; - let raw_encoder = encoder.open().map_pass_err(pass_scope)?; + let raw_encoder = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; let mut state = State { binder: Binder::new(), @@ -467,9 +451,9 @@ impl Global { state.tracker.textures.set_size(indices.textures.size()); let timestamp_writes: Option> = - if let Some(tw) = timestamp_writes.take() { + if let Some(tw) = pass.timestamp_writes.take() { tw.query_set - .same_device_as(cmd_buf) + .same_device_as(cmd_buf.as_ref()) .map_pass_err(pass_scope)?; let query_set = state.tracker.query_sets.insert_single(tw.query_set); @@ -503,7 +487,7 @@ impl Global { }; let hal_desc = hal::ComputePassDescriptor { - label: hal_label(base.label.as_deref(), self.instance.flags), + label: hal_label(base.label.as_deref(), device.instance_flags), timestamp_writes, }; @@ -617,12 +601,12 @@ impl Global { } = state; // Stop the current command buffer. - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // Create a new command buffer, which we will insert _before_ the body of the compute pass. // // Use that buffer to insert barriers and clear discarded images. - let transit = encoder.open().map_pass_err(pass_scope)?; + let transit = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, @@ -637,7 +621,9 @@ impl Global { &snatch_guard, ); // Close the command buffer, and swap it with the previous. 
- encoder.close_and_swap().map_pass_err(pass_scope)?; + encoder + .close_and_swap(&cmd_buf.device) + .map_pass_err(pass_scope)?; Ok(()) } @@ -649,10 +635,8 @@ fn set_bind_group( dynamic_offsets: &[DynamicOffset], index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, ) -> Result<(), ComputePassErrorInner> { - bind_group.same_device_as(cmd_buf)?; - let max_bind_groups = state.device.limits.max_bind_groups; if index >= max_bind_groups { return Err(ComputePassErrorInner::BindGroupIndexOutOfRange { @@ -668,7 +652,16 @@ fn set_bind_group( ); state.dynamic_offset_count += num_dynamic_offsets; + if bind_group.is_none() { + // TODO: Handle bind_group None. + return Ok(()); + } + + let bind_group = bind_group.unwrap(); let bind_group = state.tracker.bind_groups.insert_single(bind_group); + + bind_group.same_device_as(cmd_buf)?; + bind_group.validate_dynamic_bindings(index, &state.temp_offsets)?; state @@ -700,7 +693,7 @@ fn set_bind_group( state.raw_encoder.set_bind_group( pipeline_layout, index + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -745,7 +738,7 @@ fn set_pipeline( state.raw_encoder.set_bind_group( pipeline.layout.raw(), start_index as u32 + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -854,7 +847,7 @@ fn dispatch_indirect( .merge_single(&buffer, hal::BufferUses::INDIRECT)?; buffer.check_usage(wgt::BufferUsages::INDIRECT)?; - let end_offset = offset + mem::size_of::() as u64; + let end_offset = offset + size_of::() as u64; if end_offset > buffer.size { return Err(ComputePassErrorInner::IndirectBufferOverrun { offset, @@ -952,7 +945,7 @@ impl Global { &self, pass: &mut ComputePass, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: &[DynamicOffset], ) -> Result<(), ComputePassError> { let scope = PassErrorScope::SetBindGroup; @@ -973,12 +966,18 @@ impl Global { return Ok(()); } - let hub = &self.hub; - let bind_group = hub - .bind_groups - .get(bind_group_id) - 
.map_err(|_| ComputePassErrorInner::InvalidBindGroupId(bind_group_id)) - .map_pass_err(scope)?; + let mut bind_group = None; + if bind_group_id.is_some() { + let bind_group_id = bind_group_id.unwrap(); + + let hub = &self.hub; + let bg = hub + .bind_groups + .get(bind_group_id) + .get() + .map_pass_err(scope)?; + bind_group = Some(bg); + } base.commands.push(ArcComputeCommand::SetBindGroup { index, @@ -1008,7 +1007,7 @@ impl Global { let pipeline = hub .compute_pipelines .get(pipeline_id) - .map_err(|_| ComputePassErrorInner::InvalidPipelineId(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcComputeCommand::SetPipeline(pipeline)); @@ -1079,11 +1078,7 @@ impl Global { let scope = PassErrorScope::Dispatch { indirect: true }; let base = pass.base_mut(scope)?; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| ComputePassErrorInner::InvalidBufferId(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::DispatchIndirect { buffer, offset }); @@ -1150,11 +1145,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands.push(ArcComputeCommand::WriteTimestamp { query_set, @@ -1174,11 +1165,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::BeginPipelineStatisticsQuery { diff --git a/wgpu-core/src/command/compute_command.rs b/wgpu-core/src/command/compute_command.rs index e16487b7ea..67c23d9452 100644 --- 
a/wgpu-core/src/command/compute_command.rs +++ b/wgpu-core/src/command/compute_command.rs @@ -13,7 +13,7 @@ pub enum ComputeCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group_id: id::BindGroupId, + bind_group_id: Option, }, SetPipeline(id::ComputePipelineId), @@ -74,7 +74,7 @@ impl ComputeCommand { hub: &crate::hub::Hub, commands: &[ComputeCommand], ) -> Result, super::ComputePassError> { - use super::{ComputePassError, ComputePassErrorInner, PassErrorScope}; + use super::{ComputePassError, PassErrorScope}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -89,23 +89,36 @@ impl ComputeCommand { index, num_dynamic_offsets, bind_group_id, - } => ArcComputeCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| { + } => { + if bind_group_id.is_none() { + return Ok(ArcComputeCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: None, + }); + } + + let bind_group_id = bind_group_id.unwrap(); + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::SetBindGroup, - inner: ComputePassErrorInner::InvalidBindGroupId(bind_group_id), + inner: e.into(), } - })?, - }, + })?; + ArcComputeCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: Some(bg), + } + } ComputeCommand::SetPipeline(pipeline_id) => ArcComputeCommand::SetPipeline( pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| ComputePassError { + .get(pipeline_id) + .get() + .map_err(|e| ComputePassError { scope: PassErrorScope::SetPipelineCompute, - inner: ComputePassErrorInner::InvalidPipelineId(pipeline_id), + inner: e.into(), })?, ), @@ -123,10 +136,10 @@ impl ComputeCommand { ComputeCommand::DispatchIndirect { buffer_id, offset } => { ArcComputeCommand::DispatchIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { 
ComputePassError { scope: PassErrorScope::Dispatch { indirect: true }, - inner: ComputePassErrorInner::InvalidBufferId(buffer_id), + inner: e.into(), } })?, offset, @@ -147,10 +160,10 @@ impl ComputeCommand { query_set_id, query_index, } => ArcComputeCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::WriteTimestamp, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, @@ -160,10 +173,10 @@ impl ComputeCommand { query_set_id, query_index, } => ArcComputeCommand::BeginPipelineStatisticsQuery { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, @@ -185,7 +198,7 @@ pub enum ArcComputeCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, }, SetPipeline(Arc), @@ -215,6 +228,7 @@ pub enum ArcComputeCommand { }, PushDebugGroup { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, @@ -222,6 +236,7 @@ pub enum ArcComputeCommand { PopDebugGroup, InsertDebugMarker { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, diff --git a/wgpu-core/src/command/draw.rs b/wgpu-core/src/command/draw.rs index e8578bba05..dd54b60f26 100644 --- a/wgpu-core/src/command/draw.rs +++ b/wgpu-core/src/command/draw.rs @@ -1,6 +1,5 @@ use crate::{ binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError}, - id, resource::{ DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ResourceErrorIdent, @@ -68,22 +67,12 @@ pub enum DrawError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum 
RenderCommandError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), - #[error("Render bundle {0:?} is invalid")] - InvalidRenderBundle(id::RenderBundleId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, #[error("Vertex buffer index {index} is greater than the device's requested `max_vertex_buffers` limit {max}")] VertexBufferIndexOutOfRange { index: u32, max: u32 }, #[error("Dynamic buffer offset {0} does not respect device's requested `{1}` limit {2}")] UnalignedBufferOffset(u64, &'static str, u32), - #[error("RenderPipelineId {0:?} is invalid")] - InvalidPipelineId(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error("Render pipeline targets are incompatible with render pass")] IncompatiblePipelineTargets(#[from] crate::device::RenderPassCompatibilityError), #[error("{0} writes to depth, while the pass has read-only depth access")] diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 313bf813a1..42c80d3ca5 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -31,7 +31,7 @@ use crate::lock::{rank, Mutex}; use crate::snatch::SnatchGuard; use crate::init_tracker::BufferInitTrackerAction; -use crate::resource::Labeled; +use crate::resource::{InvalidResourceError, Labeled}; use crate::track::{DeviceTracker, Tracker, UsageScope}; use crate::LabelHelpers; use crate::{api_log, global::Global, id, resource_log, Label}; @@ -82,8 +82,8 @@ pub(crate) enum CommandEncoderStatus { /// /// When a `CommandEncoder` is left in this state, we have also /// returned an error result from the function that encountered - /// the problem. Future attempts to use the encoder (that is, - /// calls to [`CommandBuffer::get_encoder`]) will also return + /// the problem. 
Future attempts to use the encoder (for example, + /// calls to [`CommandBufferMutable::check_recording`]) will also return /// errors. /// /// Calling [`Global::command_encoder_finish`] in this state @@ -172,10 +172,10 @@ impl CommandEncoder { /// [l]: CommandEncoder::list /// [`transition_buffers`]: hal::CommandEncoder::transition_buffers /// [`transition_textures`]: hal::CommandEncoder::transition_textures - fn close_and_swap(&mut self) -> Result<(), DeviceError> { + fn close_and_swap(&mut self, device: &Device) -> Result<(), DeviceError> { if self.is_open { self.is_open = false; - let new = unsafe { self.raw.end_encoding()? }; + let new = unsafe { self.raw.end_encoding() }.map_err(|e| device.handle_hal_error(e))?; self.list.insert(self.list.len() - 1, new); } @@ -192,10 +192,11 @@ impl CommandEncoder { /// On return, the underlying hal encoder is closed. /// /// [l]: CommandEncoder::list - fn close(&mut self) -> Result<(), DeviceError> { + fn close(&mut self, device: &Device) -> Result<(), DeviceError> { if self.is_open { self.is_open = false; - let cmd_buf = unsafe { self.raw.end_encoding()? }; + let cmd_buf = + unsafe { self.raw.end_encoding() }.map_err(|e| device.handle_hal_error(e))?; self.list.push(cmd_buf); } @@ -215,11 +216,15 @@ impl CommandEncoder { /// Begin recording a new command buffer, if we haven't already. /// /// The underlying hal encoder is put in the "recording" state. - pub(crate) fn open(&mut self) -> Result<&mut dyn hal::DynCommandEncoder, DeviceError> { + pub(crate) fn open( + &mut self, + device: &Device, + ) -> Result<&mut dyn hal::DynCommandEncoder, DeviceError> { if !self.is_open { self.is_open = true; let hal_label = self.hal_label.as_deref(); - unsafe { self.raw.begin_encoding(hal_label)? }; + unsafe { self.raw.begin_encoding(hal_label) } + .map_err(|e| device.handle_hal_error(e))?; } Ok(self.raw.as_mut()) @@ -229,9 +234,9 @@ impl CommandEncoder { /// its own label. 
/// /// The underlying hal encoder is put in the "recording" state. - fn open_pass(&mut self, hal_label: Option<&str>) -> Result<(), DeviceError> { + fn open_pass(&mut self, hal_label: Option<&str>, device: &Device) -> Result<(), DeviceError> { self.is_open = true; - unsafe { self.raw.begin_encoding(hal_label)? }; + unsafe { self.raw.begin_encoding(hal_label) }.map_err(|e| device.handle_hal_error(e))?; Ok(()) } @@ -276,12 +281,113 @@ pub struct CommandBufferMutable { impl CommandBufferMutable { pub(crate) fn open_encoder_and_tracker( &mut self, + device: &Device, ) -> Result<(&mut dyn hal::DynCommandEncoder, &mut Tracker), DeviceError> { - let encoder = self.encoder.open()?; + let encoder = self.encoder.open(device)?; let tracker = &mut self.trackers; Ok((encoder, tracker)) } + + fn lock_encoder_impl(&mut self, lock: bool) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if lock { + self.status = CommandEncoderStatus::Locked; + } + Ok(()) + } + CommandEncoderStatus::Locked => { + // Any operation on a locked encoder is required to put it into the invalid/error state. + // See https://www.w3.org/TR/webgpu/#encoder-state-locked + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. + fn check_recording(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(false) + } + + /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. + /// + /// Call [`CommandBufferMutable::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. 
+ fn lock_encoder(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(true) + } + + /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. + /// + /// This function is the counterpart to [`CommandBufferMutable::lock_encoder`]. + /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. + fn unlock_encoder(&mut self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Locked => { + self.status = CommandEncoderStatus::Recording; + Ok(()) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + pub fn check_finished(&self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Finished => Ok(()), + _ => Err(CommandEncoderError::Invalid), + } + } + + pub(crate) fn finish(&mut self, device: &Device) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if let Err(e) = self.encoder.close(device) { + Err(e.into()) + } else { + self.status = CommandEncoderStatus::Finished; + // Note: if we want to stop tracking the swapchain texture view, + // this is the place to do it. 
+ Ok(()) + } + } + CommandEncoderStatus::Locked => { + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => { + self.encoder.discard(); + Err(CommandEncoderError::Invalid) + } + } + } + + pub(crate) fn into_baked_commands(self) -> BakedCommands { + BakedCommands { + encoder: self.encoder.raw, + list: self.encoder.list, + trackers: self.trackers, + buffer_memory_init_actions: self.buffer_memory_init_actions, + texture_memory_actions: self.texture_memory_actions, + } + } + + pub(crate) fn destroy(mut self, device: &Device) { + self.encoder.discard(); + unsafe { + self.encoder.raw.reset_all(self.encoder.list); + } + unsafe { + device.raw().destroy_command_encoder(self.encoder.raw); + } + } } /// A buffer of commands to be submitted to the GPU for execution. @@ -313,22 +419,15 @@ pub struct CommandBuffer { /// This `Option` is populated when the command buffer is first created. /// When this is submitted, dropped, or destroyed, its contents are /// extracted into a [`BakedCommands`] by - /// [`CommandBuffer::extract_baked_commands`]. + /// [`CommandBufferMutable::into_baked_commands`]. 
pub(crate) data: Mutex>, } impl Drop for CommandBuffer { fn drop(&mut self) { resource_log!("Drop {}", self.error_ident()); - if self.data.lock().is_none() { - return; - } - let mut baked = self.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.device.raw().destroy_command_encoder(baked.encoder); + if let Some(data) = self.data.lock().take() { + data.destroy(&self.device); } } } @@ -368,6 +467,15 @@ impl CommandBuffer { } } + pub(crate) fn new_invalid(device: &Arc, label: &Label) -> Self { + CommandBuffer { + device: device.clone(), + support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE), + label: label.to_string(), + data: Mutex::new(rank::COMMAND_BUFFER_DATA, None), + } + } + pub(crate) fn insert_barriers_from_tracker( raw: &mut dyn hal::DynCommandEncoder, base: &mut Tracker, @@ -446,80 +554,19 @@ impl CommandBuffer { } impl CommandBuffer { - fn lock_encoder_impl(&self, lock: bool) -> Result<(), CommandEncoderError> { - let mut cmd_buf_data_guard = self.data.lock(); - let cmd_buf_data = cmd_buf_data_guard.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if lock { - cmd_buf_data.status = CommandEncoderStatus::Locked; - } - Ok(()) - } - CommandEncoderStatus::Locked => { - // Any operation on a locked encoder is required to put it into the invalid/error state. 
- // See https://www.w3.org/TR/webgpu/#encoder-state-locked - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Err(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } + pub fn try_get<'a>( + &'a self, + ) -> Result, InvalidResourceError> { + let g = self.data.lock(); + crate::lock::MutexGuard::try_map(g, |data| data.as_mut()) + .map_err(|_| InvalidResourceError(self.error_ident())) } - /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. - fn check_recording(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(false) - } - - /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. - /// - /// Call [`CommandBuffer::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. - fn lock_encoder(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(true) - } - - /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. - /// - /// This function is the counterpart to [`CommandBuffer::lock_encoder`]. - /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. 
- fn unlock_encoder(&self) -> Result<(), CommandEncoderError> { - let mut data_lock = self.data.lock(); - let status = &mut data_lock.as_mut().unwrap().status; - match *status { - CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Locked => { - *status = CommandEncoderStatus::Recording; - Ok(()) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } - } - - pub fn is_finished(&self) -> bool { - match self.data.lock().as_ref().unwrap().status { - CommandEncoderStatus::Finished => true, - _ => false, - } - } - - pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { - let data = self.data.lock().take().unwrap(); - BakedCommands { - encoder: data.encoder.raw, - list: data.encoder.list, - trackers: data.trackers, - buffer_memory_init_actions: data.buffer_memory_init_actions, - texture_memory_actions: data.texture_memory_actions, - } - } - - pub(crate) fn from_arc_into_baked(self: Arc) -> BakedCommands { - let mut command_buffer = Arc::into_inner(self) - .expect("CommandBuffer cannot be destroyed because is still in use"); - command_buffer.extract_baked_commands() + pub fn try_take<'a>(&'a self) -> Result { + self.data + .lock() + .take() + .ok_or_else(|| InvalidResourceError(self.error_ident())) } } @@ -591,18 +638,10 @@ pub enum CommandEncoderError { #[error("Command encoder is locked by a previously created render/compute pass. 
Before recording any new commands, the pass must be ended.")] Locked, - #[error("QuerySet {0:?} for pass timestamp writes is invalid.")] - InvalidTimestampWritesQuerySetId(id::QuerySetId), - #[error("Attachment TextureViewId {0:?} is invalid")] - InvalidAttachmentId(id::TextureViewId), #[error(transparent)] InvalidColorAttachment(#[from] ColorAttachmentError), - #[error("Resolve attachment TextureViewId {0:?} is invalid")] - InvalidResolveTargetId(id::TextureViewId), - #[error("Depth stencil attachment TextureViewId {0:?} is invalid")] - InvalidDepthStencilAttachmentId(id::TextureViewId), - #[error("Occlusion QuerySetId {0:?} is invalid")] - InvalidOcclusionQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -615,34 +654,15 @@ impl Global { let hub = &self.hub; - let error = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if let Err(e) = cmd_buf_data.encoder.close() { - Some(e.into()) - } else { - cmd_buf_data.status = CommandEncoderStatus::Finished; - //Note: if we want to stop tracking the swapchain texture view, - // this is the place to do it. 
- None - } - } - CommandEncoderStatus::Locked => { - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Some(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => { - cmd_buf_data.encoder.discard(); - Some(CommandEncoderError::Invalid) - } - } - } - Err(_) => Some(CommandEncoderError::Invalid), + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + + let error = match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.finish(&cmd_buf.device)) + { + Ok(_) => None, + Err(e) => Some(e), }; (encoder_id.into_command_buffer_id(), error) @@ -658,23 +678,19 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::PushDebugGroup(label.to_string())); } - let cmd_buf_raw = cmd_buf_data.encoder.open()?; - if !self - .instance - .flags + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { @@ -694,26 +710,21 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let 
cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::InsertDebugMarker(label.to_string())); } - if !self - .instance - .flags + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.insert_debug_marker(label); } @@ -730,24 +741,19 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::PopDebugGroup); } - let cmd_buf_raw = cmd_buf_data.encoder.open()?; - if !self - .instance - .flags + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { @@ -805,7 +811,7 @@ impl Default for StateChange { #[derive(Debug)] struct BindGroupStateChange { - last_states: [StateChange; hal::MAX_BIND_GROUPS], + last_states: [StateChange>; hal::MAX_BIND_GROUPS], } impl BindGroupStateChange { @@ -817,7 +823,7 @@ impl BindGroupStateChange { fn set_and_check_redundant( &mut self, - bind_group_id: id::BindGroupId, + bind_group_id: Option, index: u32, dynamic_offsets: &mut Vec, offsets: &[wgt::DynamicOffset], diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs 
index de5103ac88..d783721fb4 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -7,7 +7,8 @@ use crate::{ id, init_tracker::MemoryInitKind, resource::{ - DestroyedResourceError, MissingBufferUsageError, ParentDevice, QuerySet, Trackable, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, ParentDevice, + QuerySet, Trackable, }, track::{StatelessTracker, TrackerIndex}, FastHashMap, @@ -100,12 +101,10 @@ pub enum QueryError { Use(#[from] QueryUseError), #[error("Error encountered while trying to resolve a query")] Resolve(#[from] ResolveError), - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), - #[error("QuerySetId {0:?} is invalid or destroyed")] - InvalidQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered while trying to use queries @@ -319,22 +318,16 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; cmd_buf .device .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::WriteTimestamp { @@ -343,20 +336,14 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - - let raw_encoder = encoder.open()?; + let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; - let query_set = hub - .query_sets 
- .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?; + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } @@ -371,17 +358,11 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -394,32 +375,20 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let raw_encoder = encoder.open()?; - if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.same_device_as(cmd_buf.as_ref())?; - let dst_buffer = hub - .buffers - .get(destination) - .map_err(|_| QueryError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, 
hal::BufferUses::COPY_DST); @@ -465,14 +434,16 @@ impl Global { } // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state. - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - buffer_start_offset..buffer_end_offset, - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + buffer_start_offset..buffer_end_offset, + MemoryInitKind::ImplicitlyInitialized, + ), + ); let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?; - + let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { raw_encoder.transition_buffers(dst_barrier.as_slice()); raw_encoder.copy_query_results( @@ -484,6 +455,8 @@ impl Global { ); } + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } } diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 1f11ba0937..b6680333c2 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -4,6 +4,7 @@ use crate::command::{ }; use crate::init_tracker::BufferInitTrackerAction; use crate::pipeline::RenderPipeline; +use crate::resource::InvalidResourceError; use crate::snatch::SnatchGuard; use crate::{ api_log, @@ -45,7 +46,7 @@ use serde::Deserialize; use serde::Serialize; use std::sync::Arc; -use std::{borrow::Cow, fmt, iter, mem, num::NonZeroU32, ops::Range, str}; +use std::{borrow::Cow, fmt, iter, mem::size_of, num::NonZeroU32, ops::Range, str}; use super::render_command::ArcRenderCommand; use super::{ @@ -582,14 +583,6 @@ pub enum RenderPassErrorInner { InvalidParentEncoder, #[error("The format of the depth-stencil attachment ({0:?}) is not a depth-stencil format")] InvalidDepthStencilAttachmentFormat(wgt::TextureFormat), - #[error("Buffer {0:?} is invalid or destroyed")] - InvalidBuffer(id::BufferId), - #[error("Render pipeline {0:?} is invalid")] - 
InvalidPipeline(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), - #[error("Render bundle {0:?} is invalid")] - InvalidRenderBundle(id::RenderBundleId), #[error("The format of the {location} ({format:?}) is not resolvable")] UnsupportedResolveTargetFormat { location: AttachmentErrorLocation, @@ -635,8 +628,6 @@ pub enum RenderPassErrorInner { SurfaceTextureDropped, #[error("Not enough memory left for render pass")] OutOfMemory, - #[error("The bind group at index {0:?} is invalid")] - InvalidBindGroup(u32), #[error("Unable to clear non-present/read-only depth")] InvalidDepthOps, #[error("Unable to clear non-present/read-only stencil")] @@ -705,6 +696,8 @@ pub enum RenderPassErrorInner { DestroyedResource(#[from] DestroyedResourceError), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderPassErrorInner { @@ -1362,14 +1355,10 @@ impl Global { channel, }) = color_attachment { - let view = texture_views - .get_owned(*view_id) - .map_err(|_| CommandEncoderError::InvalidAttachmentId(*view_id))?; + let view = texture_views.get(*view_id).get()?; let resolve_target = if let Some(resolve_target_id) = resolve_target { - let rt_arc = texture_views.get_owned(*resolve_target_id).map_err(|_| { - CommandEncoderError::InvalidResolveTargetId(*resolve_target_id) - })?; + let rt_arc = texture_views.get(*resolve_target_id).get()?; Some(rt_arc) } else { @@ -1390,13 +1379,7 @@ impl Global { arc_desc.depth_stencil_attachment = if let Some(depth_stencil_attachment) = desc.depth_stencil_attachment { - let view = texture_views - .get_owned(depth_stencil_attachment.view) - .map_err(|_| { - CommandEncoderError::InvalidDepthStencilAttachmentId( - depth_stencil_attachment.view, - ) - })?; + let view = texture_views.get(depth_stencil_attachment.view).get()?; Some(ArcRenderPassDepthStencilAttachment { 
view, @@ -1408,9 +1391,7 @@ impl Global { }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let query_set = query_sets.get_owned(tw.query_set).map_err(|_| { - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set) - })?; + let query_set = query_sets.get(tw.query_set).get()?; Some(ArcPassTimestampWrites { query_set, @@ -1423,9 +1404,7 @@ impl Global { arc_desc.occlusion_query_set = if let Some(occlusion_query_set) = desc.occlusion_query_set { - let query_set = query_sets.get_owned(occlusion_query_set).map_err(|_| { - CommandEncoderError::InvalidOcclusionQuerySetId(occlusion_query_set) - })?; + let query_set = query_sets.get(occlusion_query_set).get()?; Some(query_set) } else { @@ -1446,12 +1425,13 @@ impl Global { let make_err = |e, arc_desc| (RenderPass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; @@ -1461,6 +1441,8 @@ impl Global { (RenderPass::new(Some(cmd_buf), arc_desc), err) } + /// Note that this differs from [`Self::render_pass_end`], it will + /// create a new pass, replay the commands and end the pass. 
#[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn render_pass_end_with_unresolved_commands( @@ -1476,15 +1458,11 @@ impl Global { #[cfg(feature = "trace")] { - let hub = &self.hub; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(pass_scope)?, - }; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunRenderPass { @@ -1528,29 +1506,26 @@ impl Global { }); }; - let hub = &self.hub; render_pass.base = Some(BasePass { label, - commands: super::RenderCommand::resolve_render_command_ids(hub, &commands)?, + commands: super::RenderCommand::resolve_render_command_ids(&self.hub, &commands)?, dynamic_offsets, string_data, push_constant_data, }); - if let Some(err) = encoder_error { - Err(RenderPassError { - scope: pass_scope, - inner: err.into(), - }) - } else { - self.render_pass_end(&mut render_pass) - } + self.render_pass_end(&mut render_pass) } - #[doc(hidden)] pub fn render_pass_end(&self, pass: &mut RenderPass) -> Result<(), RenderPassError> { let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(RenderPassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + let base = pass .base .take() @@ -1562,20 +1537,16 @@ impl Global { base.label.as_deref().unwrap_or("") ); - let Some(cmd_buf) = pass.parent.as_ref() else { - return Err(RenderPassErrorInner::InvalidParentEncoder).map_pass_err(pass_scope); - }; - cmd_buf.unlock_encoder().map_pass_err(pass_scope)?; - - let hal_label = hal_label(base.label.as_deref(), self.instance.flags); + let mut cmd_buf_data = 
cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let device = &cmd_buf.device; let snatch_guard = &device.snatchable_lock.read(); - let (scope, pending_discard_init_fixups) = { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let hal_label = hal_label(base.label.as_deref(), device.instance_flags); + let (scope, pending_discard_init_fixups) = { device.check_is_valid().map_pass_err(pass_scope)?; let encoder = &mut cmd_buf_data.encoder; @@ -1588,10 +1559,12 @@ impl Global { // We automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, // we need to make sure to close the previous one. - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // We will reset this to `Recording` if we succeed, acts as a fail-safe. 
*status = CommandEncoderStatus::Error; - encoder.open_pass(hal_label).map_pass_err(pass_scope)?; + encoder + .open_pass(hal_label, &cmd_buf.device) + .map_pass_err(pass_scope)?; let info = RenderPassInfo::start( device, @@ -1894,19 +1867,16 @@ impl Global { .finish(state.raw_encoder, state.snatch_guard) .map_pass_err(pass_scope)?; - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; (trackers, pending_discard_init_fixups) }; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; let tracker = &mut cmd_buf_data.trackers; { - let transit = encoder.open().map_pass_err(pass_scope)?; + let transit = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), @@ -1922,7 +1892,9 @@ impl Global { } *status = CommandEncoderStatus::Recording; - encoder.close_and_swap().map_pass_err(pass_scope)?; + encoder + .close_and_swap(&cmd_buf.device) + .map_pass_err(pass_scope)?; Ok(()) } @@ -1934,12 +1906,16 @@ fn set_bind_group( dynamic_offsets: &[DynamicOffset], index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, ) -> Result<(), RenderPassErrorInner> { - api_log!( - "RenderPass::set_bind_group {index} {}", - bind_group.error_ident() - ); + if bind_group.is_none() { + api_log!("RenderPass::set_bind_group {index} None"); + } else { + api_log!( + "RenderPass::set_bind_group {index} {}", + bind_group.as_ref().unwrap().error_ident() + ); + } let max_bind_groups = state.device.limits.max_bind_groups; if index >= max_bind_groups { @@ -1957,6 +1933,12 @@ fn set_bind_group( ); state.dynamic_offset_count += num_dynamic_offsets; + if bind_group.is_none() { + // TODO: Handle bind_group None. 
+ return Ok(()); + } + + let bind_group = bind_group.unwrap(); let bind_group = state.tracker.bind_groups.insert_single(bind_group); bind_group.same_device_as(cmd_buf.as_ref())?; @@ -1999,7 +1981,7 @@ fn set_bind_group( state.raw_encoder.set_bind_group( pipeline_layout, index + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -2073,7 +2055,7 @@ fn set_pipeline( state.raw_encoder.set_bind_group( pipeline.layout.raw(), start_index as u32 + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -2442,8 +2424,8 @@ fn multi_draw_indirect( state.is_ready(indexed)?; let stride = match indexed { - false => mem::size_of::(), - true => mem::size_of::(), + false => size_of::(), + true => size_of::(), }; if count.is_some() { @@ -2520,8 +2502,8 @@ fn multi_draw_indirect_count( state.is_ready(indexed)?; let stride = match indexed { - false => mem::size_of::(), - true => mem::size_of::(), + false => size_of::(), + true => size_of::(), } as u64; state @@ -2760,11 +2742,7 @@ impl Global { buffer_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; Ok(buffer) } @@ -2775,11 +2753,7 @@ impl Global { query_set_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| RenderPassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; Ok(query_set) } @@ -2788,7 +2762,7 @@ impl Global { &self, pass: &mut RenderPass, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: &[DynamicOffset], ) -> Result<(), RenderPassError> { let scope = PassErrorScope::SetBindGroup; @@ -2808,12 +2782,18 @@ impl Global { return Ok(()); } - let hub = &self.hub; - let bind_group = 
hub - .bind_groups - .get(bind_group_id) - .map_err(|_| RenderPassErrorInner::InvalidBindGroup(index)) - .map_pass_err(scope)?; + let mut bind_group = None; + if bind_group_id.is_some() { + let bind_group_id = bind_group_id.unwrap(); + + let hub = &self.hub; + let bg = hub + .bind_groups + .get(bind_group_id) + .get() + .map_pass_err(scope)?; + bind_group = Some(bg); + } base.commands.push(ArcRenderCommand::SetBindGroup { index, @@ -2843,7 +2823,7 @@ impl Global { let pipeline = hub .render_pipelines .get(pipeline_id) - .map_err(|_| RenderPassErrorInner::InvalidPipeline(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcRenderCommand::SetPipeline(pipeline)); @@ -3154,23 +3134,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. - let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - let count_buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: false, @@ -3194,24 +3162,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. 
- let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - - let count_buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: true, @@ -3355,10 +3310,7 @@ impl Global { let bundles = hub.render_bundles.read(); for &bundle_id in render_bundle_ids { - let bundle = bundles - .get_owned(bundle_id) - .map_err(|_| RenderPassErrorInner::InvalidRenderBundle(bundle_id)) - .map_pass_err(scope)?; + let bundle = bundles.get(bundle_id).get().map_pass_err(scope)?; base.commands.push(ArcRenderCommand::ExecuteBundle(bundle)); } diff --git a/wgpu-core/src/command/render_command.rs b/wgpu-core/src/command/render_command.rs index 891ee3cfbc..d4e2689d27 100644 --- a/wgpu-core/src/command/render_command.rs +++ b/wgpu-core/src/command/render_command.rs @@ -17,7 +17,7 @@ pub enum RenderCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group_id: id::BindGroupId, + bind_group_id: Option, }, SetPipeline(id::RenderPipelineId), SetIndexBuffer { @@ -129,9 +129,7 @@ impl RenderCommand { hub: &crate::hub::Hub, commands: &[RenderCommand], ) -> Result, super::RenderPassError> { - use super::{ - DrawKind, PassErrorScope, RenderCommandError, RenderPassError, RenderPassErrorInner, - }; + use super::{DrawKind, PassErrorScope, RenderPassError}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -139,240 +137,253 @@ impl RenderCommand { let pipelines_guard = hub.render_pipelines.read(); let render_bundles_guard = hub.render_bundles.read(); - let 
resolved_commands: Vec = commands - .iter() - .map(|c| -> Result { - Ok(match *c { - RenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group_id, - } => ArcRenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetBindGroup, - inner: RenderPassErrorInner::InvalidBindGroup(index), + let resolved_commands: Vec = + commands + .iter() + .map(|c| -> Result { + Ok(match *c { + RenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group_id, + } => { + if bind_group_id.is_none() { + return Ok(ArcRenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: None, + }); } - })?, - }, - - RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( - pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| RenderPassError { - scope: PassErrorScope::SetPipelineRender, - inner: RenderCommandError::InvalidPipelineId(pipeline_id).into(), - })?, - ), - - RenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - } => ArcRenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - }, - - RenderCommand::PushDebugGroup { color, len } => { - ArcRenderCommand::PushDebugGroup { color, len } - } - - RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, - - RenderCommand::InsertDebugMarker { color, len } => { - ArcRenderCommand::InsertDebugMarker { color, len } - } - - RenderCommand::WriteTimestamp { - query_set_id, - query_index, - } => ArcRenderCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::WriteTimestamp, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::BeginPipelineStatisticsQuery { - query_set_id, - query_index, - } => ArcRenderCommand::BeginPipelineStatisticsQuery { - query_set: 
query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::EndPipelineStatisticsQuery => { - ArcRenderCommand::EndPipelineStatisticsQuery - } - - RenderCommand::SetIndexBuffer { - buffer_id, - index_format, - offset, - size, - } => ArcRenderCommand::SetIndexBuffer { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetIndexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - index_format, - offset, - size, - }, - - RenderCommand::SetVertexBuffer { - slot, - buffer_id, - offset, - size, - } => ArcRenderCommand::SetVertexBuffer { - slot, - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetVertexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - offset, - size, - }, - - RenderCommand::SetBlendConstant(color) => { - ArcRenderCommand::SetBlendConstant(color) - } - - RenderCommand::SetStencilReference(reference) => { - ArcRenderCommand::SetStencilReference(reference) - } - - RenderCommand::SetViewport { - rect, - depth_min, - depth_max, - } => ArcRenderCommand::SetViewport { - rect, - depth_min, - depth_max, - }, - - RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), - - RenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - } => ArcRenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - }, - - RenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - } => ArcRenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - }, - - RenderCommand::MultiDrawIndirect { - buffer_id, - offset, - count, - indexed, - } => 
ArcRenderCommand::MultiDrawIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::Draw { - kind: if count.is_some() { - DrawKind::MultiDrawIndirect - } else { - DrawKind::DrawIndirect - }, - indexed, - }, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), + + let bind_group_id = bind_group_id.unwrap(); + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetBindGroup, + inner: e.into(), + } + })?; + + ArcRenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: Some(bg), } - })?, - offset, - count, - indexed, - }, - - RenderCommand::MultiDrawIndirectCount { - buffer_id, - offset, - count_buffer_id, - count_buffer_offset, - max_count, - indexed, - } => { - let scope = PassErrorScope::Draw { - kind: DrawKind::MultiDrawIndirectCount, + } + + RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( + pipelines_guard.get(pipeline_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetPipelineRender, + inner: e.into(), + } + })?, + ), + + RenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + } => ArcRenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + }, + + RenderCommand::PushDebugGroup { color, len } => { + ArcRenderCommand::PushDebugGroup { color, len } + } + + RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, + + RenderCommand::InsertDebugMarker { color, len } => { + ArcRenderCommand::InsertDebugMarker { color, len } + } + + RenderCommand::WriteTimestamp { + query_set_id, + query_index, + } => ArcRenderCommand::WriteTimestamp { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::WriteTimestamp, + inner: e.into(), + } + })?, + query_index, + }, + + RenderCommand::BeginPipelineStatisticsQuery { + query_set_id, + query_index, + } => 
ArcRenderCommand::BeginPipelineStatisticsQuery { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::BeginPipelineStatisticsQuery, + inner: e.into(), + } + })?, + query_index, + }, + + RenderCommand::EndPipelineStatisticsQuery => { + ArcRenderCommand::EndPipelineStatisticsQuery + } + + RenderCommand::SetIndexBuffer { + buffer_id, + index_format, + offset, + size, + } => ArcRenderCommand::SetIndexBuffer { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetIndexBuffer, + inner: e.into(), + } + })?, + index_format, + offset, + size, + }, + + RenderCommand::SetVertexBuffer { + slot, + buffer_id, + offset, + size, + } => ArcRenderCommand::SetVertexBuffer { + slot, + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetVertexBuffer, + inner: e.into(), + } + })?, + offset, + size, + }, + + RenderCommand::SetBlendConstant(color) => { + ArcRenderCommand::SetBlendConstant(color) + } + + RenderCommand::SetStencilReference(reference) => { + ArcRenderCommand::SetStencilReference(reference) + } + + RenderCommand::SetViewport { + rect, + depth_min, + depth_max, + } => ArcRenderCommand::SetViewport { + rect, + depth_min, + depth_max, + }, + + RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), + + RenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + } => ArcRenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + }, + + RenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + } => ArcRenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + }, + + RenderCommand::MultiDrawIndirect { + buffer_id, + offset, + count, indexed, - }; - ArcRenderCommand::MultiDrawIndirectCount { - buffer: 
buffers_guard.get_owned(buffer_id).map_err(|_| { + } => ArcRenderCommand::MultiDrawIndirect { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), + scope: PassErrorScope::Draw { + kind: if count.is_some() { + DrawKind::MultiDrawIndirect + } else { + DrawKind::DrawIndirect + }, + indexed, + }, + inner: e.into(), } })?, offset, - count_buffer: buffers_guard.get_owned(count_buffer_id).map_err( - |_| RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(count_buffer_id) - .into(), - }, - )?, + count, + indexed, + }, + + RenderCommand::MultiDrawIndirectCount { + buffer_id, + offset, + count_buffer_id, count_buffer_offset, max_count, indexed, + } => { + let scope = PassErrorScope::Draw { + kind: DrawKind::MultiDrawIndirectCount, + indexed, + }; + ArcRenderCommand::MultiDrawIndirectCount { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope, + inner: e.into(), + } + })?, + offset, + count_buffer: buffers_guard.get(count_buffer_id).get().map_err( + |e| RenderPassError { + scope, + inner: e.into(), + }, + )?, + count_buffer_offset, + max_count, + indexed, + } } - } - RenderCommand::BeginOcclusionQuery { query_index } => { - ArcRenderCommand::BeginOcclusionQuery { query_index } - } + RenderCommand::BeginOcclusionQuery { query_index } => { + ArcRenderCommand::BeginOcclusionQuery { query_index } + } - RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, + RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, - RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( - render_bundles_guard - .get_owned(bundle) - .map_err(|_| RenderPassError { - scope: PassErrorScope::ExecuteBundle, - inner: RenderCommandError::InvalidRenderBundle(bundle).into(), + RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( + render_bundles_guard.get(bundle).get().map_err(|e| { + 
RenderPassError { + scope: PassErrorScope::ExecuteBundle, + inner: e.into(), + } })?, - ), + ), + }) }) - }) - .collect::, RenderPassError>>()?; + .collect::, RenderPassError>>()?; Ok(resolved_commands) } } @@ -384,7 +395,7 @@ pub enum ArcRenderCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, }, SetPipeline(Arc), SetIndexBuffer { @@ -464,11 +475,13 @@ pub enum ArcRenderCommand { indexed: bool, }, PushDebugGroup { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, PopDebugGroup, InsertDebugMarker { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index de5ef9ed84..72eae50c25 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -12,11 +12,11 @@ use crate::{ TextureInitTrackerAction, }, resource::{ - DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ParentDevice, - Texture, TextureErrorDimension, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, + MissingTextureUsageError, ParentDevice, Texture, TextureErrorDimension, }, snatch::SnatchGuard, - track::{TextureSelector, Tracker}, + track::TextureSelector, }; use arrayvec::ArrayVec; @@ -25,7 +25,7 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages}; use std::sync::Arc; -use super::{memory_init::CommandBufferTextureMemoryActions, ClearError, CommandEncoder}; +use super::{ClearError, CommandBufferMutable}; pub type ImageCopyBuffer = wgt::ImageCopyBuffer; pub type ImageCopyTexture = wgt::ImageCopyTexture; @@ -41,10 +41,6 @@ pub enum CopySide { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum TransferError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error("Source and destination cannot be the same buffer")] 
SameSourceDestinationBuffer, #[error(transparent)] @@ -150,6 +146,8 @@ pub enum CopyError { Transfer(#[from] TransferError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for CopyError { @@ -408,9 +406,7 @@ pub(crate) fn validate_texture_copy_range( fn handle_texture_init( init_kind: MemoryInitKind, - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, @@ -428,11 +424,13 @@ fn handle_texture_init( }; // Register the init action. - let immediate_inits = texture_memory_actions.register_init_action(&{ init_action }); + let immediate_inits = cmd_buf_data + .texture_memory_actions + .register_init_action(&{ init_action }); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(device)?; for init in immediate_inits { clear_texture( &init.texture, @@ -441,7 +439,7 @@ fn handle_texture_init( layer_range: init.layer..(init.layer + 1), }, cmd_buf_raw, - &mut trackers.textures, + &mut cmd_buf_data.trackers.textures, &device.alignments, device.zero_buffer.as_ref(), snatch_guard, @@ -457,9 +455,7 @@ fn handle_texture_init( /// Ensure the source texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. 
fn handle_src_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -468,9 +464,7 @@ fn handle_src_texture_init( ) -> Result<(), TransferError> { handle_texture_init( MemoryInitKind::NeedsInitializedMemory, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, source, copy_size, @@ -485,9 +479,7 @@ fn handle_src_texture_init( /// Ensure the destination texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_dst_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -510,9 +502,7 @@ fn handle_dst_texture_init( handle_texture_init( dst_init_kind, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, destination, copy_size, @@ -542,17 +532,11 @@ impl Global { } let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; @@ -570,10 +554,7 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - let src_buffer = hub - .buffers - .get(source) - .map_err(|_| TransferError::InvalidBufferId(source))?; + let src_buffer = hub.buffers.get(source).get()?; 
src_buffer.same_device_as(cmd_buf.as_ref())?; @@ -589,10 +570,7 @@ impl Global { // expecting only a single barrier let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_buffer = hub - .buffers - .get(destination) - .map_err(|_| TransferError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -684,7 +662,7 @@ impl Global { dst_offset: destination_offset, size: wgt::BufferSize::new(size).unwrap(), }; - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; let barriers = src_barrier .into_iter() .chain(dst_barrier) @@ -712,21 +690,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToTexture { @@ -736,20 +708,12 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; - if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_buffer_to_texture of size 0"); return Ok(()); } - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| 
TransferError::InvalidTextureId(destination.texture))?; + let dst_texture = hub.textures.get(destination.texture).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -768,9 +732,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, @@ -778,14 +740,12 @@ impl Global { &snatch_guard, )?; - let src_buffer = hub - .buffers - .get(source.buffer) - .map_err(|_| TransferError::InvalidBufferId(source.buffer))?; + let src_buffer = hub.buffers.get(source.buffer).get()?; src_buffer.same_device_as(cmd_buf.as_ref())?; - let src_pending = tracker + let src_pending = cmd_buf_data + .trackers .buffers .set_single(&src_buffer, hal::BufferUses::COPY_SRC); @@ -795,10 +755,11 @@ impl Global { .map_err(TransferError::MissingBufferUsage)?; let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_pending = - tracker - .textures - .set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST); + let dst_pending = cmd_buf_data.trackers.textures.set_single( + &dst_texture, + dst_range, + hal::TextureUses::COPY_DST, + ); let dst_raw = dst_texture.try_raw(&snatch_guard)?; dst_texture .check_usage(TextureUsages::COPY_DST) @@ -835,11 +796,13 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( - &src_buffer, - source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::NeedsInitializedMemory, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + src_buffer.initialization_status.read().create_action( + &src_buffer, + source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::NeedsInitializedMemory, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -855,7 +818,7 
@@ impl Global { }) .collect::>(); - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_textures(&dst_barrier); cmd_buf_raw.transition_buffers(src_barrier.as_slice()); @@ -880,21 +843,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToBuffer { @@ -903,20 +860,13 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; @@ -931,9 +881,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -941,10 +889,11 @@ impl Global { &snatch_guard, )?; - let src_pending = - tracker - .textures - .set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC); + let src_pending = cmd_buf_data.trackers.textures.set_single( + &src_texture, + src_range, + hal::TextureUses::COPY_SRC, + ); let src_raw = src_texture.try_raw(&snatch_guard)?; src_texture .check_usage(TextureUsages::COPY_SRC) @@ -966,14 +915,12 @@ impl Global { .map(|pending| pending.into_hal(src_raw)) .collect::>(); - let dst_buffer = hub - .buffers - .get(destination.buffer) - .map_err(|_| TransferError::InvalidBufferId(destination.buffer))?; + let dst_buffer = hub.buffers.get(destination.buffer).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, hal::BufferUses::COPY_DST); @@ -1011,11 +958,14 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + destination.layout.offset + ..(destination.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::ImplicitlyInitialized, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -1030,7 +980,7 @@ impl Global { } }) .collect::>(); - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_buffers(dst_barrier.as_slice()); cmd_buf_raw.transition_textures(&src_barrier); @@ -1060,23 +1010,17 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub 
.command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; let snatch_guard = device.snatchable_lock.read(); - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToTexture { @@ -1085,23 +1029,14 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; + let dst_texture = hub.textures.get(destination.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -1143,9 +1078,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -1153,9 +1086,7 @@ impl Global { &snatch_guard, )?; handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, @@ -1209,7 +1140,7 @@ impl Global { } }) .collect::>(); - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_textures(&barriers); cmd_buf_raw.copy_texture_to_texture( diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index d9f983d1a8..10b82a73ae 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -6,7 +6,8 @@ use crate::{ self, BindGroupEntry, BindingResource, BufferBinding, ResolvedBindGroupDescriptor, ResolvedBindGroupEntry, ResolvedBindingResource, ResolvedBufferBinding, }, - command, conv, + command::{self, CommandBuffer}, + conv, device::{bgl, life::WaitIdleError, DeviceError, DeviceLostClosure, DeviceLostReason}, global::Global, hal_api::HalApi, @@ -19,14 +20,19 @@ use crate::{ present, resource::{ self, BufferAccessError, BufferAccessResult, BufferMapOperation, CreateBufferError, + Fallible, }, storage::Storage, - Label, + Label, LabelHelpers, }; use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, ptr::NonNull, sync::atomic::Ordering}; +use std::{ + borrow::Cow, + ptr::NonNull, + sync::{atomic::Ordering, Arc}, +}; use super::{ImplicitPipelineIds, UserClosures}; @@ -35,18 +41,10 @@ impl Global { &self, adapter_id: AdapterId, surface_id: SurfaceId, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| 
instance::IsSurfaceSupportedError::InvalidSurface)?; - Ok(adapter.is_surface_supported(surface)) + ) -> bool { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + adapter.is_surface_supported(&surface) } pub fn surface_get_capabilities( @@ -71,63 +69,30 @@ impl Global { }) } - fn fetch_adapter_and_surface< - F: FnOnce(&Adapter, &Surface) -> Result, - B, - >( + fn fetch_adapter_and_surface B, B>( &self, surface_id: SurfaceId, adapter_id: AdapterId, get_supported_callback: F, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; - - get_supported_callback(adapter, surface) + ) -> B { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + get_supported_callback(&adapter, &surface) } - pub fn device_features(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.features) + pub fn device_features(&self, device_id: DeviceId) -> wgt::Features { + let device = self.hub.devices.get(device_id); + device.features } - pub fn device_limits(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.limits.clone()) + pub fn device_limits(&self, device_id: DeviceId) -> wgt::Limits { + let device = self.hub.devices.get(device_id); + device.limits.clone() } - pub fn device_downlevel_properties( - &self, - device_id: DeviceId, - ) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - 
- Ok(device.downlevel.clone()) + pub fn device_downlevel_properties(&self, device_id: DeviceId) -> wgt::DownlevelCapabilities { + let device = self.hub.devices.get(device_id); + device.downlevel.clone() } pub fn device_create_buffer( @@ -139,15 +104,10 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = &self.hub; - let fid = hub.buffers.prepare(device_id.backend(), id_in); + let fid = hub.buffers.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => { - break 'error DeviceError::InvalidDeviceId.into(); - } - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -166,7 +126,7 @@ impl Global { } }; - let id = fid.assign(buffer); + let id = fid.assign(Fallible::Valid(buffer)); api_log!( "Device::create_buffer({:?}{}) -> {id:?}", @@ -181,7 +141,7 @@ impl Global { return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -213,32 +173,34 @@ impl Global { /// [`device_create_buffer`]: Global::device_create_buffer /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages - pub fn create_buffer_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.buffers.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_buffer_error( + &self, + id_in: Option, + desc: &resource::BufferDescriptor, + ) { + let fid = self.hub.buffers.prepare(id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } pub fn create_render_bundle_error( &self, - backend: wgt::Backend, id_in: Option, + desc: &command::RenderBundleDescriptor, ) { - let hub = &self.hub; - let fid = hub.render_bundles.prepare(backend, id_in); - - fid.assign_error(); + let fid = self.hub.render_bundles.prepare(id_in); + 
fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } /// Assign `id_in` an error with the given `label`. /// /// See `create_buffer_error` for more context and explanation. - pub fn create_texture_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.textures.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_texture_error( + &self, + id_in: Option, + desc: &resource::TextureDescriptor, + ) { + let fid = self.hub.textures.prepare(id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } #[cfg(feature = "replay")] @@ -250,10 +212,7 @@ impl Global { ) -> BufferAccessResult { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; let device = &buffer.device; @@ -270,21 +229,27 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); let raw_buf = buffer.try_raw(&snatch_guard)?; - unsafe { - let mapping = device + + let mapping = unsafe { + device .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) - .map_err(DeviceError::from)?; - std::ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); - if !mapping.is_coherent { - #[allow(clippy::single_range_in_vec_init)] + } + .map_err(|e| device.handle_hal_error(e))?; + + unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()) }; + + if !mapping.is_coherent { + #[allow(clippy::single_range_in_vec_init)] + unsafe { device .raw() - .flush_mapped_ranges(raw_buf, &[offset..offset + data.len() as u64]); - } - device.raw().unmap_buffer(raw_buf); + .flush_mapped_ranges(raw_buf, &[offset..offset + data.len() as u64]) + }; } + unsafe { device.raw().unmap_buffer(raw_buf) }; + Ok(()) } @@ -294,10 +259,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; + let buffer = 
hub.buffers.get(buffer_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = buffer.device.trace.lock().as_mut() { @@ -318,9 +280,9 @@ impl Global { let hub = &self.hub; - let buffer = match hub.buffers.unregister(buffer_id) { - Some(buffer) => buffer, - None => { + let buffer = match hub.buffers.remove(buffer_id).get() { + Ok(buffer) => buffer, + Err(_) => { return; } }; @@ -346,13 +308,10 @@ impl Global { let hub = &self.hub; - let fid = hub.textures.prepare(device_id.backend(), id_in); + let fid = hub.textures.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -364,7 +323,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -372,7 +331,7 @@ impl Global { log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -392,13 +351,10 @@ impl Global { let hub = &self.hub; - let fid = hub.textures.prepare(device_id.backend(), id_in); + let fid = hub.textures.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); // NB: Any change done through the raw texture handle will not be // recorded in the replay @@ -412,7 +368,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -420,7 +376,7 @@ impl Global { 
log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -439,33 +395,23 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = &self.hub; - let fid = hub.buffers.prepare(A::VARIANT, id_in); - - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; - - // NB: Any change done through the raw buffer handle will not be - // recorded in the replay - #[cfg(feature = "trace")] - if let Some(trace) = device.trace.lock().as_mut() { - trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); - } + let fid = hub.buffers.prepare(id_in); - let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); + let device = self.hub.devices.get(device_id); - let id = fid.assign(buffer); - api_log!("Device::create_buffer -> {id:?}"); + // NB: Any change done through the raw buffer handle will not be + // recorded in the replay + #[cfg(feature = "trace")] + if let Some(trace) = device.trace.lock().as_mut() { + trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); + } - return (id, None); - }; + let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); - log::error!("Device::create_buffer error: {error}"); + let id = fid.assign(buffer); + api_log!("Device::create_buffer -> {id:?}"); - let id = fid.assign_error(); - (id, Some(error)) + (id, None) } pub fn texture_destroy(&self, texture_id: id::TextureId) -> Result<(), resource::DestroyError> { @@ -474,10 +420,7 @@ impl Global { let hub = &self.hub; - let texture = hub - .textures - .get(texture_id) - .map_err(|_| resource::DestroyError::Invalid)?; + let texture = hub.textures.get(texture_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = texture.device.trace.lock().as_mut() { @@ -493,9 +436,10 @@ impl Global { let hub = &self.hub; - if let 
Some(_texture) = hub.textures.unregister(texture_id) { - #[cfg(feature = "trace")] - if let Some(t) = _texture.device.trace.lock().as_mut() { + let _texture = hub.textures.remove(texture_id); + #[cfg(feature = "trace")] + if let Ok(texture) = _texture.get() { + if let Some(t) = texture.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTexture(texture_id)); } } @@ -511,14 +455,12 @@ impl Global { let hub = &self.hub; - let fid = hub.texture_views.prepare(texture_id.backend(), id_in); + let fid = hub.texture_views.prepare(id_in); let error = 'error: { - let texture = match hub.textures.get(texture_id) { + let texture = match hub.textures.get(texture_id).get() { Ok(texture) => texture, - Err(_) => { - break 'error resource::CreateTextureViewError::InvalidTextureId(texture_id) - } + Err(e) => break 'error e.into(), }; let device = &texture.device; @@ -536,7 +478,7 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(view); + let id = fid.assign(Fallible::Valid(view)); api_log!("Texture::create_view({texture_id:?}) -> {id:?}"); @@ -544,7 +486,7 @@ impl Global { }; log::error!("Texture::create_view({texture_id:?}) error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -557,9 +499,11 @@ impl Global { let hub = &self.hub; - if let Some(_view) = hub.texture_views.unregister(texture_view_id) { - #[cfg(feature = "trace")] - if let Some(t) = _view.device.trace.lock().as_mut() { + let _view = hub.texture_views.remove(texture_view_id); + + #[cfg(feature = "trace")] + if let Ok(view) = _view.get() { + if let Some(t) = view.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTextureView(texture_view_id)); } } @@ -575,13 +519,10 @@ impl Global { profiling::scope!("Device::create_sampler"); let hub = &self.hub; - let fid = hub.samplers.prepare(device_id.backend(), id_in); + let fid = hub.samplers.prepare(id_in); let error = 'error: { - let device = match 
hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -593,13 +534,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(sampler); + let id = fid.assign(Fallible::Valid(sampler)); api_log!("Device::create_sampler -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -609,9 +550,11 @@ impl Global { let hub = &self.hub; - if let Some(_sampler) = hub.samplers.unregister(sampler_id) { - #[cfg(feature = "trace")] - if let Some(t) = _sampler.device.trace.lock().as_mut() { + let _sampler = hub.samplers.remove(sampler_id); + + #[cfg(feature = "trace")] + if let Ok(sampler) = _sampler.get() { + if let Some(t) = sampler.device.trace.lock().as_mut() { t.add(trace::Action::DestroySampler(sampler_id)); } } @@ -629,13 +572,10 @@ impl Global { profiling::scope!("Device::create_bind_group_layout"); let hub = &self.hub; - let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); + let fid = hub.bind_group_layouts.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -666,14 +606,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout.clone()); + let id = fid.assign(Fallible::Valid(layout.clone())); api_log!("Device::create_bind_group_layout -> {id:?}"); return (id, None); }; - let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -683,9 
+622,11 @@ impl Global { let hub = &self.hub; - if let Some(_layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + let _layout = hub.bind_group_layouts.remove(bind_group_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroupLayout(bind_group_layout_id)); } } @@ -703,13 +644,10 @@ impl Global { profiling::scope!("Device::create_pipeline_layout"); let hub = &self.hub; - let fid = hub.pipeline_layouts.prepare(device_id.backend(), id_in); + let fid = hub.pipeline_layouts.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -720,19 +658,13 @@ impl Global { let bind_group_layouts_guard = hub.bind_group_layouts.read(); desc.bind_group_layouts .iter() - .map(|bgl_id| { - bind_group_layouts_guard.get_owned(*bgl_id).map_err(|_| { - binding_model::CreatePipelineLayoutError::InvalidBindGroupLayoutId( - *bgl_id, - ) - }) - }) + .map(|bgl_id| bind_group_layouts_guard.get(*bgl_id).get()) .collect::, _>>() }; let bind_group_layouts = match bind_group_layouts { Ok(bind_group_layouts) => bind_group_layouts, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let desc = binding_model::ResolvedPipelineLayoutDescriptor { @@ -746,12 +678,12 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout); + let id = fid.assign(Fallible::Valid(layout)); api_log!("Device::create_pipeline_layout -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -760,9 +692,12 @@ impl Global { 
api_log!("PipelineLayout::drop {pipeline_layout_id:?}"); let hub = &self.hub; - if let Some(_layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + + let _layout = hub.pipeline_layouts.remove(pipeline_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { t.add(trace::Action::DestroyPipelineLayout(pipeline_layout_id)); } } @@ -777,79 +712,80 @@ impl Global { profiling::scope!("Device::create_bind_group"); let hub = &self.hub; - let fid = hub.bind_groups.prepare(device_id.backend(), id_in); + let fid = hub.bind_groups.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } - let layout = match hub.bind_group_layouts.get(desc.layout) { + let layout = match hub.bind_group_layouts.get(desc.layout).get() { Ok(layout) => layout, - Err(..) 
=> break 'error binding_model::CreateBindGroupError::InvalidLayout, + Err(e) => break 'error e.into(), }; - fn map_entry<'a>( + fn resolve_entry<'a>( e: &BindGroupEntry<'a>, - buffer_storage: &Storage, - sampler_storage: &Storage, - texture_view_storage: &Storage, + buffer_storage: &Storage>, + sampler_storage: &Storage>, + texture_view_storage: &Storage>, ) -> Result, binding_model::CreateBindGroupError> { - let map_buffer = |bb: &BufferBinding| { + let resolve_buffer = |bb: &BufferBinding| { buffer_storage - .get_owned(bb.buffer_id) + .get(bb.buffer_id) + .get() .map(|buffer| ResolvedBufferBinding { buffer, offset: bb.offset, size: bb.size, }) - .map_err(|_| { - binding_model::CreateBindGroupError::InvalidBufferId(bb.buffer_id) - }) + .map_err(binding_model::CreateBindGroupError::from) }; - let map_sampler = |id: &id::SamplerId| { + let resolve_sampler = |id: &id::SamplerId| { sampler_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidSamplerId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; - let map_view = |id: &id::TextureViewId| { + let resolve_view = |id: &id::TextureViewId| { texture_view_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidTextureViewId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; let resource = match e.resource { BindingResource::Buffer(ref buffer) => { - ResolvedBindingResource::Buffer(map_buffer(buffer)?) + ResolvedBindingResource::Buffer(resolve_buffer(buffer)?) } BindingResource::BufferArray(ref buffers) => { let buffers = buffers .iter() - .map(map_buffer) + .map(resolve_buffer) .collect::, _>>()?; ResolvedBindingResource::BufferArray(Cow::Owned(buffers)) } BindingResource::Sampler(ref sampler) => { - ResolvedBindingResource::Sampler(map_sampler(sampler)?) + ResolvedBindingResource::Sampler(resolve_sampler(sampler)?) 
} BindingResource::SamplerArray(ref samplers) => { let samplers = samplers .iter() - .map(map_sampler) + .map(resolve_sampler) .collect::, _>>()?; ResolvedBindingResource::SamplerArray(Cow::Owned(samplers)) } BindingResource::TextureView(ref view) => { - ResolvedBindingResource::TextureView(map_view(view)?) + ResolvedBindingResource::TextureView(resolve_view(view)?) } BindingResource::TextureViewArray(ref views) => { - let views = views.iter().map(map_view).collect::, _>>()?; + let views = views + .iter() + .map(resolve_view) + .collect::, _>>()?; ResolvedBindingResource::TextureViewArray(Cow::Owned(views)) } }; @@ -865,7 +801,7 @@ impl Global { let sampler_guard = hub.samplers.read(); desc.entries .iter() - .map(|e| map_entry(e, &buffer_guard, &sampler_guard, &texture_view_guard)) + .map(|e| resolve_entry(e, &buffer_guard, &sampler_guard, &texture_view_guard)) .collect::, _>>() }; let entries = match entries { @@ -884,14 +820,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(bind_group); + let id = fid.assign(Fallible::Valid(bind_group)); api_log!("Device::create_bind_group -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -901,8 +837,10 @@ impl Global { let hub = &self.hub; - if let Some(_bind_group) = hub.bind_groups.unregister(bind_group_id) { - #[cfg(feature = "trace")] + let _bind_group = hub.bind_groups.remove(bind_group_id); + + #[cfg(feature = "trace")] + if let Ok(_bind_group) = _bind_group.get() { if let Some(t) = _bind_group.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroup(bind_group_id)); } @@ -936,13 +874,10 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = &self.hub; - let fid = hub.shader_modules.prepare(device_id.backend(), id_in); + let fid = hub.shader_modules.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => 
device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -981,14 +916,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1011,13 +946,10 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = &self.hub; - let fid = hub.shader_modules.prepare(device_id.backend(), id_in); + let fid = hub.shader_modules.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1035,14 +967,14 @@ impl Global { Ok(shader) => shader, Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module_spirv -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module_spirv error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1052,12 +984,13 @@ impl Global { let hub = &self.hub; - if let Some(shader_module) = hub.shader_modules.unregister(shader_module_id) { - #[cfg(feature = "trace")] + let _shader_module = hub.shader_modules.remove(shader_module_id); + + #[cfg(feature = "trace")] + if let Ok(shader_module) = _shader_module.get() { if let Some(t) = shader_module.device.trace.lock().as_mut() { t.add(trace::Action::DestroyShaderModule(shader_module_id)); 
} - drop(shader_module) } } @@ -1070,17 +1003,13 @@ impl Global { profiling::scope!("Device::create_command_encoder"); let hub = &self.hub; - let fid = hub.command_buffers.prepare( - device_id.backend(), - id_in.map(|id| id.into_command_buffer_id()), - ); + let fid = hub + .command_buffers + .prepare(id_in.map(|id| id.into_command_buffer_id())); - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId, - }; + let device = self.hub.devices.get(device_id); + let error = 'error: { let command_buffer = match device.create_command_encoder(&desc.label) { Ok(command_buffer) => command_buffer, Err(e) => break 'error e, @@ -1091,7 +1020,7 @@ impl Global { return (id.into_command_encoder_id(), None); }; - let id = fid.assign_error(); + let id = fid.assign(Arc::new(CommandBuffer::new_invalid(&device, &desc.label))); (id.into_command_encoder_id(), Some(error)) } @@ -1101,12 +1030,9 @@ impl Global { let hub = &self.hub; - if let Some(cmd_buf) = hub + let _cmd_buf = hub .command_buffers - .unregister(command_encoder_id.into_command_buffer_id()) - { - cmd_buf.data.lock().as_mut().unwrap().encoder.discard(); - } + .remove(command_encoder_id.into_command_buffer_id()); } pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { @@ -1142,19 +1068,10 @@ impl Global { let hub = &self.hub; - let fid = hub - .render_bundles - .prepare(bundle_encoder.parent().backend(), id_in); + let fid = hub.render_bundles.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(bundle_encoder.parent()) { - Ok(device) => device, - Err(_) => { - break 'error command::RenderBundleError::from_device_error( - DeviceError::InvalidDeviceId, - ); - } - }; + let device = self.hub.devices.get(bundle_encoder.parent()); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1175,13 +1092,13 @@ impl Global { Err(e) => break 'error e, }; - let id = 
fid.assign(render_bundle); + let id = fid.assign(Fallible::Valid(render_bundle)); api_log!("RenderBundleEncoder::finish -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1191,9 +1108,11 @@ impl Global { let hub = &self.hub; - if let Some(_bundle) = hub.render_bundles.unregister(render_bundle_id) { - #[cfg(feature = "trace")] - if let Some(t) = _bundle.device.trace.lock().as_mut() { + let _bundle = hub.render_bundles.remove(render_bundle_id); + + #[cfg(feature = "trace")] + if let Ok(bundle) = _bundle.get() { + if let Some(t) = bundle.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderBundle(render_bundle_id)); } } @@ -1208,13 +1127,10 @@ impl Global { profiling::scope!("Device::create_query_set"); let hub = &self.hub; - let fid = hub.query_sets.prepare(device_id.backend(), id_in); + let fid = hub.query_sets.prepare(id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1229,13 +1145,13 @@ impl Global { Err(err) => break 'error err, }; - let id = fid.assign(query_set); + let id = fid.assign(Fallible::Valid(query_set)); api_log!("Device::create_query_set -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1245,9 +1161,11 @@ impl Global { let hub = &self.hub; - if let Some(_query_set) = hub.query_sets.unregister(query_set_id) { - #[cfg(feature = "trace")] - if let Some(trace) = _query_set.device.trace.lock().as_mut() { + let _query_set = hub.query_sets.remove(query_set_id); + + #[cfg(feature = "trace")] + if let Ok(query_set) = _query_set.get() { + if let Some(trace) = 
query_set.device.trace.lock().as_mut() { trace.add(trace::Action::DestroyQuerySet(query_set_id)); } } @@ -1270,7 +1188,7 @@ impl Global { let missing_implicit_pipeline_ids = desc.layout.is_none() && id_in.is_some() && implicit_pipeline_ids.is_none(); - let fid = hub.render_pipelines.prepare(device_id.backend(), id_in); + let fid = hub.render_pipelines.prepare(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let error = 'error: { @@ -1279,10 +1197,7 @@ impl Global { break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1295,37 +1210,30 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) .transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let vertex = { let module = hub .shader_modules .get(desc.vertex.stage.module) - .map_err(|_| pipeline::CreateRenderPipelineError::Stage { + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::VERTEX, - error: crate::validation::StageError::InvalidModule, + error: e.into(), }); let module = match module { Ok(module) => module, @@ -1347,12 +1255,14 @@ impl Global { }; let fragment = if let Some(ref 
state) = desc.fragment { - let module = hub.shader_modules.get(state.stage.module).map_err(|_| { - pipeline::CreateRenderPipelineError::Stage { + let module = hub + .shader_modules + .get(state.stage.module) + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::FRAGMENT, - error: crate::validation::StageError::InvalidModule, - } - }); + error: e.into(), + }); let module = match module { Ok(module) => module, Err(e) => break 'error e, @@ -1406,7 +1316,7 @@ impl Global { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. @@ -1418,29 +1328,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_render_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. 
if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1462,16 +1372,15 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(id_in); + let error = 'error: { - let pipeline = match hub.render_pipelines.get(pipeline_id) { + let pipeline = match hub.render_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1479,10 +1388,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1492,9 +1398,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.render_pipelines.unregister(render_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.render_pipelines.remove(render_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderPipeline(render_pipeline_id)); } } @@ -1517,7 +1425,7 @@ impl Global { let missing_implicit_pipeline_ids = desc.layout.is_none() 
&& id_in.is_some() && implicit_pipeline_ids.is_none(); - let fid = hub.compute_pipelines.prepare(device_id.backend(), id_in); + let fid = hub.compute_pipelines.prepare(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let error = 'error: { @@ -1526,10 +1434,7 @@ impl Global { break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1542,34 +1447,23 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) .transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; - let module = hub - .shader_modules - .get(desc.stage.module) - .map_err(|_| crate::validation::StageError::InvalidModule); + let module = hub.shader_modules.get(desc.stage.module).get(); let module = match module { Ok(module) => module, Err(e) => break 'error e.into(), @@ -1608,7 +1502,7 @@ impl Global { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If 
the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. @@ -1620,29 +1514,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_compute_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1662,17 +1556,16 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(id_in); + let error = 'error: { - let pipeline = match hub.compute_pipelines.get(pipeline_id) { + let pipeline = match hub.compute_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error 
binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1681,10 +1574,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1694,9 +1584,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.compute_pipelines.remove(compute_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyComputePipeline(compute_pipeline_id)); } } @@ -1718,13 +1610,9 @@ impl Global { let hub = &self.hub; - let fid = hub.pipeline_caches.prepare(device_id.backend(), id_in); + let fid = hub.pipeline_caches.prepare(id_in); let error: pipeline::CreatePipelineCacheError = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - // TODO: Handle error properly - Err(crate::storage::InvalidId) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1737,7 +1625,7 @@ impl Global { let cache = unsafe { device.create_pipeline_cache(desc) }; match cache { Ok(cache) => { - let id = fid.assign(cache); + let id = fid.assign(Fallible::Valid(cache)); api_log!("Device::create_pipeline_cache -> {id:?}"); return (id, None); } @@ -1745,7 +1633,7 @@ impl Global { } }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1756,12 +1644,13 @@ impl Global { let hub = &self.hub; - if let Some(cache) = hub.pipeline_caches.unregister(pipeline_cache_id) { - #[cfg(feature = "trace")] + let _cache = 
hub.pipeline_caches.remove(pipeline_cache_id); + + #[cfg(feature = "trace")] + if let Ok(cache) = _cache.get() { if let Some(t) = cache.device.trace.lock().as_mut() { t.add(trace::Action::DestroyPipelineCache(pipeline_cache_id)); } - drop(cache) } } @@ -1892,13 +1781,7 @@ impl Global { // User callbacks must not be called while we are holding locks. let user_callbacks; { - let hub = &self.hub; - let surface_guard = self.surfaces.read(); - - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1909,10 +1792,7 @@ impl Global { break 'error e.into(); } - let surface = match surface_guard.get(surface_id) { - Ok(surface) => surface, - Err(_) => break 'error E::InvalidSurface, - }; + let surface = self.surfaces.get(surface_id); let caps = match surface.get_capabilities(&device.adapter) { Ok(caps) => caps, @@ -1995,7 +1875,7 @@ impl Global { // // https://github.com/gfx-rs/wgpu/issues/4105 - let surface_raw = surface.raw(device_id.backend()).unwrap(); + let surface_raw = surface.raw(device.backend()).unwrap(); match unsafe { surface_raw.configure(device.raw(), &hal_config) } { Ok(()) => (), Err(error) => { @@ -2003,7 +1883,9 @@ impl Global { hal::SurfaceError::Outdated | hal::SurfaceError::Lost => { E::InvalidSurface } - hal::SurfaceError::Device(error) => E::Device(error.into()), + hal::SurfaceError::Device(error) => { + E::Device(device.handle_hal_error(error)) + } hal::SurfaceError::Other(message) => { log::error!("surface configuration failed: {}", message); E::InvalidSurface @@ -2037,11 +1919,7 @@ impl Global { ) -> Result { api_log!("Device::poll {maintain:?}"); - let hub = &self.hub; - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; + let device = self.hub.devices.get(device_id); let DevicePoll { closures, @@ 
-2079,7 +1957,6 @@ impl Global { /// submissions still in flight. fn poll_all_devices_of_api( &self, - backend: wgt::Backend, force_wait: bool, closures: &mut UserClosures, ) -> Result { @@ -2090,7 +1967,7 @@ impl Global { { let device_guard = hub.devices.read(); - for (_id, device) in device_guard.iter(backend) { + for (_id, device) in device_guard.iter() { let maintain = if force_wait { wgt::Maintain::Wait } else { @@ -2120,66 +1997,33 @@ impl Global { pub fn poll_all_devices(&self, force_wait: bool) -> Result { api_log!("poll_all_devices"); let mut closures = UserClosures::default(); - let mut all_queue_empty = true; - - #[cfg(vulkan)] - { - all_queue_empty &= - self.poll_all_devices_of_api(wgt::Backend::Vulkan, force_wait, &mut closures)?; - } - #[cfg(metal)] - { - all_queue_empty &= - self.poll_all_devices_of_api(wgt::Backend::Metal, force_wait, &mut closures)?; - } - #[cfg(dx12)] - { - all_queue_empty &= - self.poll_all_devices_of_api(wgt::Backend::Dx12, force_wait, &mut closures)?; - } - #[cfg(gles)] - { - all_queue_empty &= - self.poll_all_devices_of_api(wgt::Backend::Gl, force_wait, &mut closures)?; - } + let all_queue_empty = self.poll_all_devices_of_api(force_wait, &mut closures)?; closures.fire(); Ok(all_queue_empty) } - pub fn device_start_capture(&self, id: DeviceId) { + pub fn device_start_capture(&self, device_id: DeviceId) { api_log!("Device::start_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { device.raw().start_capture() }; + if !device.is_valid() { + return; } + unsafe { device.raw().start_capture() }; } - pub fn device_stop_capture(&self, id: DeviceId) { + pub fn device_stop_capture(&self, device_id: DeviceId) { api_log!("Device::stop_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { 
device.raw().stop_capture() }; + if !device.is_valid() { + return; } - } - - // This is a test-only function to force the device into an - // invalid state by inserting an error value in its place in - // the registry. - pub fn device_make_invalid(&self, device_id: DeviceId) { - let hub = &self.hub; - hub.devices.force_replace_with_error(device_id); + unsafe { device.raw().stop_capture() }; } pub fn pipeline_cache_get_data(&self, id: id::PipelineCacheId) -> Option> { @@ -2187,7 +2031,7 @@ impl Global { api_log!("PipelineCache::get_data"); let hub = &self.hub; - if let Ok(cache) = hub.pipeline_caches.get(id) { + if let Ok(cache) = hub.pipeline_caches.get(id).get() { // TODO: Is this check needed? if !cache.device.is_valid() { return None; @@ -2215,22 +2059,20 @@ impl Global { profiling::scope!("Device::drop"); api_log!("Device::drop {device_id:?}"); - let hub = &self.hub; - if let Some(device) = hub.devices.unregister(device_id) { - let device_lost_closure = device.lock_life().device_lost_closure.take(); - if let Some(closure) = device_lost_closure { - closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); - } + let device = self.hub.devices.remove(device_id); + let device_lost_closure = device.lock_life().device_lost_closure.take(); + if let Some(closure) = device_lost_closure { + closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); + } - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. - debug_assert!(device.lock_life().queue_empty()); - device.pending_writes.lock().deactivate(); + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. 
We know we were + // just polled, so `life_tracker.free_resources` is empty. + debug_assert!(device.lock_life().queue_empty()); + device.pending_writes.lock().deactivate(); - drop(device); - } + drop(device); } // This closure will be called exactly once during "lose the device", @@ -2240,71 +2082,47 @@ impl Global { device_id: DeviceId, device_lost_closure: DeviceLostClosure, ) { - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - let mut life_tracker = device.lock_life(); - if let Some(existing_closure) = life_tracker.device_lost_closure.take() { - // It's important to not hold the lock while calling the closure. - drop(life_tracker); - existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); - life_tracker = device.lock_life(); - } - life_tracker.device_lost_closure = Some(device_lost_closure); - } else { - // No device? Okay. Just like we have to call any existing closure - // before we drop it, we need to call this closure before we exit - // this function, because there's no device that is ever going to - // call it. - device_lost_closure.call(DeviceLostReason::DeviceInvalid, "".to_string()); + let device = self.hub.devices.get(device_id); + + let mut life_tracker = device.lock_life(); + if let Some(existing_closure) = life_tracker.device_lost_closure.take() { + // It's important to not hold the lock while calling the closure. + drop(life_tracker); + existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); + life_tracker = device.lock_life(); } + life_tracker.device_lost_closure = Some(device_lost_closure); } pub fn device_destroy(&self, device_id: DeviceId) { api_log!("Device::destroy {device_id:?}"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - // Follow the steps at - // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. - // It's legal to call destroy multiple times, but if the device - // is already invalid, there's nothing more to do. 
There's also - // no need to return an error. - if !device.is_valid() { - return; - } + let device = self.hub.devices.get(device_id); - // The last part of destroy is to lose the device. The spec says - // delay that until all "currently-enqueued operations on any - // queue on this device are completed." This is accomplished by - // setting valid to false, and then relying upon maintain to - // check for empty queues and a DeviceLostClosure. At that time, - // the DeviceLostClosure will be called with "destroyed" as the - // reason. - device.valid.store(false, Ordering::Relaxed); + // Follow the steps at + // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. + // It's legal to call destroy multiple times, but if the device + // is already invalid, there's nothing more to do. There's also + // no need to return an error. + if !device.is_valid() { + return; } - } - - pub fn device_mark_lost(&self, device_id: DeviceId, message: &str) { - api_log!("Device::mark_lost {device_id:?}"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - device.lose(message); - } + // The last part of destroy is to lose the device. The spec says + // delay that until all "currently-enqueued operations on any + // queue on this device are completed." This is accomplished by + // setting valid to false, and then relying upon maintain to + // check for empty queues and a DeviceLostClosure. At that time, + // the DeviceLostClosure will be called with "destroyed" as the + // reason. 
+ device.valid.store(false, Ordering::Release); } pub fn device_get_internal_counters(&self, device_id: DeviceId) -> wgt::InternalCounters { - let hub = &self.hub; - if let Ok(device) = hub.devices.get(device_id) { - wgt::InternalCounters { - hal: device.get_hal_counters(), - core: wgt::CoreCounters {}, - } - } else { - Default::default() + let device = self.hub.devices.get(device_id); + wgt::InternalCounters { + hal: device.get_hal_counters(), + core: wgt::CoreCounters {}, } } @@ -2312,21 +2130,15 @@ impl Global { &self, device_id: DeviceId, ) -> Option { - let hub = &self.hub; - hub.devices - .get(device_id) - .ok() - .and_then(|device| device.generate_allocator_report()) + let device = self.hub.devices.get(device_id); + device.generate_allocator_report() } pub fn queue_drop(&self, queue_id: QueueId) { profiling::scope!("Queue::drop"); api_log!("Queue::drop {queue_id:?}"); - let hub = &self.hub; - if let Some(queue) = hub.queues.unregister(queue_id) { - drop(queue); - } + self.hub.queues.remove(queue_id); } pub fn buffer_map_async( @@ -2342,9 +2154,9 @@ impl Global { let hub = &self.hub; let op_and_err = 'error: { - let buffer = match hub.buffers.get(buffer_id) { + let buffer = match hub.buffers.get(buffer_id).get() { Ok(buffer) => buffer, - Err(_) => break 'error Some((op, BufferAccessError::InvalidBufferId(buffer_id))), + Err(e) => break 'error Some((op, e.into())), }; buffer.map_async(offset, size, op).err() @@ -2375,10 +2187,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; { let snatch_guard = buffer.device.snatchable_lock.read(); @@ -2451,10 +2260,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; let snatch_guard = buffer.device.snatchable_lock.read(); 
buffer.check_destroyed(&snatch_guard)?; diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 777dd262ab..959f3cada7 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -308,7 +308,7 @@ fn map_buffer( let raw_buffer = buffer.try_raw(snatch_guard)?; let mapping = unsafe { raw.map_buffer(raw_buffer, offset..offset + size) - .map_err(DeviceError::from)? + .map_err(|e| buffer.device.handle_hal_error(e))? }; if !mapping.is_coherent && kind == HostMap::Read { @@ -336,19 +336,41 @@ fn map_buffer( let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) }; - for uninitialized in buffer - .initialization_status - .write() - .drain(offset..(size + offset)) + // We can't call flush_mapped_ranges in this case, so we can't drain the uninitialized ranges either + if !mapping.is_coherent + && kind == HostMap::Read + && !buffer.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // The mapping's pointer is already offset, however we track the - // uninitialized range relative to the buffer's start. - let fill_range = - (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; - mapped[fill_range].fill(0); - - if !mapping.is_coherent && kind == HostMap::Read { - unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) }; + for uninitialized in buffer + .initialization_status + .write() + .uninitialized(offset..(size + offset)) + { + // The mapping's pointer is already offset, however we track the + // uninitialized range relative to the buffer's start. + let fill_range = + (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; + mapped[fill_range].fill(0); + } + } else { + for uninitialized in buffer + .initialization_status + .write() + .drain(offset..(size + offset)) + { + // The mapping's pointer is already offset, however we track the + // uninitialized range relative to the buffer's start. 
+ let fill_range = + (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; + mapped[fill_range].fill(0); + + // NOTE: This is only possible when MAPPABLE_PRIMARY_BUFFERS is enabled. + if !mapping.is_coherent + && kind == HostMap::Read + && buffer.usage.contains(wgt::BufferUsages::MAP_WRITE) + { + unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) }; + } } } @@ -392,19 +414,20 @@ pub enum DeviceError { OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, - #[error("DeviceId is invalid")] - InvalidDeviceId, #[error(transparent)] DeviceMismatch(#[from] Box), } -impl From for DeviceError { - fn from(error: hal::DeviceError) -> Self { +impl DeviceError { + /// Only use this function in contexts where there is no `Device`. + /// + /// Use [`Device::handle_hal_error`] otherwise. + pub fn from_hal(error: hal::DeviceError) -> Self { match error { - hal::DeviceError::Lost => DeviceError::Lost, - hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory, - hal::DeviceError::ResourceCreationFailed => DeviceError::ResourceCreationFailed, - hal::DeviceError::Unexpected => DeviceError::Lost, + hal::DeviceError::Lost => Self::Lost, + hal::DeviceError::OutOfMemory => Self::OutOfMemory, + hal::DeviceError::ResourceCreationFailed => Self::ResourceCreationFailed, + hal::DeviceError::Unexpected => Self::Lost, } } } @@ -434,20 +457,12 @@ pub struct ImplicitPipelineIds<'a> { impl ImplicitPipelineIds<'_> { fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { - let backend = self.root_id.backend(); ImplicitPipelineContext { - root_id: hub - .pipeline_layouts - .prepare(backend, Some(self.root_id)) - .into_id(), + root_id: hub.pipeline_layouts.prepare(Some(self.root_id)).id(), group_ids: self .group_ids .iter() - .map(|id_in| { - hub.bind_group_layouts - .prepare(backend, Some(*id_in)) - .into_id() - }) + .map(|id_in| hub.bind_group_layouts.prepare(Some(*id_in)).id()) 
.collect(), } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index e516e0dac7..f576b24125 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -4,7 +4,8 @@ use crate::{ api_log, command::{ extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range, - ClearError, CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError, + ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide, + ImageCopyTexture, TransferError, }, conv, device::{DeviceError, WaitIdleError}, @@ -16,8 +17,8 @@ use crate::{ lock::RwLockWriteGuard, resource::{ Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError, - DestroyedTexture, FlushedStagingBuffer, Labeled, ParentDevice, ResourceErrorIdent, - StagingBuffer, Texture, TextureInner, Trackable, + DestroyedTexture, FlushedStagingBuffer, InvalidResourceError, Labeled, ParentDevice, + ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable, }, resource_log, track::{self, Tracker, TrackerIndex}, @@ -269,17 +270,20 @@ impl PendingWrites { fn pre_submit( &mut self, command_allocator: &CommandAllocator, - device: &dyn hal::DynDevice, - queue: &dyn hal::DynQueue, + device: &Device, + queue: &Queue, ) -> Result, DeviceError> { if self.is_recording { let pending_buffers = mem::take(&mut self.dst_buffers); let pending_textures = mem::take(&mut self.dst_textures); - let cmd_buf = unsafe { self.command_encoder.end_encoding()? 
}; + let cmd_buf = unsafe { self.command_encoder.end_encoding() } + .map_err(|e| device.handle_hal_error(e))?; self.is_recording = false; - let new_encoder = command_allocator.acquire_encoder(device, queue)?; + let new_encoder = command_allocator + .acquire_encoder(device.raw(), queue.raw()) + .map_err(|e| device.handle_hal_error(e))?; let encoder = EncoderInFlight { raw: mem::replace(&mut self.command_encoder, new_encoder), @@ -318,15 +322,9 @@ impl PendingWrites { } } -#[derive(Clone, Debug, Error)] -#[error("Queue is invalid")] -pub struct InvalidQueue; - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueWriteError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -335,13 +333,13 @@ pub enum QueueWriteError { MemoryInitFailure(#[from] ClearError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueSubmitError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -356,6 +354,10 @@ pub enum QueueSubmitError { SurfaceUnconfigured, #[error("GPU got stuck :(")] StuckGpu, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), + #[error(transparent)] + CommandEncoder(#[from] CommandEncoderError), } //TODO: move out common parts of write_xxx. 
@@ -373,15 +375,9 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -441,18 +437,15 @@ impl Global { profiling::scope!("Queue::create_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; let staging_buffer = StagingBuffer::new(device, buffer_size)?; let ptr = unsafe { staging_buffer.ptr() }; - let fid = hub.staging_buffers.prepare(queue_id.backend(), id_in); - let id = fid.assign(Arc::new(staging_buffer)); + let fid = hub.staging_buffers.prepare(id_in); + let id = fid.assign(staging_buffer); resource_log!("Queue::create_staging_buffer {id:?}"); Ok((id, ptr)) @@ -468,18 +461,11 @@ impl Global { profiling::scope!("Queue::write_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; - let staging_buffer = hub - .staging_buffers - .unregister(staging_buffer_id) - .and_then(Arc::into_inner) - .ok_or_else(|| QueueWriteError::Transfer(TransferError::InvalidBufferId(buffer_id)))?; + let staging_buffer = hub.staging_buffers.remove(staging_buffer_id); let mut pending_writes = device.pending_writes.lock(); @@ -512,10 +498,7 @@ impl Global { profiling::scope!("Queue::validate_write_buffer"); let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; self.queue_validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?; @@ -558,10 +541,7 @@ impl Global { ) -> 
Result<(), QueueWriteError> { let hub = &self.hub; - let dst = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let dst = hub.buffers.get(buffer_id).get()?; let transition = { let mut trackers = device.trackers.lock(); @@ -618,10 +598,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -641,10 +618,7 @@ impl Global { return Ok(()); } - let dst = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(destination.texture))?; + let dst = hub.textures.get(destination.texture).get()?; dst.same_device_as(queue.as_ref())?; @@ -735,12 +709,6 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - // Re-get `dst` immutably here, so that the mutable borrow of the - // `texture_guard.get` above ends in time for the `clear_texture` - // call above. Since we've held `texture_guard` the whole time, we know - // the texture hasn't gone away in the mean time, so we can unwrap. 
- let dst = hub.textures.get(destination.texture).unwrap(); - let dst_raw = dst.try_raw(&snatch_guard)?; let (block_width, block_height) = dst.desc.format.block_dimensions(); @@ -859,10 +827,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -890,7 +855,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let dst = hub.textures.get(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).get()?; if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { return Err( @@ -1069,10 +1034,7 @@ impl Global { let (submit_index, callbacks) = { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueSubmitError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -1093,115 +1055,77 @@ impl Global { let mut submit_surface_textures_owned = FastHashMap::default(); { - let mut command_buffer_guard = hub.command_buffers.write(); + let command_buffer_guard = hub.command_buffers.read(); if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); + let mut first_error = None; + //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. // finish all the command buffers first - for &cmb_id in command_buffer_ids { + for command_buffer_id in command_buffer_ids { profiling::scope!("process command buffer"); // we reset the used surface textures every time we use // it, so make sure to set_size on it. 
used_surface_textures.set_size(device.tracker_indices.textures.size()); + let command_buffer = command_buffer_guard.get(*command_buffer_id); + + // Note that we are required to invalidate all command buffers in both the success and failure paths. + // This is why we `continue` and don't early return via `?`. #[allow(unused_mut)] - let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) { - Ok(cmdbuf) => cmdbuf, - Err(_) => continue, - }; + let mut cmd_buf_data = command_buffer.try_take(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { - trace.add(Action::Submit( - submit_index, - cmdbuf - .data - .lock() - .as_mut() - .unwrap() - .commands - .take() - .unwrap(), - )); - } - - cmdbuf.same_device_as(queue.as_ref())?; - - if !cmdbuf.is_finished() { - let cmdbuf = Arc::into_inner(cmdbuf).expect( - "Command buffer cannot be destroyed because is still in use", - ); - device.destroy_command_buffer(cmdbuf); - continue; + if let Ok(ref mut cmd_buf_data) = cmd_buf_data { + trace.add(Action::Submit( + submit_index, + cmd_buf_data.commands.take().unwrap(), + )); + } } - { - profiling::scope!("check resource state"); - - let cmd_buf_data = cmdbuf.data.lock(); - let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers; - - // update submission IDs - { - profiling::scope!("buffers"); - for buffer in cmd_buf_trackers.buffers.used_resources() { - buffer.check_destroyed(&snatch_guard)?; - - match *buffer.map_state.lock() { - BufferMapState::Idle => (), - _ => { - return Err(QueueSubmitError::BufferStillMapped( - buffer.error_ident(), - )) - } - } + let mut baked = match cmd_buf_data { + Ok(cmd_buf_data) => { + let res = validate_command_buffer( + &command_buffer, + &queue, + &cmd_buf_data, + &snatch_guard, + &mut submit_surface_textures_owned, + &mut used_surface_textures, + ); + if let Err(err) = res { + first_error.get_or_insert(err); + cmd_buf_data.destroy(&command_buffer.device); + continue; } + 
cmd_buf_data.into_baked_commands() } - { - profiling::scope!("textures"); - for texture in cmd_buf_trackers.textures.used_resources() { - let should_extend = match texture.try_inner(&snatch_guard)? { - TextureInner::Native { .. } => false, - TextureInner::Surface { .. } => { - // Compare the Arcs by pointer as Textures don't implement Eq. - submit_surface_textures_owned - .insert(Arc::as_ptr(&texture), texture.clone()); - - true - } - }; - if should_extend { - unsafe { - used_surface_textures - .merge_single( - &texture, - None, - hal::TextureUses::PRESENT, - ) - .unwrap(); - }; - } - } + Err(err) => { + first_error.get_or_insert(err.into()); + continue; } + }; + + if first_error.is_some() { + continue; } - let mut baked = cmdbuf.from_arc_into_baked(); // execute resource transitions unsafe { - baked - .encoder - .begin_encoding(hal_label( - Some("(wgpu internal) Transit"), - device.instance_flags, - )) - .map_err(DeviceError::from)? - }; + baked.encoder.begin_encoding(hal_label( + Some("(wgpu internal) Transit"), + device.instance_flags, + )) + } + .map_err(|e| device.handle_hal_error(e))?; //Note: locking the trackers has to be done after the storages let mut trackers = device.trackers.lock(); @@ -1224,14 +1148,12 @@ impl Global { // but here we have a command encoder by hand, so it's easier to use it. if !used_surface_textures.is_empty() { unsafe { - baked - .encoder - .begin_encoding(hal_label( - Some("(wgpu internal) Present"), - device.instance_flags, - )) - .map_err(DeviceError::from)? 
- }; + baked.encoder.begin_encoding(hal_label( + Some("(wgpu internal) Present"), + device.instance_flags, + )) + } + .map_err(|e| device.handle_hal_error(e))?; let texture_barriers = trackers .textures .set_from_usage_scope_and_drain_transitions( @@ -1256,6 +1178,10 @@ impl Global { pending_textures: FastHashMap::default(), }); } + + if let Some(first_error) = first_error { + return Err(first_error); + } } } @@ -1299,7 +1225,7 @@ impl Global { } if let Some(pending_execution) = - pending_writes.pre_submit(&device.command_allocator, device.raw(), queue.raw())? + pending_writes.pre_submit(&device.command_allocator, device, &queue)? { active_executions.insert(0, pending_execution); } @@ -1324,15 +1250,13 @@ impl Global { } unsafe { - queue - .raw() - .submit( - &hal_command_buffers, - &submit_surface_textures, - (fence.as_mut(), submit_index), - ) - .map_err(DeviceError::from)?; + queue.raw().submit( + &hal_command_buffers, + &submit_surface_textures, + (fence.as_mut(), submit_index), + ) } + .map_err(|e| device.handle_hal_error(e))?; // Advance the successful submission index. 
device @@ -1372,27 +1296,71 @@ impl Global { Ok(submit_index) } - pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> Result { - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => Ok(unsafe { queue.raw().get_timestamp_period() }), - Err(_) => Err(InvalidQueue), - } + pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 { + let queue = self.hub.queues.get(queue_id); + unsafe { queue.raw().get_timestamp_period() } } pub fn queue_on_submitted_work_done( &self, queue_id: QueueId, closure: SubmittedWorkDoneClosure, - ) -> Result<(), InvalidQueue> { + ) { api_log!("Queue::on_submitted_work_done {queue_id:?}"); //TODO: flush pending writes - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => queue.device.lock_life().add_work_done_closure(closure), - Err(_) => return Err(InvalidQueue), + let queue = self.hub.queues.get(queue_id); + queue.device.lock_life().add_work_done_closure(closure); + } +} + +fn validate_command_buffer( + command_buffer: &CommandBuffer, + queue: &Queue, + cmd_buf_data: &crate::command::CommandBufferMutable, + snatch_guard: &crate::snatch::SnatchGuard<'_>, + submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc>, + used_surface_textures: &mut track::TextureUsageScope, +) -> Result<(), QueueSubmitError> { + command_buffer.same_device_as(queue)?; + cmd_buf_data.check_finished()?; + + { + profiling::scope!("check resource state"); + + { + profiling::scope!("buffers"); + for buffer in cmd_buf_data.trackers.buffers.used_resources() { + buffer.check_destroyed(snatch_guard)?; + + match *buffer.map_state.lock() { + BufferMapState::Idle => (), + _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())), + } + } + } + { + profiling::scope!("textures"); + for texture in cmd_buf_data.trackers.textures.used_resources() { + let should_extend = match texture.try_inner(snatch_guard)? { + TextureInner::Native { .. } => false, + TextureInner::Surface { .. 
} => { + // Compare the Arcs by pointer as Textures don't implement Eq. + submit_surface_textures_owned + .insert(Arc::as_ptr(&texture), texture.clone()); + + true + } + }; + if should_extend { + unsafe { + used_surface_textures + .merge_single(&texture, None, hal::TextureUses::PRESENT) + .unwrap(); + }; + } + } } - Ok(()) } + Ok(()) } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 5f50d38c8b..03b183e085 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -21,7 +21,7 @@ use crate::{ pipeline, pool::ResourcePool, resource::{ - self, Buffer, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, + self, Buffer, Fallible, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, TextureView, TextureViewNotRenderableReason, TrackingData, }, resource_log, @@ -39,7 +39,9 @@ use once_cell::sync::OnceCell; use smallvec::SmallVec; use thiserror::Error; -use wgt::{DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension}; +use wgt::{ + math::align_to, DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension, +}; use std::{ borrow::Cow, @@ -225,31 +227,29 @@ impl Device { desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, instance_flags: wgt::InstanceFlags, - ) -> Result { + ) -> Result { #[cfg(not(feature = "trace"))] if let Some(_) = trace_path { log::error!("Feature 'trace' is not enabled"); } - let fence = - unsafe { raw_device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; + let fence = unsafe { raw_device.create_fence() }.map_err(DeviceError::from_hal)?; let command_allocator = command::CommandAllocator::new(); let pending_encoder = command_allocator .acquire_encoder(raw_device.as_ref(), raw_queue) - .map_err(|_| CreateDeviceError::OutOfMemory)?; + .map_err(DeviceError::from_hal)?; let mut pending_writes = PendingWrites::new(pending_encoder); // Create zeroed buffer used for texture clears. 
let zero_buffer = unsafe { - raw_device - .create_buffer(&hal::BufferDescriptor { - label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags), - size: ZERO_BUFFER_SIZE, - usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, - memory_flags: hal::MemoryFlags::empty(), - }) - .map_err(DeviceError::from)? - }; + raw_device.create_buffer(&hal::BufferDescriptor { + label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags), + size: ZERO_BUFFER_SIZE, + usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, + memory_flags: hal::MemoryFlags::empty(), + }) + } + .map_err(DeviceError::from_hal)?; pending_writes.activate(); unsafe { pending_writes @@ -337,10 +337,23 @@ impl Device { } } + pub fn handle_hal_error(&self, error: hal::DeviceError) -> DeviceError { + match error { + hal::DeviceError::OutOfMemory => {} + hal::DeviceError::Lost + | hal::DeviceError::ResourceCreationFailed + | hal::DeviceError::Unexpected => { + self.lose(&error.to_string()); + } + } + DeviceError::from_hal(error) + } + pub(crate) fn release_queue(&self, queue: Box) { assert!(self.queue_to_drop.set(queue).is_ok()); } + #[track_caller] pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> { self.life_tracker.lock() } @@ -438,22 +451,19 @@ impl Device { wgt::Maintain::Wait => self .last_successful_submission_index .load(Ordering::Acquire), - wgt::Maintain::Poll => unsafe { - self.raw() - .get_fence_value(fence.as_ref()) - .map_err(DeviceError::from)? - }, + wgt::Maintain::Poll => unsafe { self.raw().get_fence_value(fence.as_ref()) } + .map_err(|e| self.handle_hal_error(e))?, }; // If necessary, wait for that submission to complete. if maintain.is_wait() { + log::trace!("Device::maintain: waiting for submission index {submission_index}"); unsafe { self.raw() .wait(fence.as_ref(), submission_index, CLEANUP_WAIT_MS) - .map_err(DeviceError::from)? 
- }; + } + .map_err(|e| self.handle_hal_error(e))?; } - log::trace!("Device::maintain: waiting for submission index {submission_index}"); let mut life_tracker = self.lock_life(); let submission_closures = @@ -585,7 +595,8 @@ impl Device { usage, memory_flags: hal::MemoryFlags::empty(), }; - let buffer = unsafe { self.raw().create_buffer(&hal_desc) }.map_err(DeviceError::from)?; + let buffer = + unsafe { self.raw().create_buffer(&hal_desc) }.map_err(|e| self.handle_hal_error(e))?; let buffer = Buffer { raw: Snatchable::new(buffer), @@ -661,6 +672,8 @@ impl Device { .describe_format_features(desc.format) .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?; + unsafe { self.raw().add_raw_texture(&*hal_texture) }; + let texture = Texture::new( self, resource::TextureInner::Native { raw: hal_texture }, @@ -681,11 +694,13 @@ impl Device { Ok(texture) } - pub fn create_buffer_from_hal( + pub(crate) fn create_buffer_from_hal( self: &Arc, hal_buffer: Box, desc: &resource::BufferDescriptor, - ) -> Arc { + ) -> Fallible { + unsafe { self.raw().add_raw_buffer(&*hal_buffer) }; + let buffer = Buffer { raw: Snatchable::new(hal_buffer), device: self.clone(), @@ -708,7 +723,7 @@ impl Device { .buffers .insert_single(&buffer, hal::BufferUses::empty()); - buffer + Fallible::Valid(buffer) } pub(crate) fn create_texture( @@ -928,11 +943,8 @@ impl Device { view_formats: hal_view_formats, }; - let raw_texture = unsafe { - self.raw() - .create_texture(&hal_desc) - .map_err(DeviceError::from)? 
- }; + let raw_texture = unsafe { self.raw().create_texture(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let clear_mode = if hal_usage .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) @@ -975,7 +987,7 @@ impl Device { unsafe { self.raw().create_texture_view(raw_texture.as_ref(), &desc) } - .map_err(DeviceError::from)?, + .map_err(|e| self.handle_hal_error(e))?, )); }; } @@ -1281,11 +1293,8 @@ impl Device { range: resolved_range, }; - let raw = unsafe { - self.raw() - .create_texture_view(texture_raw, &hal_desc) - .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? - }; + let raw = unsafe { self.raw().create_texture_view(texture_raw, &hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let selector = TextureSelector { mips: desc.range.base_mip_level..mip_level_end, @@ -1416,11 +1425,8 @@ impl Device { border_color: desc.border_color, }; - let raw = unsafe { - self.raw() - .create_sampler(&hal_desc) - .map_err(DeviceError::from)? - }; + let raw = unsafe { self.raw().create_sampler(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let sampler = Sampler { raw: ManuallyDrop::new(raw), @@ -1544,7 +1550,7 @@ impl Device { Err(error) => { return Err(match error { hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) + pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error)) } hal::ShaderError::Compilation(ref msg) => { log::error!("Shader error: {}", msg); @@ -1585,7 +1591,7 @@ impl Device { Err(error) => { return Err(match error { hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) + pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error)) } hal::ShaderError::Compilation(ref msg) => { log::error!("Shader error: {}", msg); @@ -1617,7 +1623,8 @@ impl Device { let encoder = self .command_allocator - .acquire_encoder(self.raw(), queue.raw())?; + .acquire_encoder(self.raw(), queue.raw()) + .map_err(|e| 
self.handle_hal_error(e))?; let command_buffer = command::CommandBuffer::new(encoder, self, label); @@ -1628,7 +1635,7 @@ impl Device { /// Generate information about late-validated buffer bindings for pipelines. //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? - pub(crate) fn make_late_sized_buffer_groups( + fn make_late_sized_buffer_groups( shader_binding_sizes: &FastHashMap, layout: &binding_model::PipelineLayout, ) -> ArrayVec { @@ -1849,11 +1856,9 @@ impl Device { flags: bgl_flags, entries: &hal_bindings, }; - let raw = unsafe { - self.raw() - .create_bind_group_layout(&hal_desc) - .map_err(DeviceError::from)? - }; + + let raw = unsafe { self.raw().create_bind_group_layout(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); for entry in entry_map.values() { @@ -1880,8 +1885,8 @@ impl Device { Ok(bgl) } - pub(crate) fn create_buffer_binding<'a>( - self: &Arc, + fn create_buffer_binding<'a>( + &self, bb: &'a binding_model::ResolvedBufferBinding, binding: u32, decl: &wgt::BindGroupLayoutEntry, @@ -1889,7 +1894,6 @@ impl Device { dynamic_binding_info: &mut Vec, late_buffer_binding_sizes: &mut FastHashMap, used: &mut BindGroupStates, - limits: &wgt::Limits, snatch_guard: &'a SnatchGuard<'a>, ) -> Result, binding_model::CreateBindGroupError> { @@ -1914,7 +1918,7 @@ impl Device { wgt::BufferBindingType::Uniform => ( wgt::BufferUsages::UNIFORM, hal::BufferUses::UNIFORM, - limits.max_uniform_buffer_binding_size, + self.limits.max_uniform_buffer_binding_size, ), wgt::BufferBindingType::Storage { read_only } => ( wgt::BufferUsages::STORAGE, @@ -1923,12 +1927,12 @@ impl Device { } else { hal::BufferUses::STORAGE_READ_WRITE }, - limits.max_storage_buffer_binding_size, + self.limits.max_storage_buffer_binding_size, ), }; let (align, align_limit_name) = - binding_model::buffer_binding_type_alignment(limits, binding_ty); + 
binding_model::buffer_binding_type_alignment(&self.limits, binding_ty); if bb.offset % align as u64 != 0 { return Err(Error::UnalignedBufferOffset( bb.offset, @@ -2004,10 +2008,21 @@ impl Device { late_buffer_binding_sizes.insert(binding, late_size); } + // This was checked against the device's alignment requirements above, + // which should always be a multiple of `COPY_BUFFER_ALIGNMENT`. assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); + + // `wgpu_hal` only restricts shader access to bound buffer regions with + // a certain resolution. For the sake of lazy initialization, round up + // the size of the bound range to reflect how much of the buffer is + // actually going to be visible to the shader. + let bounds_check_alignment = + binding_model::buffer_binding_type_bounds_check_alignment(&self.alignments, binding_ty); + let visible_size = align_to(bind_size, bounds_check_alignment); + used_buffer_ranges.extend(buffer.initialization_status.read().create_action( buffer, - bb.offset..bb.offset + bind_size, + bb.offset..bb.offset + visible_size, MemoryInitKind::NeedsInitializedMemory, )); @@ -2019,7 +2034,7 @@ impl Device { } fn create_sampler_binding<'a>( - self: &Arc, + &self, used: &mut BindGroupStates, binding: u32, decl: &wgt::BindGroupLayoutEntry, @@ -2067,8 +2082,8 @@ impl Device { Ok(sampler.raw()) } - pub(crate) fn create_texture_binding<'a>( - self: &Arc, + fn create_texture_binding<'a>( + &self, binding: u32, decl: &wgt::BindGroupLayoutEntry, view: &'a Arc, @@ -2166,7 +2181,6 @@ impl Device { &mut dynamic_binding_info, &mut late_buffer_binding_sizes, &mut used, - &self.limits, &snatch_guard, )?; @@ -2188,7 +2202,6 @@ impl Device { &mut dynamic_binding_info, &mut late_buffer_binding_sizes, &mut used, - &self.limits, &snatch_guard, )?; hal_buffers.push(bb); @@ -2275,11 +2288,8 @@ impl Device { textures: &hal_textures, acceleration_structures: &[], }; - let raw = unsafe { - self.raw() - .create_bind_group(&hal_desc) - .map_err(DeviceError::from)? 
- }; + let raw = unsafe { self.raw().create_bind_group(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; // collect in the order of BGL iteration let late_buffer_binding_sizes = layout @@ -2324,7 +2334,7 @@ impl Device { Ok(bind_group) } - pub(crate) fn check_array_binding( + fn check_array_binding( features: wgt::Features, count: Option, num_bindings: usize, @@ -2357,8 +2367,8 @@ impl Device { Ok(()) } - pub(crate) fn texture_use_parameters( - self: &Arc, + fn texture_use_parameters( + &self, binding: u32, decl: &wgt::BindGroupLayoutEntry, view: &TextureView, @@ -2573,11 +2583,8 @@ impl Device { push_constant_ranges: desc.push_constant_ranges.as_ref(), }; - let raw = unsafe { - self.raw() - .create_pipeline_layout(&hal_desc) - .map_err(DeviceError::from)? - }; + let raw = unsafe { self.raw().create_pipeline_layout(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; drop(raw_bind_group_layouts); @@ -2731,7 +2738,7 @@ impl Device { unsafe { self.raw().create_compute_pipeline(&pipeline_desc) }.map_err( |err| match err { hal::PipelineError::Device(error) => { - pipeline::CreateComputePipelineError::Device(error.into()) + pipeline::CreateComputePipelineError::Device(self.handle_hal_error(error)) } hal::PipelineError::Linkage(_stages, msg) => { pipeline::CreateComputePipelineError::Internal(msg) @@ -3311,7 +3318,7 @@ impl Device { unsafe { self.raw().create_render_pipeline(&pipeline_desc) }.map_err( |err| match err { hal::PipelineError::Device(error) => { - pipeline::CreateRenderPipelineError::Device(error.into()) + pipeline::CreateRenderPipelineError::Device(self.handle_hal_error(error)) } hal::PipelineError::Linkage(stage, msg) => { pipeline::CreateRenderPipelineError::Internal { stage, error: msg } @@ -3434,7 +3441,9 @@ impl Device { }; let raw = match unsafe { self.raw().create_pipeline_cache(&cache_desc) } { Ok(raw) => raw, - Err(e) => return Err(e.into()), + Err(e) => match e { + hal::PipelineCacheError::Device(e) => return 
Err(self.handle_hal_error(e).into()), + }, }; let cache = pipeline::PipelineCache { device: self.clone(), @@ -3448,10 +3457,7 @@ impl Device { Ok(cache) } - pub(crate) fn get_texture_format_features( - &self, - format: TextureFormat, - ) -> wgt::TextureFormatFeatures { + fn get_texture_format_features(&self, format: TextureFormat) -> wgt::TextureFormatFeatures { // Variant of adapter.get_texture_format_features that takes device features into account use wgt::TextureFormatFeatureFlags as tfsc; let mut format_features = self.adapter.get_texture_format_features(format); @@ -3465,7 +3471,7 @@ impl Device { format_features } - pub(crate) fn describe_format_features( + fn describe_format_features( &self, format: TextureFormat, ) -> Result { @@ -3494,9 +3500,11 @@ impl Device { submission_index: crate::SubmissionIndex, ) -> Result<(), DeviceError> { let fence = self.fence.read(); - let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref())? }; + let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref()) } + .map_err(|e| self.handle_hal_error(e))?; if last_done_index < submission_index { - unsafe { self.raw().wait(fence.as_ref(), submission_index, !0)? }; + unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) } + .map_err(|e| self.handle_hal_error(e))?; drop(fence); let closures = self .lock_life() @@ -3555,7 +3563,7 @@ impl Device { Ok(query_set) } - pub(crate) fn lose(&self, message: &str) { + fn lose(&self, message: &str) { // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device. // Mark the device explicitly as invalid. 
This is checked in various @@ -3622,16 +3630,6 @@ impl Device { } impl Device { - pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.raw().destroy_command_encoder(baked.encoder); - } - } - /// Wait for idle and remove resources that we can, before we die. pub(crate) fn prepare_to_die(&self) { self.pending_writes.lock().deactivate(); diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 4d79a81e3b..bb672612e4 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{ hal_api::HalApi, hub::{Hub, HubReport}, @@ -23,7 +25,7 @@ impl GlobalReport { pub struct Global { pub instance: Instance, - pub(crate) surfaces: Registry, + pub(crate) surfaces: Registry>, pub(crate) hub: Hub, } diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 5cbb736301..f4e8b9c756 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -107,10 +107,10 @@ use crate::{ instance::{Adapter, Surface}, pipeline::{ComputePipeline, PipelineCache, RenderPipeline, ShaderModule}, registry::{Registry, RegistryReport}, - resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, + resource::{Buffer, Fallible, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, storage::{Element, Storage}, }; -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; #[derive(Debug, PartialEq, Eq)] pub struct HubReport { @@ -162,24 +162,24 @@ impl HubReport { /// /// [`A::hub(global)`]: HalApi::hub pub struct Hub { - pub(crate) adapters: Registry, - pub(crate) devices: Registry, - pub(crate) queues: Registry, - pub(crate) pipeline_layouts: Registry, - pub(crate) shader_modules: Registry, - pub(crate) bind_group_layouts: Registry, - pub(crate) bind_groups: Registry, - pub(crate) command_buffers: Registry, - pub(crate) render_bundles: Registry, - pub(crate) 
render_pipelines: Registry, - pub(crate) compute_pipelines: Registry, - pub(crate) pipeline_caches: Registry, - pub(crate) query_sets: Registry, - pub(crate) buffers: Registry, + pub(crate) adapters: Registry>, + pub(crate) devices: Registry>, + pub(crate) queues: Registry>, + pub(crate) pipeline_layouts: Registry>, + pub(crate) shader_modules: Registry>, + pub(crate) bind_group_layouts: Registry>, + pub(crate) bind_groups: Registry>, + pub(crate) command_buffers: Registry>, + pub(crate) render_bundles: Registry>, + pub(crate) render_pipelines: Registry>, + pub(crate) compute_pipelines: Registry>, + pub(crate) pipeline_caches: Registry>, + pub(crate) query_sets: Registry>, + pub(crate) buffers: Registry>, pub(crate) staging_buffers: Registry, - pub(crate) textures: Registry, - pub(crate) texture_views: Registry, - pub(crate) samplers: Registry, + pub(crate) textures: Registry>, + pub(crate) texture_views: Registry>, + pub(crate) samplers: Registry>, } impl Hub { @@ -206,7 +206,7 @@ impl Hub { } } - pub(crate) fn clear(&self, surface_guard: &Storage) { + pub(crate) fn clear(&self, surface_guard: &Storage>) { let mut devices = self.devices.write(); for element in devices.map.iter() { if let Element::Occupied(ref device, _) = *element { diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 19baa2e6f0..4e4897c832 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -4,18 +4,26 @@ use std::{ fmt::{self, Debug}, hash::Hash, marker::PhantomData, + mem::size_of, + num::NonZeroU64, }; -use wgt::{Backend, WasmNotSendSync}; +use wgt::WasmNotSendSync; -type IdType = u64; -type ZippedIndex = Index; -type NonZeroId = std::num::NonZeroU64; - -const INDEX_BITS: usize = ZippedIndex::BITS as usize; -const EPOCH_BITS: usize = INDEX_BITS - BACKEND_BITS; -const BACKEND_BITS: usize = 3; -const BACKEND_SHIFT: usize = INDEX_BITS * 2 - BACKEND_BITS; -pub const EPOCH_MASK: u32 = (1 << (EPOCH_BITS)) - 1; +const _: () = { + if size_of::() != 4 { + panic!() + } +}; +const _: () 
= { + if size_of::() != 4 { + panic!() + } +}; +const _: () = { + if size_of::() != 8 { + panic!() + } +}; /// The raw underlying representation of an identifier. #[repr(transparent)] @@ -30,50 +38,18 @@ pub const EPOCH_MASK: u32 = (1 << (EPOCH_BITS)) - 1; serde(from = "SerialId") )] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct RawId(NonZeroId); +pub struct RawId(NonZeroU64); impl RawId { - #[doc(hidden)] - #[inline] - pub fn from_non_zero(non_zero: NonZeroId) -> Self { - Self(non_zero) - } - - #[doc(hidden)] - #[inline] - pub fn into_non_zero(self) -> NonZeroId { - self.0 - } - /// Zip together an identifier and return its raw underlying representation. - pub fn zip(index: Index, epoch: Epoch, backend: Backend) -> RawId { - assert_eq!(0, epoch >> EPOCH_BITS); - assert_eq!(0, (index as IdType) >> INDEX_BITS); - let v = index as IdType - | ((epoch as IdType) << INDEX_BITS) - | ((backend as IdType) << BACKEND_SHIFT); - Self(NonZeroId::new(v).unwrap()) + pub fn zip(index: Index, epoch: Epoch) -> RawId { + let v = (index as u64) | ((epoch as u64) << 32); + Self(NonZeroU64::new(v).unwrap()) } /// Unzip a raw identifier into its components. 
- #[allow(trivial_numeric_casts)] - pub fn unzip(self) -> (Index, Epoch, Backend) { - ( - (self.0.get() as ZippedIndex) as Index, - (((self.0.get() >> INDEX_BITS) as ZippedIndex) & (EPOCH_MASK as ZippedIndex)) as Index, - self.backend(), - ) - } - - pub fn backend(self) -> Backend { - match self.0.get() >> (BACKEND_SHIFT) as u8 { - 0 => Backend::Empty, - 1 => Backend::Vulkan, - 2 => Backend::Metal, - 3 => Backend::Dx12, - 4 => Backend::Gl, - _ => unreachable!(), - } + pub fn unzip(self) -> (Index, Epoch) { + (self.0.get() as Index, (self.0.get() >> 32) as Epoch) } } @@ -116,20 +92,20 @@ pub struct Id(RawId, PhantomData); #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] enum SerialId { // The only variant forces RON to not ignore "Id" - Id(Index, Epoch, Backend), + Id(Index, Epoch), } impl From for SerialId { fn from(id: RawId) -> Self { - let (index, epoch, backend) = id.unzip(); - Self::Id(index, epoch, backend) + let (index, epoch) = id.unzip(); + Self::Id(index, epoch) } } impl From for RawId { fn from(id: SerialId) -> Self { match id { - SerialId::Id(index, epoch, backend) => RawId::zip(index, epoch, backend), + SerialId::Id(index, epoch) => RawId::zip(index, epoch), } } } @@ -150,29 +126,13 @@ where self.0 } - #[allow(dead_code)] - pub(crate) fn dummy(index: u32) -> Self { - Id::zip(index, 1, Backend::Empty) - } - - #[allow(dead_code)] - pub(crate) fn is_valid(&self) -> bool { - self.backend() != Backend::Empty - } - - /// Get the backend this identifier corresponds to. 
#[inline] - pub fn backend(self) -> Backend { - self.0.backend() + pub fn zip(index: Index, epoch: Epoch) -> Self { + Id(RawId::zip(index, epoch), PhantomData) } #[inline] - pub fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self { - Id(RawId::zip(index, epoch, backend), PhantomData) - } - - #[inline] - pub fn unzip(self) -> (Index, Epoch, Backend) { + pub fn unzip(self) -> (Index, Epoch) { self.0.unzip() } } @@ -194,16 +154,8 @@ where T: Marker, { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let (index, epoch, backend) = self.unzip(); - let backend = match backend { - Backend::Empty => "_", - Backend::Vulkan => "vk", - Backend::Metal => "mtl", - Backend::Dx12 => "d3d12", - Backend::Gl => "gl", - Backend::BrowserWebGpu => "webgpu", - }; - write!(formatter, "Id({index},{epoch},{backend})")?; + let (index, epoch) = self.unzip(); + write!(formatter, "Id({index},{epoch})")?; Ok(()) } } @@ -326,43 +278,16 @@ impl CommandBufferId { } } -#[test] -fn test_id_backend() { - for &b in &[ - Backend::Empty, - Backend::Vulkan, - Backend::Metal, - Backend::Dx12, - Backend::Gl, - ] { - let id = Id::<()>::zip(1, 0, b); - let (_id, _epoch, backend) = id.unzip(); - assert_eq!(id.backend(), b); - assert_eq!(backend, b); - } -} - #[test] fn test_id() { - let last_index = ((1u64 << INDEX_BITS) - 1) as Index; - let indexes = [1, last_index / 2 - 1, last_index / 2 + 1, last_index]; - let epochs = [1, EPOCH_MASK / 2 - 1, EPOCH_MASK / 2 + 1, EPOCH_MASK]; - let backends = [ - Backend::Empty, - Backend::Vulkan, - Backend::Metal, - Backend::Dx12, - Backend::Gl, - ]; + let indexes = [0, Index::MAX / 2 - 1, Index::MAX / 2 + 1, Index::MAX]; + let epochs = [1, Epoch::MAX / 2 - 1, Epoch::MAX / 2 + 1, Epoch::MAX]; for &i in &indexes { for &e in &epochs { - for &b in &backends { - let id = Id::<()>::zip(i, e, b); - let (index, epoch, backend) = id.unzip(); - assert_eq!(index, i); - assert_eq!(epoch, e); - assert_eq!(backend, b); - } + let id = Id::<()>::zip(i, e); + let 
(index, epoch) = id.unzip(); + assert_eq!(index, i); + assert_eq!(epoch, e); } } } diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index c89731f7af..0493b9d2cf 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -1,5 +1,3 @@ -use wgt::Backend; - use crate::{ id::{Id, Marker}, lock::{rank, Mutex}, @@ -52,7 +50,7 @@ impl IdentityValues { /// /// The backend is incorporated into the id, so that ids allocated with /// different `backend` values are always distinct. - pub fn alloc(&mut self, backend: Backend) -> Id { + pub fn alloc(&mut self) -> Id { assert!( self.id_source != IdSource::External, "Mix of internally allocated and externally provided IDs" @@ -61,12 +59,12 @@ impl IdentityValues { self.count += 1; match self.free.pop() { - Some((index, epoch)) => Id::zip(index, epoch + 1, backend), + Some((index, epoch)) => Id::zip(index, epoch + 1), None => { let index = self.next_index; self.next_index += 1; let epoch = 1; - Id::zip(index, epoch, backend) + Id::zip(index, epoch) } } } @@ -85,7 +83,7 @@ impl IdentityValues { /// Free `id`. It will never be returned from `alloc` again. 
pub fn release(&mut self, id: Id) { if let IdSource::Allocated = self.id_source { - let (index, epoch, _backend) = id.unzip(); + let (index, epoch) = id.unzip(); self.free.push((index, epoch)); } self.count -= 1; @@ -103,8 +101,8 @@ pub struct IdentityManager { } impl IdentityManager { - pub fn process(&self, backend: Backend) -> Id { - self.values.lock().alloc(backend) + pub fn process(&self) -> Id { + self.values.lock().alloc() } pub fn mark_as_used(&self, id: Id) -> Id { self.values.lock().mark_as_used(id) @@ -135,10 +133,10 @@ impl IdentityManager { fn test_epoch_end_of_life() { use crate::id; let man = IdentityManager::::new(); - let id1 = man.process(Backend::Empty); - assert_eq!(id1.unzip(), (0, 1, Backend::Empty)); + let id1 = man.process(); + assert_eq!(id1.unzip(), (0, 1)); man.free(id1); - let id2 = man.process(Backend::Empty); + let id2 = man.process(); // confirm that the epoch 1 is no longer re-used - assert_eq!(id2.unzip(), (0, 2, Backend::Empty)); + assert_eq!(id2.unzip(), (0, 2)); } diff --git a/wgpu-core/src/init_tracker/mod.rs b/wgpu-core/src/init_tracker/mod.rs index ccaac1e16f..15a79bf520 100644 --- a/wgpu-core/src/init_tracker/mod.rs +++ b/wgpu-core/src/init_tracker/mod.rs @@ -65,6 +65,35 @@ pub(crate) struct InitTracker { uninitialized_ranges: UninitializedRangeVec, } +pub(crate) struct UninitializedIter<'a, Idx: fmt::Debug + Ord + Copy> { + uninitialized_ranges: &'a UninitializedRangeVec, + drain_range: Range, + next_index: usize, +} + +impl<'a, Idx> Iterator for UninitializedIter<'a, Idx> +where + Idx: fmt::Debug + Ord + Copy, +{ + type Item = Range; + + fn next(&mut self) -> Option { + self.uninitialized_ranges + .get(self.next_index) + .and_then(|range| { + if range.start < self.drain_range.end { + self.next_index += 1; + Some( + range.start.max(self.drain_range.start) + ..range.end.min(self.drain_range.end), + ) + } else { + None + } + }) + } +} + pub(crate) struct InitTrackerDrain<'a, Idx: fmt::Debug + Ord + Copy> { 
uninitialized_ranges: &'a mut UninitializedRangeVec, drain_range: Range, @@ -190,6 +219,18 @@ where }) } + // Returns an iterator over the uninitialized ranges in a query range. + pub(crate) fn uninitialized(&mut self, drain_range: Range) -> UninitializedIter { + let index = self + .uninitialized_ranges + .partition_point(|r| r.end <= drain_range.start); + UninitializedIter { + drain_range, + uninitialized_ranges: &self.uninitialized_ranges, + next_index: index, + } + } + // Drains uninitialized ranges in a query range. pub(crate) fn drain(&mut self, drain_range: Range) -> InitTrackerDrain { let index = self diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 16a5b69c43..581c5ce0d9 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1,13 +1,12 @@ use std::sync::Arc; use std::{borrow::Cow, collections::HashMap}; -use crate::hub::Hub; use crate::{ api_log, - device::{queue::Queue, resource::Device, DeviceDescriptor}, + device::{queue::Queue, resource::Device, DeviceDescriptor, DeviceError}, global::Global, hal_api::HalApi, - id::{markers, AdapterId, DeviceId, Id, Marker, QueueId, SurfaceId}, + id::{markers, AdapterId, DeviceId, QueueId, SurfaceId}, lock::{rank, Mutex}, present::Presentation, resource::ResourceType, @@ -272,20 +271,19 @@ impl Adapter { ) -> Result<(Arc, Arc), RequestDeviceError> { api_log!("Adapter::create_device"); - if let Ok(device) = Device::new( + let device = Device::new( hal_device.device, hal_device.queue.as_ref(), self, desc, trace_path, instance_flags, - ) { - let device = Arc::new(device); - let queue = Arc::new(Queue::new(device.clone(), hal_device.queue)); - device.set_queue(&queue); - return Ok((device, queue)); - } - Err(RequestDeviceError::OutOfMemory) + )?; + + let device = Arc::new(device); + let queue = Arc::new(Queue::new(device.clone(), hal_device.queue)); + device.set_queue(&queue); + Ok((device, queue)) } #[allow(clippy::type_complexity)] @@ -338,12 +336,7 @@ impl Adapter { 
&desc.memory_hints, ) } - .map_err(|err| match err { - hal::DeviceError::Lost => RequestDeviceError::DeviceLost, - hal::DeviceError::OutOfMemory => RequestDeviceError::OutOfMemory, - hal::DeviceError::ResourceCreationFailed => RequestDeviceError::Internal, - hal::DeviceError::Unexpected => RequestDeviceError::DeviceLost, - })?; + .map_err(DeviceError::from_hal)?; self.create_device_and_queue_from_hal(open, desc, instance_flags, trace_path) } @@ -352,22 +345,9 @@ impl Adapter { crate::impl_resource_type!(Adapter); crate::impl_storage_item!(Adapter); -#[derive(Clone, Debug, Error)] -#[non_exhaustive] -pub enum IsSurfaceSupportedError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, -} - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetSurfaceSupportError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, #[error("Surface is not supported by the adapter")] Unsupported, } @@ -377,54 +357,22 @@ pub enum GetSurfaceSupportError { /// Error when requesting a device from the adaptor #[non_exhaustive] pub enum RequestDeviceError { - #[error("Parent adapter is invalid")] - InvalidAdapter, - #[error("Connection to device was lost during initialization")] - DeviceLost, - #[error("Device initialization failed due to implementation specific errors")] - Internal, + #[error(transparent)] + Device(#[from] DeviceError), #[error(transparent)] LimitsExceeded(#[from] FailedLimit), #[error("Device has no queue supporting graphics")] NoGraphicsQueue, - #[error("Not enough memory left to request device")] - OutOfMemory, #[error("Unsupported features were requested: {0:?}")] UnsupportedFeature(wgt::Features), } -pub enum AdapterInputs<'a, M: Marker> { - IdSet(&'a [Id]), - Mask(Backends, fn(Backend) -> Option>), -} - -impl AdapterInputs<'_, M> { - fn find(&self, b: Backend) -> Option>> { - match *self { - Self::IdSet(ids) => Some(Some(ids.iter().find(|id| id.backend() == 
b).copied()?)), - Self::Mask(bits, ref fun) => { - if bits.contains(b.into()) { - Some(fun(b)) - } else { - None - } - } - } - } -} - -#[derive(Clone, Debug, Error)] -#[error("Adapter is invalid")] -pub struct InvalidAdapter; - #[derive(Clone, Debug, Error)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[non_exhaustive] pub enum RequestAdapterError { #[error("No suitable adapter found")] NotFound, - #[error("Surface {0:?} is invalid")] - InvalidSurface(SurfaceId), } #[derive(Clone, Debug, Error)] @@ -498,7 +446,7 @@ impl Global { let id = self .surfaces - .prepare(wgt::Backend::Empty, id_in) // No specific backend for Surface, since it's not specific. + .prepare(id_in) // No specific backend for Surface, since it's not specific. .assign(Arc::new(surface)); Ok(id) } @@ -542,10 +490,7 @@ impl Global { surface_per_backend: std::iter::once((Backend::Metal, raw_surface)).collect(), }; - let id = self - .surfaces - .prepare(Backend::Metal, id_in) - .assign(Arc::new(surface)); + let id = self.surfaces.prepare(id_in).assign(Arc::new(surface)); Ok(id) } @@ -567,10 +512,7 @@ impl Global { surface_per_backend: std::iter::once((Backend::Dx12, surface)).collect(), }; - let id = self - .surfaces - .prepare(Backend::Dx12, id_in) - .assign(Arc::new(surface)); + let id = self.surfaces.prepare(id_in).assign(Arc::new(surface)); Ok(id) } @@ -624,9 +566,9 @@ impl Global { api_log!("Surface::drop {id:?}"); - let surface = self.surfaces.unregister(id); - let surface = Arc::into_inner(surface.unwrap()) - .expect("Surface cannot be destroyed because is still in use"); + let surface = self.surfaces.remove(id); + let surface = + Arc::into_inner(surface).expect("Surface cannot be destroyed because is still in use"); if let Some(present) = surface.presentation.lock().take() { for (&backend, surface) in &surface.surface_per_backend { @@ -638,185 +580,81 @@ impl Global { drop(surface) } - pub fn enumerate_adapters(&self, inputs: AdapterInputs) -> Vec { + pub fn 
enumerate_adapters(&self, backends: Backends) -> Vec { profiling::scope!("Instance::enumerate_adapters"); api_log!("Instance::enumerate_adapters"); - fn enumerate( - hub: &Hub, - backend: Backend, - instance: &dyn hal::DynInstance, - inputs: &AdapterInputs, - list: &mut Vec, - ) { - let Some(id_backend) = inputs.find(backend) else { - return; - }; - + let mut adapters = Vec::new(); + for (_, instance) in self + .instance + .instance_per_backend + .iter() + .filter(|(backend, _)| backends.contains(Backends::from(*backend))) + { profiling::scope!("enumerating", &*format!("{:?}", backend)); let hal_adapters = unsafe { instance.enumerate_adapters(None) }; for raw in hal_adapters { let adapter = Adapter::new(raw); log::info!("Adapter {:?}", adapter.raw.info); - let id = hub - .adapters - .prepare(backend, id_backend) - .assign(Arc::new(adapter)); - list.push(id); + let id = self.hub.adapters.prepare(None).assign(Arc::new(adapter)); + adapters.push(id); } } - - let mut adapters = Vec::new(); - for (backend, instance) in &self.instance.instance_per_backend { - enumerate( - &self.hub, - *backend, - instance.as_ref(), - &inputs, - &mut adapters, - ); - } adapters } - fn select( - &self, - backend: Backend, - selected: &mut usize, - new_id: Option, - mut list: Vec, - ) -> Option { - match selected.checked_sub(list.len()) { - Some(left) => { - *selected = left; - None - } - None => { - let adapter = Adapter::new(list.swap_remove(*selected)); - log::info!("Adapter {:?}", adapter.raw.info); - let id = self - .hub - .adapters - .prepare(backend, new_id) - .assign(Arc::new(adapter)); - Some(id) - } - } - } - pub fn request_adapter( &self, desc: &RequestAdapterOptions, - inputs: AdapterInputs, + backends: Backends, + id_in: Option, ) -> Result { profiling::scope!("Instance::request_adapter"); api_log!("Instance::request_adapter"); - fn gather( - backend: Backend, - instance: &Instance, - inputs: &AdapterInputs, - compatible_surface: Option<&Surface>, - force_software: bool, - 
device_types: &mut Vec, - ) -> (Option>, Vec) { - let id = inputs.find(backend); - match (id, instance.raw(backend)) { - (Some(id), Some(inst)) => { - let compatible_hal_surface = - compatible_surface.and_then(|surface| surface.raw(backend)); - let mut adapters = unsafe { inst.enumerate_adapters(compatible_hal_surface) }; - if force_software { - adapters.retain(|exposed| exposed.info.device_type == wgt::DeviceType::Cpu); - } - if let Some(surface) = compatible_surface { - adapters - .retain(|exposed| surface.get_capabilities_with_raw(exposed).is_ok()); - } - device_types.extend(adapters.iter().map(|ad| ad.info.device_type)); - (id, adapters) - } - _ => (None, Vec::new()), - } - } - - let compatible_surface = desc - .compatible_surface - .map(|id| { - self.surfaces - .get(id) - .map_err(|_| RequestAdapterError::InvalidSurface(id)) - }) - .transpose()?; + let compatible_surface = desc.compatible_surface.map(|id| self.surfaces.get(id)); let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); - let mut device_types = Vec::new(); - - #[cfg(vulkan)] - let (id_vulkan, adapters_vk) = gather( - Backend::Vulkan, - &self.instance, - &inputs, - compatible_surface, - desc.force_fallback_adapter, - &mut device_types, - ); - #[cfg(metal)] - let (id_metal, adapters_metal) = gather( - Backend::Metal, - &self.instance, - &inputs, - compatible_surface, - desc.force_fallback_adapter, - &mut device_types, - ); - #[cfg(dx12)] - let (id_dx12, adapters_dx12) = gather( - Backend::Dx12, - &self.instance, - &inputs, - compatible_surface, - desc.force_fallback_adapter, - &mut device_types, - ); - #[cfg(gles)] - let (id_gl, adapters_gl) = gather( - Backend::Gl, - &self.instance, - &inputs, - compatible_surface, - desc.force_fallback_adapter, - &mut device_types, - ); + let mut adapters = Vec::new(); - if device_types.is_empty() { - return Err(RequestAdapterError::NotFound); + for (backend, instance) in self + .instance + .instance_per_backend + .iter() + 
.filter(|(backend, _)| backends.contains(Backends::from(*backend))) + { + let compatible_hal_surface = + compatible_surface.and_then(|surface| surface.raw(*backend)); + let mut backend_adapters = + unsafe { instance.enumerate_adapters(compatible_hal_surface) }; + if desc.force_fallback_adapter { + backend_adapters.retain(|exposed| exposed.info.device_type == wgt::DeviceType::Cpu); + } + if let Some(surface) = compatible_surface { + backend_adapters + .retain(|exposed| surface.get_capabilities_with_raw(exposed).is_ok()); + } + adapters.extend(backend_adapters); } - let (mut integrated, mut discrete, mut virt, mut cpu, mut other) = - (None, None, None, None, None); - - for (i, ty) in device_types.into_iter().enumerate() { - match ty { - wgt::DeviceType::IntegratedGpu => { - integrated = integrated.or(Some(i)); - } - wgt::DeviceType::DiscreteGpu => { - discrete = discrete.or(Some(i)); - } - wgt::DeviceType::VirtualGpu => { - virt = virt.or(Some(i)); - } - wgt::DeviceType::Cpu => { - cpu = cpu.or(Some(i)); - } - wgt::DeviceType::Other => { - other = other.or(Some(i)); - } + match desc.power_preference { + PowerPreference::LowPower => { + sort(&mut adapters, true); } + PowerPreference::HighPerformance => { + sort(&mut adapters, false); + } + PowerPreference::None => {} + }; + + fn sort(adapters: &mut [hal::DynExposedAdapter], prefer_integrated_gpu: bool) { + adapters.sort_by(|a, b| { + get_order(a.info.device_type, prefer_integrated_gpu) + .cmp(&get_order(b.info.device_type, prefer_integrated_gpu)) + }); } - let preferred_gpu = match desc.power_preference { + fn get_order(device_type: wgt::DeviceType, prefer_integrated_gpu: bool) -> u8 { // Since devices of type "Other" might really be "Unknown" and come // from APIs like OpenGL that don't specify device type, Prefer more // Specific types over Other. 
@@ -824,42 +662,28 @@ impl Global { // This means that backends which do provide accurate device types // will be preferred if their device type indicates an actual // hardware GPU (integrated or discrete). - PowerPreference::LowPower => integrated.or(discrete).or(other).or(virt).or(cpu), - PowerPreference::HighPerformance => discrete.or(integrated).or(other).or(virt).or(cpu), - PowerPreference::None => { - let option_min = |a: Option, b: Option| { - if let (Some(a), Some(b)) = (a, b) { - Some(a.min(b)) - } else { - a.or(b) - } - }; - // Pick the lowest id of these types - option_min(option_min(discrete, integrated), other) + match device_type { + wgt::DeviceType::DiscreteGpu if prefer_integrated_gpu => 2, + wgt::DeviceType::IntegratedGpu if prefer_integrated_gpu => 1, + wgt::DeviceType::DiscreteGpu => 1, + wgt::DeviceType::IntegratedGpu => 2, + wgt::DeviceType::Other => 3, + wgt::DeviceType::VirtualGpu => 4, + wgt::DeviceType::Cpu => 5, } - }; - - let mut selected = preferred_gpu.unwrap_or(0); - #[cfg(vulkan)] - if let Some(id) = self.select(Backend::Vulkan, &mut selected, id_vulkan, adapters_vk) { - return Ok(id); } - #[cfg(metal)] - if let Some(id) = self.select(Backend::Metal, &mut selected, id_metal, adapters_metal) { - return Ok(id); - } - #[cfg(dx12)] - if let Some(id) = self.select(Backend::Dx12, &mut selected, id_dx12, adapters_dx12) { - return Ok(id); - } - #[cfg(gles)] - if let Some(id) = self.select(Backend::Gl, &mut selected, id_gl, adapters_gl) { - return Ok(id); - } - let _ = selected; - log::warn!("Some adapters are present, but enumerating them failed!"); - Err(RequestAdapterError::NotFound) + if let Some(adapter) = adapters.into_iter().next() { + log::info!("Adapter {:?}", adapter.info); + let id = self + .hub + .adapters + .prepare(id_in) + .assign(Arc::new(Adapter::new(adapter))); + Ok(id) + } else { + Err(RequestAdapterError::NotFound) + } } /// # Safety @@ -872,80 +696,58 @@ impl Global { ) -> AdapterId { 
profiling::scope!("Instance::create_adapter_from_hal"); - let fid = self.hub.adapters.prepare(hal_adapter.backend(), input); + let fid = self.hub.adapters.prepare(input); let id = fid.assign(Arc::new(Adapter::new(hal_adapter))); resource_log!("Created Adapter {:?}", id); id } - pub fn adapter_get_info( - &self, - adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.info.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_get_info(&self, adapter_id: AdapterId) -> wgt::AdapterInfo { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.info.clone() } pub fn adapter_get_texture_format_features( &self, adapter_id: AdapterId, format: wgt::TextureFormat, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.get_texture_format_features(format)) - .map_err(|_| InvalidAdapter) + ) -> wgt::TextureFormatFeatures { + let adapter = self.hub.adapters.get(adapter_id); + adapter.get_texture_format_features(format) } - pub fn adapter_features(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.features) - .map_err(|_| InvalidAdapter) + pub fn adapter_features(&self, adapter_id: AdapterId) -> wgt::Features { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.features } - pub fn adapter_limits(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.limits.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_limits(&self, adapter_id: AdapterId) -> wgt::Limits { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.limits.clone() } pub fn adapter_downlevel_capabilities( &self, adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.downlevel.clone()) - .map_err(|_| InvalidAdapter) + ) -> wgt::DownlevelCapabilities { + let adapter = 
self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.downlevel.clone() } pub fn adapter_get_presentation_timestamp( &self, adapter_id: AdapterId, - ) -> Result { - let hub = &self.hub; - - let adapter = hub.adapters.get(adapter_id).map_err(|_| InvalidAdapter)?; - - Ok(unsafe { adapter.raw.adapter.get_presentation_timestamp() }) + ) -> wgt::PresentationTimestamp { + let adapter = self.hub.adapters.get(adapter_id); + unsafe { adapter.raw.adapter.get_presentation_timestamp() } } pub fn adapter_drop(&self, adapter_id: AdapterId) { profiling::scope!("Adapter::drop"); api_log!("Adapter::drop {adapter_id:?}"); - let hub = &self.hub; - hub.adapters.unregister(adapter_id); + self.hub.adapters.remove(adapter_id); } } @@ -957,37 +759,24 @@ impl Global { trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Adapter::request_device"); api_log!("Adapter::request_device"); - let backend = adapter_id.backend(); - let device_fid = self.hub.devices.prepare(backend, device_id_in); - let queue_fid = self.hub.queues.prepare(backend, queue_id_in); - - let error = 'error: { - let adapter = match self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = - match adapter.create_device_and_queue(desc, self.instance.flags, trace_path) { - Ok((device, queue)) => (device, queue), - Err(e) => break 'error e, - }; + let device_fid = self.hub.devices.prepare(device_id_in); + let queue_fid = self.hub.queues.prepare(queue_id_in); - let device_id = device_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = + adapter.create_device_and_queue(desc, self.instance.flags, trace_path)?; - let queue_id = queue_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); 
+ let device_id = device_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queue_fid.assign(queue); + resource_log!("Created Queue {:?}", queue_id); - let device_id = device_fid.assign_error(); - let queue_id = queue_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } /// # Safety @@ -1002,40 +791,27 @@ impl Global { trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Global::create_device_from_hal"); - let backend = adapter_id.backend(); - let devices_fid = self.hub.devices.prepare(backend, device_id_in); - let queues_fid = self.hub.queues.prepare(backend, queue_id_in); + let devices_fid = self.hub.devices.prepare(device_id_in); + let queues_fid = self.hub.queues.prepare(queue_id_in); - let error = 'error: { - let adapter = match self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = match adapter.create_device_and_queue_from_hal( - hal_device, - desc, - self.instance.flags, - trace_path, - ) { - Ok(device) => device, - Err(e) => break 'error e, - }; - - let device_id = devices_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = adapter.create_device_and_queue_from_hal( + hal_device, + desc, + self.instance.flags, + trace_path, + )?; - let queue_id = queues_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); + let device_id = devices_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queues_fid.assign(queue); + resource_log!("Created Queue {:?}", queue_id); - let device_id = devices_fid.assign_error(); - let queue_id = 
queues_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } } diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index ccbe64d527..521238a7d6 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -128,16 +128,26 @@ pub fn hal_label(opt: Option<&str>, flags: wgt::InstanceFlags) -> Option<&str> { opt } -const DOWNLEVEL_WARNING_MESSAGE: &str = "The underlying API or device in use does not \ -support enough features to be a fully compliant implementation of WebGPU. A subset of the features can still be used. \ -If you are running this program on native and not in a browser and wish to limit the features you use to the supported subset, \ -call Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \ -platform supports."; -const DOWNLEVEL_ERROR_MESSAGE: &str = "This is not an invalid use of WebGPU: the underlying API or device does not \ -support enough features to be a fully compliant implementation. A subset of the features can still be used. \ -If you are running this program on native and not in a browser and wish to work around this issue, call \ -Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \ -platform supports."; +const DOWNLEVEL_WARNING_MESSAGE: &str = concat!( + "The underlying API or device in use does not ", + "support enough features to be a fully compliant implementation of WebGPU. ", + "A subset of the features can still be used. ", + "If you are running this program on native and not in a browser and wish to limit ", + "the features you use to the supported subset, ", + "call Adapter::downlevel_properties or Device::downlevel_properties to get ", + "a listing of the features the current ", + "platform supports." 
+); + +const DOWNLEVEL_ERROR_MESSAGE: &str = concat!( + "This is not an invalid use of WebGPU: the underlying API or device does not ", + "support enough features to be a fully compliant implementation. ", + "A subset of the features can still be used. ", + "If you are running this program on native and not in a browser ", + "and wish to work around this issue, call ", + "Adapter::downlevel_properties or Device::downlevel_properties ", + "to get a listing of the features the current platform supports." +); #[cfg(feature = "api_log_info")] macro_rules! api_log { diff --git a/wgpu-core/src/lock/mod.rs b/wgpu-core/src/lock/mod.rs index a6593a062d..2927bf3aaf 100644 --- a/wgpu-core/src/lock/mod.rs +++ b/wgpu-core/src/lock/mod.rs @@ -9,17 +9,22 @@ //! checks to ensure that each thread acquires locks only in a //! specific order, to prevent deadlocks. //! +//! - The [`observing`] module defines lock types that record +//! `wgpu-core`'s lock acquisition activity to disk, for later +//! analysis by the `lock-analyzer` binary. +//! //! - The [`vanilla`] module defines lock types that are //! uninstrumented, no-overhead wrappers around the standard lock //! types. //! -//! (We plan to add more wrappers in the future.) -//! //! If the `wgpu_validate_locks` config is set (for example, with //! `RUSTFLAGS='--cfg wgpu_validate_locks'`), `wgpu-core` uses the //! [`ranked`] module's locks. We hope to make this the default for //! debug builds soon. //! +//! If the `observe_locks` feature is enabled, `wgpu-core` uses the +//! [`observing`] module's locks. +//! //! Otherwise, `wgpu-core` uses the [`vanilla`] module's locks. //! //! 
[`Mutex`]: parking_lot::Mutex @@ -31,11 +36,19 @@ pub mod rank; #[cfg_attr(not(wgpu_validate_locks), allow(dead_code))] mod ranked; -#[cfg_attr(wgpu_validate_locks, allow(dead_code))] +#[cfg(feature = "observe_locks")] +mod observing; + +#[cfg_attr(any(wgpu_validate_locks, feature = "observe_locks"), allow(dead_code))] mod vanilla; #[cfg(wgpu_validate_locks)] -pub use ranked::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use ranked as chosen; + +#[cfg(feature = "observe_locks")] +use observing as chosen; + +#[cfg(not(any(wgpu_validate_locks, feature = "observe_locks")))] +use vanilla as chosen; -#[cfg(not(wgpu_validate_locks))] -pub use vanilla::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +pub use chosen::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; diff --git a/wgpu-core/src/lock/observing.rs b/wgpu-core/src/lock/observing.rs new file mode 100644 index 0000000000..afda1ad574 --- /dev/null +++ b/wgpu-core/src/lock/observing.rs @@ -0,0 +1,489 @@ +//! Lock types that observe lock acquisition order. +//! +//! This module's [`Mutex`] type is instrumented to observe the +//! nesting of `wgpu-core` lock acquisitions. Whenever `wgpu-core` +//! acquires one lock while it is already holding another, we note +//! that nesting pair. This tells us what the [`LockRank::followers`] +//! set for each lock would need to include to accommodate +//! `wgpu-core`'s observed behavior. +//! +//! When `wgpu-core`'s `observe_locks` feature is enabled, if the +//! `WGPU_CORE_LOCK_OBSERVE_DIR` environment variable is set to the +//! path of an existing directory, then every thread that acquires a +//! lock in `wgpu-core` will write its own log file to that directory. +//! You can then run the `wgpu` workspace's `lock-analyzer` binary to +//! read those files and summarize the results. The output from +//! `lock-analyzer` has the same form as the lock ranks given in +//! [`lock/rank.rs`]. +//! +//! 
If the `WGPU_CORE_LOCK_OBSERVE_DIR` environment variable is not +//! set, then no instrumentation takes place, and the locks behave +//! normally. +//! +//! To make sure we capture all acquisitions regardless of when the +//! program exits, each thread writes events directly to its log file +//! as they occur. A `write` system call is generally just a copy from +//! userspace into the kernel's buffer, so hopefully this approach +//! will still have tolerable performance. +//! +//! [`lock/rank.rs`]: ../../../src/wgpu_core/lock/rank.rs.html + +use crate::FastHashSet; + +use super::rank::{LockRank, LockRankSet}; +use std::{ + cell::RefCell, + fs::File, + panic::Location, + path::{Path, PathBuf}, +}; + +/// A `Mutex` instrumented for lock acquisition order observation. +/// +/// This is just a wrapper around a [`parking_lot::Mutex`], along with +/// its rank in the `wgpu_core` lock ordering. +/// +/// For details, see [the module documentation][self]. +pub struct Mutex { + inner: parking_lot::Mutex, + rank: LockRank, +} + +/// A guard produced by locking [`Mutex`]. +/// +/// This is just a wrapper around a [`parking_lot::MutexGuard`], along +/// with the state needed to track lock acquisition. +/// +/// For details, see [the module documentation][self]. 
+pub struct MutexGuard<'a, T> { + inner: parking_lot::MutexGuard<'a, T>, + _state: LockStateGuard, +} + +impl Mutex { + pub fn new(rank: LockRank, value: T) -> Mutex { + Mutex { + inner: parking_lot::Mutex::new(value), + rank, + } + } + + #[track_caller] + pub fn lock(&self) -> MutexGuard { + let saved = acquire(self.rank, Location::caller()); + MutexGuard { + inner: self.inner.lock(), + _state: LockStateGuard { saved }, + } + } +} + +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.inner, f).map_err(|_| ()) + } +} + +impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl<'a, T> std::ops::DerefMut for MutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut() + } +} + +impl std::fmt::Debug for Mutex { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.inner.fmt(f) + } +} + +/// An `RwLock` instrumented for lock acquisition order observation. +/// +/// This is just a wrapper around a [`parking_lot::RwLock`], along with +/// its rank in the `wgpu_core` lock ordering. +/// +/// For details, see [the module documentation][self]. +pub struct RwLock { + inner: parking_lot::RwLock, + rank: LockRank, +} + +/// A read guard produced by locking [`RwLock`] for reading. +/// +/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with +/// the state needed to track lock acquisition. +/// +/// For details, see [the module documentation][self]. +pub struct RwLockReadGuard<'a, T> { + inner: parking_lot::RwLockReadGuard<'a, T>, + _state: LockStateGuard, +} + +/// A write guard produced by locking [`RwLock`] for writing. +/// +/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along +/// with the state needed to track lock acquisition. 
+/// +/// For details, see [the module documentation][self]. +pub struct RwLockWriteGuard<'a, T> { + inner: parking_lot::RwLockWriteGuard<'a, T>, + _state: LockStateGuard, +} + +impl RwLock { + pub fn new(rank: LockRank, value: T) -> RwLock { + RwLock { + inner: parking_lot::RwLock::new(value), + rank, + } + } + + #[track_caller] + pub fn read(&self) -> RwLockReadGuard { + let saved = acquire(self.rank, Location::caller()); + RwLockReadGuard { + inner: self.inner.read(), + _state: LockStateGuard { saved }, + } + } + + #[track_caller] + pub fn write(&self) -> RwLockWriteGuard { + let saved = acquire(self.rank, Location::caller()); + RwLockWriteGuard { + inner: self.inner.write(), + _state: LockStateGuard { saved }, + } + } +} + +impl<'a, T> RwLockWriteGuard<'a, T> { + pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> { + RwLockReadGuard { + inner: parking_lot::RwLockWriteGuard::downgrade(this.inner), + _state: this._state, + } + } +} + +impl std::fmt::Debug for RwLock { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.inner.fmt(f) + } +} + +impl<'a, T> std::ops::Deref for RwLockReadGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl<'a, T> std::ops::Deref for RwLockWriteGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl<'a, T> std::ops::DerefMut for RwLockWriteGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut() + } +} + +/// A container that restores a prior per-thread lock state when dropped. +/// +/// This type serves two purposes: +/// +/// - Operations like `RwLockWriteGuard::downgrade` would like to be able to +/// destructure lock guards and reassemble their pieces into new guards, but +/// if the guard type itself implements `Drop`, we can't destructure it +/// without unsafe code or pointless `Option`s whose state is almost always +/// statically known. 
+/// +/// - We can just implement `Drop` for this type once, and then use it in lock +/// guards, rather than implementing `Drop` separately for each guard type. +struct LockStateGuard { + /// The youngest lock that was already held when we acquired this + /// one, if any. + saved: Option, +} + +impl Drop for LockStateGuard { + fn drop(&mut self) { + release(self.saved) + } +} + +/// Check and record the acquisition of a lock with `new_rank`. +/// +/// Log the acquisition of a lock with `new_rank`, and +/// update the per-thread state accordingly. +/// +/// Return the `Option` state that must be restored when this lock is +/// released. +fn acquire(new_rank: LockRank, location: &'static Location<'static>) -> Option { + LOCK_STATE.with_borrow_mut(|state| match *state { + ThreadState::Disabled => None, + ThreadState::Initial => { + let Ok(dir) = std::env::var("WGPU_CORE_LOCK_OBSERVE_DIR") else { + *state = ThreadState::Disabled; + return None; + }; + + // Create the observation log file. + let mut log = ObservationLog::create(dir) + .expect("Failed to open lock observation file (does the dir exist?)"); + + // Log the full set of lock ranks, so that the analysis can even see + // locks that are only acquired in isolation. + for rank in LockRankSet::all().iter() { + log.write_rank(rank); + } + + // Update our state to reflect that we are logging acquisitions, and + // that we have acquired this lock. + *state = ThreadState::Enabled { + held_lock: Some(HeldLock { + rank: new_rank, + location, + }), + log, + }; + + // Since this is the first acquisition on this thread, we know that + // there is no prior lock held, and thus nothing to log yet. 
+ None + } + ThreadState::Enabled { + ref mut held_lock, + ref mut log, + } => { + if let Some(ref held_lock) = held_lock { + log.write_acquisition(held_lock, new_rank, location); + } + + std::mem::replace( + held_lock, + Some(HeldLock { + rank: new_rank, + location, + }), + ) + } + }) +} + +/// Record the release of a lock whose saved state was `saved`. +fn release(saved: Option) { + LOCK_STATE.with_borrow_mut(|state| { + if let ThreadState::Enabled { + ref mut held_lock, .. + } = *state + { + *held_lock = saved; + } + }); +} + +thread_local! { + static LOCK_STATE: RefCell = const { RefCell::new(ThreadState::Initial) }; +} + +/// Thread-local state for lock observation. +enum ThreadState { + /// This thread hasn't yet checked the environment variable. + Initial, + + /// This thread checked the environment variable, and it was + /// unset, so this thread is not observing lock acquisitions. + Disabled, + + /// Lock observation is enabled for this thread. + Enabled { + held_lock: Option, + log: ObservationLog, + }, +} + +/// Information about a currently held lock. +#[derive(Debug, Copy, Clone)] +struct HeldLock { + /// The lock's rank. + rank: LockRank, + + /// Where we acquired the lock. + location: &'static Location<'static>, +} + +/// A log to which we can write observations of lock activity. +struct ObservationLog { + /// The file to which we are logging lock observations. + log_file: File, + + /// [`Location`]s we've seen so far. + /// + /// This is a hashset of raw pointers because raw pointers have + /// the [`Eq`] and [`Hash`] relations we want: the pointer value, not + /// the contents. There's no unsafe code in this module. + locations_seen: FastHashSet<*const Location<'static>>, + + /// Buffer for serializing events, retained for allocation reuse. + buffer: Vec, +} + +#[allow(trivial_casts)] +impl ObservationLog { + /// Create an observation log in `dir` for the current pid and thread. 
+ fn create(dir: impl AsRef) -> Result { + let mut path = PathBuf::from(dir.as_ref()); + path.push(format!( + "locks-{}.{:?}.ron", + std::process::id(), + std::thread::current().id() + )); + let log_file = File::create(&path)?; + Ok(ObservationLog { + log_file, + locations_seen: FastHashSet::default(), + buffer: Vec::new(), + }) + } + + /// Record the acquisition of one lock while holding another. + /// + /// Log that we acquired a lock of `new_rank` at `new_location` while still + /// holding other locks, the most recently acquired of which has + /// `older_rank`. + fn write_acquisition( + &mut self, + older_lock: &HeldLock, + new_rank: LockRank, + new_location: &'static Location<'static>, + ) { + self.write_location(older_lock.location); + self.write_location(new_location); + self.write_action(&Action::Acquisition { + older_rank: older_lock.rank.bit.number(), + older_location: addr(older_lock.location), + newer_rank: new_rank.bit.number(), + newer_location: addr(new_location), + }); + } + + fn write_location(&mut self, location: &'static Location<'static>) { + if self.locations_seen.insert(location) { + self.write_action(&Action::Location { + address: addr(location), + file: location.file(), + line: location.line(), + column: location.column(), + }); + } + } + + fn write_rank(&mut self, rank: LockRankSet) { + self.write_action(&Action::Rank { + bit: rank.number(), + member_name: rank.member_name(), + const_name: rank.const_name(), + }); + } + + fn write_action(&mut self, action: &Action) { + use std::io::Write; + + self.buffer.clear(); + ron::ser::to_writer(&mut self.buffer, &action) + .expect("error serializing `lock::observing::Action`"); + self.buffer.push(b'\n'); + self.log_file + .write_all(&self.buffer) + .expect("error writing `lock::observing::Action`"); + } +} + +/// An action logged by a thread that is observing lock acquisition order. 
+/// +/// Each thread's log file is a sequence of these enums, serialized +/// using the [`ron`] crate, one action per line. +/// +/// Lock observation cannot assume that there will be any convenient +/// finalization point before the program exits, so in practice, +/// actions must be written immediately when they occur. This means we +/// can't, say, accumulate tables and write them out when they're +/// complete. The `lock-analyzer` binary is then responsible for +/// consolidating the data into a single table of observed transitions. +#[derive(serde::Serialize)] +enum Action { + /// A location that we will refer to in later actions. + /// + /// We write one of these events the first time we see a + /// particular `Location`. Treating this as a separate action + /// simply lets us avoid repeating the content over and over + /// again in every [`Acquisition`] action. + /// + /// [`Acquisition`]: Action::Acquisition + Location { + address: usize, + file: &'static str, + line: u32, + column: u32, + }, + + /// A lock rank that we will refer to in later actions. + /// + /// We write out one of these events for every lock rank at the + /// beginning of each thread's log file. Treating this as a + /// separate action simply lets us avoid repeating the names over + /// and over again in every [`Acquisition`] action. + /// + /// [`Acquisition`]: Action::Acquisition + Rank { + bit: u32, + member_name: &'static str, + const_name: &'static str, + }, + + /// An attempt to acquire a lock while holding another lock. + Acquisition { + /// The number of the already acquired lock's rank. + older_rank: u32, + + /// The source position at which we acquired it. Specifically, + /// its `Location`'s address, as an integer. + older_location: usize, + + /// The number of the rank of the lock we are acquiring. + newer_rank: u32, + + /// The source position at which we are acquiring it. + /// Specifically, its `Location`'s address, as an integer. 
+ newer_location: usize, + }, +} + +impl LockRankSet { + /// Return the number of this rank's first member. + fn number(self) -> u32 { + self.bits().trailing_zeros() + } +} + +/// Convenience for `std::ptr::from_ref(t) as usize`. +fn addr(t: &T) -> usize { + std::ptr::from_ref(t) as usize +} diff --git a/wgpu-core/src/lock/rank.rs b/wgpu-core/src/lock/rank.rs index 162d3d2604..abb5cb002d 100644 --- a/wgpu-core/src/lock/rank.rs +++ b/wgpu-core/src/lock/rank.rs @@ -63,7 +63,7 @@ macro_rules! define_lock_ranks { } impl LockRankSet { - pub fn name(self) -> &'static str { + pub fn member_name(self) -> &'static str { match self { $( LockRankSet:: $name => $member, @@ -71,6 +71,16 @@ macro_rules! define_lock_ranks { _ => "", } } + + #[cfg_attr(not(feature = "observe_locks"), allow(dead_code))] + pub fn const_name(self) -> &'static str { + match self { + $( + LockRankSet:: $name => stringify!($name), + )* + _ => "", + } + } } $( diff --git a/wgpu-core/src/lock/ranked.rs b/wgpu-core/src/lock/ranked.rs index 13301f86fc..c3aedb1b08 100644 --- a/wgpu-core/src/lock/ranked.rs +++ b/wgpu-core/src/lock/ranked.rs @@ -63,9 +63,7 @@ use std::{cell::Cell, panic::Location}; /// This is just a wrapper around a [`parking_lot::Mutex`], along with /// its rank in the `wgpu_core` lock ordering. /// -/// For details, see [the module documentation][mod]. -/// -/// [mod]: crate::lock::ranked +/// For details, see [the module documentation][self]. pub struct Mutex { inner: parking_lot::Mutex, rank: LockRank, @@ -76,9 +74,7 @@ pub struct Mutex { /// This is just a wrapper around a [`parking_lot::MutexGuard`], along /// with the state needed to track lock acquisition. /// -/// For details, see [the module documentation][mod]. -/// -/// [mod]: crate::lock::ranked +/// For details, see [the module documentation][self]. 
pub struct MutexGuard<'a, T> { inner: parking_lot::MutexGuard<'a, T>, saved: LockStateGuard, @@ -144,12 +140,12 @@ fn acquire(new_rank: LockRank, location: &'static Location<'static>) -> LockStat last locked {:<35} at {}\n\ now locking {:<35} at {}\n\ Locking {} after locking {} is not permitted.", - last_rank.bit.name(), + last_rank.bit.member_name(), last_location, - new_rank.bit.name(), + new_rank.bit.member_name(), location, - new_rank.bit.name(), - last_rank.bit.name(), + new_rank.bit.member_name(), + last_rank.bit.member_name(), ); } LOCK_STATE.set(LockState { @@ -220,9 +216,7 @@ impl std::fmt::Debug for Mutex { /// This is just a wrapper around a [`parking_lot::RwLock`], along with /// its rank in the `wgpu_core` lock ordering. /// -/// For details, see [the module documentation][mod]. -/// -/// [mod]: crate::lock::ranked +/// For details, see [the module documentation][self]. pub struct RwLock { inner: parking_lot::RwLock, rank: LockRank, @@ -233,9 +227,7 @@ pub struct RwLock { /// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with /// the state needed to track lock acquisition. /// -/// For details, see [the module documentation][mod]. -/// -/// [mod]: crate::lock::ranked +/// For details, see [the module documentation][self]. pub struct RwLockReadGuard<'a, T> { inner: parking_lot::RwLockReadGuard<'a, T>, saved: LockStateGuard, @@ -246,9 +238,7 @@ pub struct RwLockReadGuard<'a, T> { /// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along /// with the state needed to track lock acquisition. /// -/// For details, see [the module documentation][mod]. -/// -/// [mod]: crate::lock::ranked +/// For details, see [the module documentation][self]. 
pub struct RwLockWriteGuard<'a, T> { inner: parking_lot::RwLockWriteGuard<'a, T>, saved: LockStateGuard, diff --git a/wgpu-core/src/lock/vanilla.rs b/wgpu-core/src/lock/vanilla.rs index 9a35b6d9d8..51e472b118 100644 --- a/wgpu-core/src/lock/vanilla.rs +++ b/wgpu-core/src/lock/vanilla.rs @@ -30,6 +30,15 @@ impl Mutex { } } +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.0, f).map_err(|_| ()) + } +} + impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { type Target = T; diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index db1c1ba76a..08e7167db6 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -4,7 +4,7 @@ use crate::{ command::ColorAttachmentError, device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, id::{PipelineCacheId, PipelineLayoutId, ShaderModuleId}, - resource::{Labeled, TrackingData}, + resource::{InvalidResourceError, Labeled, TrackingData}, resource_log, validation, Label, }; use arrayvec::ArrayVec; @@ -222,10 +222,6 @@ pub struct ResolvedComputePipelineDescriptor<'a> { pub enum CreateComputePipelineError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Cache is invalid")] - InvalidCache, #[error("Unable to derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Error matching shader requirements against the pipeline")] @@ -236,6 +232,8 @@ pub enum CreateComputePipelineError { PipelineConstants(String), #[error(transparent)] MissingDownlevelFlags(#[from] MissingDownlevelFlags), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Debug)] @@ -286,16 +284,6 @@ pub enum CreatePipelineCacheError { Internal(String), } -impl From for CreatePipelineCacheError { - fn from(value: hal::PipelineCacheError) -> Self { - match value { - 
hal::PipelineCacheError::Device(device) => { - CreatePipelineCacheError::Device(device.into()) - } - } - } -} - #[derive(Debug)] pub struct PipelineCache { pub(crate) raw: ManuallyDrop>, @@ -477,10 +465,6 @@ pub enum CreateRenderPipelineError { ColorAttachment(#[from] ColorAttachmentError), #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Pipeline cache is invalid")] - InvalidCache, #[error("Unable to derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Color state [{0}] is invalid")] @@ -550,6 +534,8 @@ pub enum CreateRenderPipelineError { "but no render target for the pipeline was specified." ))] NoTargetSpecified, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } bitflags::bitflags! { diff --git a/wgpu-core/src/pipeline_cache.rs b/wgpu-core/src/pipeline_cache.rs index b88fc21dda..e506d2cd5b 100644 --- a/wgpu-core/src/pipeline_cache.rs +++ b/wgpu-core/src/pipeline_cache.rs @@ -1,7 +1,9 @@ +use std::mem::size_of; + use thiserror::Error; use wgt::AdapterInfo; -pub const HEADER_LENGTH: usize = std::mem::size_of::(); +pub const HEADER_LENGTH: usize = size_of::(); #[derive(Debug, PartialEq, Eq, Clone, Error)] #[non_exhaustive] @@ -112,7 +114,7 @@ pub fn add_cache_header( const MAGIC: [u8; 8] = *b"WGPUPLCH"; const HEADER_VERSION: u32 = 1; -const ABI: u32 = std::mem::size_of::<*const ()>() as u32; +const ABI: u32 = size_of::<*const ()>() as u32; /// The value used to fill [`PipelineCacheHeader::hash_space`] /// @@ -179,10 +181,7 @@ impl PipelineCacheHeader { let data_size = reader.read_u64()?; let data_hash = reader.read_u64()?; - assert_eq!( - reader.total_read, - std::mem::size_of::() - ); + assert_eq!(reader.total_read, size_of::()); Some(( PipelineCacheHeader { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 697156b35f..c9d0124bf4 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -122,10 +122,7 @@ impl 
Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let (device, config) = if let Some(ref present) = *surface.presentation.lock() { present.device.check_is_valid()?; @@ -134,7 +131,7 @@ impl Global { return Err(SurfaceError::NotConfigured); }; - let fid = hub.textures.prepare(device.backend(), texture_id_in); + let fid = hub.textures.prepare(texture_id_in); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -179,7 +176,7 @@ impl Global { let clear_view_desc = hal::TextureViewDescriptor { label: hal_label( Some("(wgpu internal) clear surface texture view"), - self.instance.flags, + device.instance_flags, ), format: config.format, dimension: wgt::TextureViewDimension::D2, @@ -191,7 +188,7 @@ impl Global { .raw() .create_texture_view(ast.texture.as_ref().borrow(), &clear_view_desc) } - .map_err(DeviceError::from)?; + .map_err(|e| device.handle_hal_error(e))?; let mut presentation = surface.presentation.lock(); let present = presentation.as_mut().unwrap(); @@ -218,7 +215,7 @@ impl Global { .textures .insert_single(&texture, hal::TextureUses::UNINITIALIZED); - let id = fid.assign(texture); + let id = fid.assign(resource::Fallible::Valid(texture)); if present.acquired_texture.is_some() { return Err(SurfaceError::AlreadyAcquired); @@ -238,7 +235,7 @@ impl Global { match err { hal::SurfaceError::Lost => Status::Lost, hal::SurfaceError::Device(err) => { - return Err(DeviceError::from(err).into()); + return Err(device.handle_hal_error(err).into()); } hal::SurfaceError::Outdated => Status::Outdated, hal::SurfaceError::Other(msg) => { @@ -257,10 +254,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { @@ -286,8 
+280,8 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. - let texture = hub.textures.unregister(texture_id); - if let Some(texture) = texture { + let texture = hub.textures.remove(texture_id).get(); + if let Ok(texture) = texture { device .trackers .lock() @@ -315,7 +309,9 @@ impl Global { Ok(()) => Ok(Status::Good), Err(err) => match err { hal::SurfaceError::Lost => Ok(Status::Lost), - hal::SurfaceError::Device(err) => Err(SurfaceError::from(DeviceError::from(err))), + hal::SurfaceError::Device(err) => { + Err(SurfaceError::from(device.handle_hal_error(err))) + } hal::SurfaceError::Outdated => Ok(Status::Outdated), hal::SurfaceError::Other(msg) => { log::error!("acquire error: {}", msg); @@ -330,10 +326,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { Some(present) => present, @@ -357,9 +350,9 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. 
- let texture = hub.textures.unregister(texture_id); + let texture = hub.textures.remove(texture_id).get(); - if let Some(texture) = texture { + if let Ok(texture) = texture { device .trackers .lock() diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index fa7e0def6c..b3349235e9 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; +use std::{mem::size_of, sync::Arc}; use crate::{ id::Id, identity::IdentityManager, lock::{rank, RwLock, RwLockReadGuard, RwLockWriteGuard}, - storage::{Element, InvalidId, Storage, StorageItem}, + storage::{Element, Storage, StorageItem}, }; #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] @@ -12,7 +12,6 @@ pub struct RegistryReport { pub num_allocated: usize, pub num_kept_from_user: usize, pub num_released_from_user: usize, - pub num_error: usize, pub element_size: usize, } @@ -56,63 +55,43 @@ pub(crate) struct FutureId<'a, T: StorageItem> { } impl FutureId<'_, T> { - #[allow(dead_code)] pub fn id(&self) -> Id { self.id } - pub fn into_id(self) -> Id { - self.id - } - /// Assign a new resource to this ID. /// /// Registers it with the registry. 
- pub fn assign(self, value: Arc) -> Id { + pub fn assign(self, value: T) -> Id { let mut data = self.data.write(); data.insert(self.id, value); self.id } - - pub fn assign_error(self) -> Id { - self.data.write().insert_error(self.id); - self.id - } } impl Registry { - pub(crate) fn prepare( - &self, - backend: wgt::Backend, - id_in: Option>, - ) -> FutureId { + pub(crate) fn prepare(&self, id_in: Option>) -> FutureId { FutureId { id: match id_in { Some(id_in) => { self.identity.mark_as_used(id_in); id_in } - None => self.identity.process(backend), + None => self.identity.process(), }, data: &self.storage, } } - pub(crate) fn get(&self, id: Id) -> Result, InvalidId> { - self.read().get_owned(id) - } + #[track_caller] pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { self.storage.read() } + #[track_caller] pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage> { self.storage.write() } - pub(crate) fn force_replace_with_error(&self, id: Id) { - let mut storage = self.storage.write(); - storage.remove(id); - storage.insert_error(id); - } - pub(crate) fn unregister(&self, id: Id) -> Option> { + pub(crate) fn remove(&self, id: Id) -> T { let value = self.storage.write().remove(id); // This needs to happen *after* removing it from the storage, to maintain the // invariant that `self.identity` only contains ids which are actually available @@ -125,7 +104,7 @@ impl Registry { pub(crate) fn generate_report(&self) -> RegistryReport { let storage = self.storage.read(); let mut report = RegistryReport { - element_size: std::mem::size_of::(), + element_size: size_of::(), ..Default::default() }; report.num_allocated = self.identity.values.lock().count(); @@ -133,13 +112,18 @@ impl Registry { match *element { Element::Occupied(..) 
=> report.num_kept_from_user += 1, Element::Vacant => report.num_released_from_user += 1, - Element::Error(_) => report.num_error += 1, } } report } } +impl Registry { + pub(crate) fn get(&self, id: Id) -> T { + self.read().get(id) + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -166,9 +150,9 @@ mod tests { s.spawn(|| { for _ in 0..1000 { let value = Arc::new(TestData); - let new_id = registry.prepare(wgt::Backend::Empty, None); + let new_id = registry.prepare(None); let id = new_id.assign(value); - registry.unregister(id); + registry.remove(id); } }); } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 184851fc2a..5df285da54 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -106,8 +106,8 @@ pub(crate) trait ParentDevice: Labeled { } } - fn same_device(&self, device: &Arc) -> Result<(), DeviceError> { - if Arc::ptr_eq(self.device(), device) { + fn same_device(&self, device: &Device) -> Result<(), DeviceError> { + if std::ptr::eq(&**self.device(), device) { Ok(()) } else { Err(DeviceError::DeviceMismatch(Box::new(DeviceMismatch { @@ -305,7 +305,7 @@ impl BufferMapCallback { let status = match result { Ok(()) => BufferMapAsyncStatus::Success, Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, - Err(BufferAccessError::InvalidBufferId(_)) + Err(BufferAccessError::InvalidResource(_)) | Err(BufferAccessError::DestroyedResource(_)) => BufferMapAsyncStatus::Invalid, Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped, Err(BufferAccessError::MapAlreadyPending) => { @@ -324,7 +324,9 @@ impl BufferMapCallback { | Err(BufferAccessError::NegativeRange { .. 
}) => { BufferMapAsyncStatus::InvalidRange } - Err(_) => BufferMapAsyncStatus::Error, + Err(BufferAccessError::Failed) + | Err(BufferAccessError::NotMapped) + | Err(BufferAccessError::MapAborted) => BufferMapAsyncStatus::Error, }; (inner.callback)(status, inner.user_data); @@ -347,8 +349,6 @@ pub enum BufferAccessError { Device(#[from] DeviceError), #[error("Buffer map failed")] Failed, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Buffer is already mapped")] @@ -386,6 +386,8 @@ pub enum BufferAccessError { }, #[error("Buffer map aborted")] MapAborted, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -410,6 +412,45 @@ pub struct MissingTextureUsageError { #[error("{0} has been destroyed")] pub struct DestroyedResourceError(pub ResourceErrorIdent); +#[derive(Clone, Debug, Error)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[error("{0} is invalid")] +pub struct InvalidResourceError(pub ResourceErrorIdent); + +pub(crate) enum Fallible { + Valid(Arc), + Invalid(Arc), +} + +impl Fallible { + pub fn get(self) -> Result, InvalidResourceError> { + match self { + Fallible::Valid(v) => Ok(v), + Fallible::Invalid(label) => Err(InvalidResourceError(ResourceErrorIdent { + r#type: Cow::Borrowed(T::TYPE), + label: (*label).clone(), + })), + } + } +} + +impl Clone for Fallible { + fn clone(&self) -> Self { + match self { + Self::Valid(v) => Self::Valid(v.clone()), + Self::Invalid(l) => Self::Invalid(l.clone()), + } + } +} + +impl ResourceType for Fallible { + const TYPE: &'static str = T::TYPE; +} + +impl crate::storage::StorageItem for Fallible { + type Marker = T::Marker; +} + pub type BufferAccessResult = Result<(), BufferAccessError>; #[derive(Debug)] @@ -832,8 +873,10 @@ impl StagingBuffer { memory_flags: hal::MemoryFlags::TRANSIENT, }; - let raw = unsafe { 
device.raw().create_buffer(&stage_desc)? }; - let mapping = unsafe { device.raw().map_buffer(raw.as_ref(), 0..size.get()) }?; + let raw = unsafe { device.raw().create_buffer(&stage_desc) } + .map_err(|e| device.handle_hal_error(e))?; + let mapping = unsafe { device.raw().map_buffer(raw.as_ref(), 0..size.get()) } + .map_err(|e| device.handle_hal_error(e))?; let staging_buffer = StagingBuffer { raw, @@ -1014,7 +1057,7 @@ impl Texture { if init { TextureInitTracker::new(desc.mip_level_count, desc.array_layer_count()) } else { - TextureInitTracker::new(0, 0) + TextureInitTracker::new(desc.mip_level_count, 0) }, ), full_range: TextureSelector { @@ -1200,7 +1243,7 @@ impl Global { let hub = &self.hub; - if let Ok(buffer) = hub.buffers.get(id) { + if let Ok(buffer) = hub.buffers.get(id).get() { let snatch_guard = buffer.device.snatchable_lock.read(); let hal_buffer = buffer .raw(&snatch_guard) @@ -1223,7 +1266,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture) = hub.textures.get(id) { + if let Ok(texture) = hub.textures.get(id).get() { let snatch_guard = texture.device.snatchable_lock.read(); let hal_texture = texture.raw(&snatch_guard); let hal_texture = hal_texture @@ -1247,7 +1290,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture_view) = hub.texture_views.get(id) { + if let Ok(texture_view) = hub.texture_views.get(id).get() { let snatch_guard = texture_view.device.snatchable_lock.read(); let hal_texture_view = texture_view.raw(&snatch_guard); let hal_texture_view = hal_texture_view @@ -1270,11 +1313,8 @@ impl Global { profiling::scope!("Adapter::as_hal"); let hub = &self.hub; - let adapter = hub.adapters.get(id).ok(); - let hal_adapter = adapter - .as_ref() - .map(|adapter| &adapter.raw.adapter) - .and_then(|adapter| adapter.as_any().downcast_ref()); + let adapter = hub.adapters.get(id); + let hal_adapter = adapter.raw.adapter.as_any().downcast_ref(); hal_adapter_callback(hal_adapter) } @@ -1289,12 +1329,8 @@ impl Global { ) -> R { 
profiling::scope!("Device::as_hal"); - let hub = &self.hub; - let device = hub.devices.get(id).ok(); - let hal_device = device - .as_ref() - .map(|device| device.raw()) - .and_then(|device| device.as_any().downcast_ref()); + let device = self.hub.devices.get(id); + let hal_device = device.raw().as_any().downcast_ref(); hal_device_callback(hal_device) } @@ -1309,14 +1345,9 @@ impl Global { ) -> R { profiling::scope!("Device::fence_as_hal"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(id) { - let fence = device.fence.read(); - hal_fence_callback(fence.as_any().downcast_ref()) - } else { - hal_fence_callback(None) - } + let device = self.hub.devices.get(id); + let fence = device.fence.read(); + hal_fence_callback(fence.as_any().downcast_ref()) } /// # Safety @@ -1328,10 +1359,9 @@ impl Global { ) -> R { profiling::scope!("Surface::as_hal"); - let surface = self.surfaces.get(id).ok(); + let surface = self.surfaces.get(id); let hal_surface = surface - .as_ref() - .and_then(|surface| surface.raw(A::VARIANT)) + .raw(A::VARIANT) .and_then(|surface| surface.as_any().downcast_ref()); hal_surface_callback(hal_surface) @@ -1353,12 +1383,13 @@ impl Global { let hub = &self.hub; - if let Ok(cmd_buf) = hub.command_buffers.get(id.into_command_buffer_id()) { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(id.into_command_buffer_id()); + let cmd_buf_data = cmd_buf.try_get(); + + if let Ok(mut cmd_buf_data) = cmd_buf_data { let cmd_buf_raw = cmd_buf_data .encoder - .open() + .open(&cmd_buf.device) .ok() .and_then(|encoder| encoder.as_any_mut().downcast_mut()); hal_command_encoder_callback(cmd_buf_raw) @@ -1616,8 +1647,6 @@ impl TextureView { pub enum CreateTextureViewError { #[error(transparent)] Device(#[from] DeviceError), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), 
#[error("Not enough memory left to create texture view")] @@ -1660,6 +1689,8 @@ pub enum CreateTextureViewError { texture: wgt::TextureFormat, view: wgt::TextureFormat, }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -1832,8 +1863,8 @@ impl QuerySet { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum DestroyError { - #[error("Resource is invalid")] - Invalid, #[error("Resource is already destroyed")] AlreadyDestroyed, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index c5e91eedd4..5930748759 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -1,33 +1,35 @@ use std::sync::Arc; -use wgt::Backend; - use crate::id::{Id, Marker}; use crate::resource::ResourceType; use crate::{Epoch, Index}; /// An entry in a `Storage::map` table. #[derive(Debug)] -pub(crate) enum Element { +pub(crate) enum Element +where + T: StorageItem, +{ /// There are no live ids with this index. Vacant, /// There is one live id with this index, allocated at the given /// epoch. - Occupied(Arc, Epoch), - - /// Like `Occupied`, but an error occurred when creating the - /// resource. - Error(Epoch), + Occupied(T, Epoch), } -#[derive(Clone, Debug)] -pub(crate) struct InvalidId; - pub(crate) trait StorageItem: ResourceType { type Marker: Marker; } +impl ResourceType for Arc { + const TYPE: &'static str = T::TYPE; +} + +impl StorageItem for Arc { + type Marker = T::Marker; +} + #[macro_export] macro_rules! impl_storage_item { ($ty:ident) => { @@ -70,34 +72,13 @@ impl Storage where T: StorageItem, { - /// Get a reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. 
- pub(crate) fn get(&self, id: Id) -> Result<&Arc, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), - Some(&Element::Error(epoch)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{:?}] is no longer alive", - self.kind, id - ); - result - } - - /// Get an owned reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get_owned(&self, id: Id) -> Result, InvalidId> { - Ok(Arc::clone(self.get(id)?)) - } - - fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element) { + pub(crate) fn insert(&mut self, id: Id, value: T) { + let (index, epoch) = id.unzip(); + let index = index as usize; if index >= self.map.len() { self.map.resize_with(index + 1, || Element::Vacant); } - match std::mem::replace(&mut self.map[index], element) { + match std::mem::replace(&mut self.map[index], Element::Occupied(value, epoch)) { Element::Vacant => {} Element::Occupied(_, storage_epoch) => { assert_ne!( @@ -107,58 +88,27 @@ where T::TYPE ); } - Element::Error(storage_epoch) => { - assert_ne!( - epoch, - storage_epoch, - "Index {index:?} of {} is already occupied with Error", - T::TYPE - ); - } - } - } - - pub(crate) fn insert(&mut self, id: Id, value: Arc) { - let (index, epoch, _backend) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch)) - } - - pub(crate) fn insert_error(&mut self, id: Id) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Error(epoch)) - } - - pub(crate) fn replace_with_error(&mut self, id: Id) -> Result, InvalidId> { - let (index, epoch, _) = id.unzip(); - match std::mem::replace(&mut self.map[index as usize], Element::Error(epoch)) { - Element::Vacant => 
panic!("Cannot access vacant resource"), - Element::Occupied(value, storage_epoch) => { - assert_eq!(epoch, storage_epoch); - Ok(value) - } - _ => Err(InvalidId), } } - pub(crate) fn remove(&mut self, id: Id) -> Option> { - let (index, epoch, _) = id.unzip(); + pub(crate) fn remove(&mut self, id: Id) -> T { + let (index, epoch) = id.unzip(); match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { Element::Occupied(value, storage_epoch) => { assert_eq!(epoch, storage_epoch); - Some(value) + value } - Element::Error(_) => None, Element::Vacant => panic!("Cannot remove a vacant resource"), } } - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator, &Arc)> { + pub(crate) fn iter(&self) -> impl Iterator, &T)> { self.map .iter() .enumerate() .filter_map(move |(index, x)| match *x { Element::Occupied(ref value, storage_epoch) => { - Some((Id::zip(index as Index, storage_epoch, backend), value)) + Some((Id::zip(index as Index, storage_epoch), value)) } _ => None, }) @@ -168,3 +118,24 @@ where self.map.len() } } + +impl Storage +where + T: StorageItem + Clone, +{ + /// Get an owned reference to an item. + /// Panics if there is an epoch mismatch, the entry is empty or in error. + pub(crate) fn get(&self, id: Id) -> T { + let (index, epoch) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (v.clone(), epoch), + None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{:?}] is no longer alive", + self.kind, id + ); + result + } +} diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 1c74bffd97..60c2238961 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -442,10 +442,9 @@ impl TextureTracker { let transitions = self .temp .drain(..) 
- .map(|pending| { + .inspect(|pending| { let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; textures.push(tex.inner.get(snatch_guard)); - pending }) .collect(); (transitions, textures) diff --git a/wgpu-core/src/validation.rs b/wgpu-core/src/validation.rs index c16d4d2179..3134930eb7 100644 --- a/wgpu-core/src/validation.rs +++ b/wgpu-core/src/validation.rs @@ -1,4 +1,4 @@ -use crate::{device::bgl, FastHashMap, FastHashSet}; +use crate::{device::bgl, resource::InvalidResourceError, FastHashMap, FastHashSet}; use arrayvec::ArrayVec; use std::{collections::hash_map::Entry, fmt}; use thiserror::Error; @@ -200,8 +200,6 @@ pub enum InputError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum StageError { - #[error("Shader module is invalid")] - InvalidModule, #[error( "Shader entry point's workgroup size {current:?} ({current_total} total invocations) must be less or equal to the per-dimension limit {limit:?} and the total invocation limit {total}" )] @@ -241,6 +239,8 @@ pub enum StageError { but no entry point was specified" )] MultipleEntryPointsFound, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option { @@ -275,7 +275,7 @@ fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option Sf::Rgb10a2Uint, Tf::Rgb10a2Unorm => Sf::Rgb10a2Unorm, - Tf::Rg11b10UFloat => Sf::Rg11b10UFloat, + Tf::Rg11b10Ufloat => Sf::Rg11b10Ufloat, Tf::R64Uint => Sf::R64Uint, Tf::Rg32Uint => Sf::Rg32Uint, @@ -332,7 +332,7 @@ fn map_storage_format_from_naga(format: naga::StorageFormat) -> wgt::TextureForm Sf::Rgb10a2Uint => Tf::Rgb10a2Uint, Sf::Rgb10a2Unorm => Tf::Rgb10a2Unorm, - Sf::Rg11b10UFloat => Tf::Rg11b10UFloat, + Sf::Rg11b10Ufloat => Tf::Rg11b10Ufloat, Sf::R64Uint => Tf::R64Uint, Sf::Rg32Uint => Tf::Rg32Uint, @@ -661,7 +661,7 @@ impl NumericType { Tf::Rgba8Sint | Tf::Rgba16Sint | Tf::Rgba32Sint => { (NumericDimension::Vector(Vs::Quad), Scalar::I32) } - 
Tf::Rg11b10UFloat => (NumericDimension::Vector(Vs::Tri), Scalar::F32), + Tf::Rg11b10Ufloat => (NumericDimension::Vector(Vs::Tri), Scalar::F32), Tf::Stencil8 | Tf::Depth16Unorm | Tf::Depth32Float diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml index ee2808b19d..d3a05f0790 100644 --- a/wgpu-hal/Cargo.toml +++ b/wgpu-hal/Cargo.toml @@ -78,6 +78,7 @@ dx12 = [ "gpu-allocator/d3d12", "naga/hlsl-out-if-target-windows", "windows/Win32_Graphics_Direct3D_Fxc", + "windows/Win32_Graphics_Direct3D_Dxc", "windows/Win32_Graphics_Direct3D", "windows/Win32_Graphics_Direct3D12", "windows/Win32_Graphics_DirectComposition", @@ -89,7 +90,6 @@ dx12 = [ "windows/Win32_System_Threading", "windows/Win32_UI_WindowsAndMessaging", ] -dxc_shader_compiler = ["dep:hassle-rs"] renderdoc = ["dep:libloading", "dep:renderdoc-sys"] fragile-send-sync-non-atomic-wasm = ["wgt/fragile-send-sync-non-atomic-wasm"] # Panic when running into an out-of-memory error (for debugging purposes). @@ -156,7 +156,6 @@ windows = { workspace = true, optional = true } bit-set = { workspace = true, optional = true } range-alloc = { workspace = true, optional = true } gpu-allocator = { workspace = true, optional = true } -hassle-rs = { workspace = true, optional = true } # For core macros. This crate is also reexported as windows::core. 
windows-core = { workspace = true, optional = true } @@ -207,5 +206,8 @@ env_logger.workspace = true glam.workspace = true # for ray-traced-triangle example winit.workspace = true # for "halmark" example -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -glutin.workspace = true # for "gles" example +[target.'cfg(not(any(target_arch = "wasm32", target_os = "ios")))'.dev-dependencies] +glutin-winit = { workspace = true, features = ["egl", "wgl", "wayland", "x11"] } # for "raw-gles" example +glutin = { workspace = true, features = ["egl", "wgl", "wayland", "x11"] } # for "raw-gles" example +rwh_05 = { version = "0.5", package = "raw-window-handle" } # temporary compatibility for glutin-winit in "raw-gles" example +winit = { workspace = true, features = ["rwh_05"] } # for "raw-gles" example diff --git a/wgpu-hal/examples/halmark/main.rs b/wgpu-hal/examples/halmark/main.rs index dabcea418a..8ab7f1cb47 100644 --- a/wgpu-hal/examples/halmark/main.rs +++ b/wgpu-hal/examples/halmark/main.rs @@ -14,7 +14,9 @@ use winit::{ use std::{ borrow::{Borrow, Cow}, - iter, mem, ptr, + iter, + mem::size_of, + ptr, time::Instant, }; @@ -193,7 +195,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }, @@ -228,7 +230,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }], @@ -394,7 +396,7 @@ impl Example { let global_buffer_desc = hal::BufferDescriptor { label: Some("global"), - size: mem::size_of::() as wgt::BufferAddress, + size: size_of::() as wgt::BufferAddress, usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::UNIFORM, memory_flags: hal::MemoryFlags::PREFER_COHERENT, 
}; @@ -406,7 +408,7 @@ impl Example { ptr::copy_nonoverlapping( &globals as *const Globals as *const u8, mapping.ptr.as_ptr(), - mem::size_of::(), + size_of::(), ); device.unmap_buffer(&buffer); assert!(mapping.is_coherent); @@ -414,7 +416,7 @@ impl Example { }; let local_alignment = wgt::math::align_to( - mem::size_of::() as u32, + size_of::() as u32, capabilities.limits.min_uniform_buffer_offset_alignment, ); let local_buffer_desc = hal::BufferDescriptor { @@ -476,7 +478,7 @@ impl Example { let local_buffer_binding = hal::BufferBinding { buffer: &local_buffer, offset: 0, - size: wgt::BufferSize::new(mem::size_of::() as _), + size: wgt::BufferSize::new(size_of::() as _), }; let local_group_desc = hal::BindGroupDescriptor { label: Some("local"), diff --git a/wgpu-hal/examples/raw-gles.rs b/wgpu-hal/examples/raw-gles.rs index 06df610658..2743ce9b82 100644 --- a/wgpu-hal/examples/raw-gles.rs +++ b/wgpu-hal/examples/raw-gles.rs @@ -10,61 +10,198 @@ extern crate wgpu_hal as hal; -#[cfg(not(any(windows, target_arch = "wasm32")))] +#[cfg(not(any(target_arch = "wasm32", target_os = "ios")))] fn main() { + use std::{ffi::CString, num::NonZeroU32}; + + use glutin::{ + config::GlConfig as _, + context::{NotCurrentGlContext as _, PossiblyCurrentGlContext as _, Version}, + display::{GetGlDisplay as _, GlDisplay as _}, + surface::GlSurface as _, + }; + use glutin_winit::GlWindow as _; + use rwh_05::HasRawWindowHandle as _; + env_logger::init(); println!("Initializing external GL context"); - let event_loop = glutin::event_loop::EventLoop::new(); - let window_builder = glutin::window::WindowBuilder::new(); - let gl_context = unsafe { - glutin::ContextBuilder::new() - .with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGlEs, (3, 0))) - .build_windowed(window_builder, &event_loop) - .unwrap() - .make_current() - .unwrap() - }; - let inner_size = gl_context.window().inner_size(); + let event_loop = winit::event_loop::EventLoop::new().unwrap(); + // Only Windows requires the 
window to be present before creating the display. + // Other platforms don't really need one. + let window_builder = cfg!(windows).then(|| { + winit::window::WindowBuilder::new() + .with_title("WGPU raw GLES example (press Escape to exit)") + }); - println!("Hooking up to wgpu-hal"); - let exposed = unsafe { - ::Adapter::new_external(|name| { - gl_context.get_proc_address(name) - }) + // The template will match only the configurations supporting rendering + // to Windows. + let template = glutin::config::ConfigTemplateBuilder::new(); + + let display_builder = glutin_winit::DisplayBuilder::new().with_window_builder(window_builder); + + // Find the config with the maximum number of samples, so our triangle will be + // smooth. + pub fn gl_config_picker( + configs: Box + '_>, + ) -> glutin::config::Config { + configs + .reduce(|accum, config| { + if config.num_samples() > accum.num_samples() { + config + } else { + accum + } + }) + .expect("Failed to find a matching config") } - .expect("GL adapter can't be initialized"); - fill_screen(&exposed, inner_size.width, inner_size.height); - - println!("Showing the window"); - gl_context.swap_buffers().unwrap(); - - event_loop.run(move |event, _, control_flow| { - use glutin::{ - event::{Event, KeyboardInput, VirtualKeyCode, WindowEvent}, - event_loop::ControlFlow, - }; - *control_flow = ControlFlow::Wait; - - match event { - Event::LoopDestroyed => (), - Event::WindowEvent { - event: - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - }, - .. 
- } => *control_flow = ControlFlow::Exit, - _ => (), - } + let (mut window, gl_config) = display_builder + .build(&event_loop, template, gl_config_picker) + .expect("Failed to build window and config from display"); + + println!("Picked a config with {} samples", gl_config.num_samples()); + + let raw_window_handle = window.as_ref().map(|window| window.raw_window_handle()); + + // XXX The display could be obtained from any object created by it, so we can + // query it from the config. + let gl_display = gl_config.display(); + + // Glutin tries to create an OpenGL context by default. Force it to use any version of GLES. + let context_attributes = glutin::context::ContextAttributesBuilder::new() + // WGPU expects GLES 3.0+. + .with_context_api(glutin::context::ContextApi::Gles(Some(Version::new(3, 0)))) + .build(raw_window_handle); + + let mut not_current_gl_context = Some(unsafe { + gl_display + .create_context(&gl_config, &context_attributes) + .expect("failed to create context") }); + + let mut state = None; + + // Only needs to be loaded once + let mut exposed = None; + + event_loop + .run(move |event, window_target| { + use winit::{ + event::{Event, KeyEvent, WindowEvent}, + event_loop::ControlFlow, + keyboard::{Key, NamedKey}, + }; + window_target.set_control_flow(ControlFlow::Wait); + + match event { + // Event::LoopExiting => (), + Event::WindowEvent { + window_id: _, + event: + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + event: + KeyEvent { + logical_key: Key::Named(NamedKey::Escape), + .. + }, + .. 
+ }, + } => window_target.exit(), + Event::Resumed => { + let window = window.take().unwrap_or_else(|| { + let window_builder = winit::window::WindowBuilder::new() + .with_title("WGPU raw GLES example (press Escape to exit)"); + glutin_winit::finalize_window(window_target, window_builder, &gl_config) + .unwrap() + }); + + let attrs = window.build_surface_attributes(Default::default()); + let gl_surface = unsafe { + gl_config + .display() + .create_window_surface(&gl_config, &attrs) + .expect("Cannot create GL WindowSurface") + }; + + // Make it current. + let gl_context = not_current_gl_context + .take() + .unwrap() + .make_current(&gl_surface) + .expect("GL context cannot be made current with WindowSurface"); + + // The context needs to be current for the Renderer to set up shaders and + // buffers. It also performs function loading, which needs a current context on + // WGL. + println!("Hooking up to wgpu-hal"); + exposed.get_or_insert_with(|| { + unsafe { + ::Adapter::new_external(|name| { + // XXX: On WGL this should only be called after the context was made current + gl_config + .display() + .get_proc_address(&CString::new(name).expect(name)) + }) + } + .expect("GL adapter can't be initialized") + }); + + assert!(state.replace((gl_context, gl_surface, window)).is_none()); + } + Event::Suspended => { + // This event is only raised on Android, where the backing NativeWindow for a GL + // Surface can appear and disappear at any moment. + println!("Android window removed"); + + // Destroy the GL Surface and un-current the GL Context before ndk-glue releases + // the window back to the system. + let (gl_context, ..) 
= state.take().unwrap(); + assert!(not_current_gl_context + .replace(gl_context.make_not_current().unwrap()) + .is_none()); + } + Event::WindowEvent { + window_id: _, + event: WindowEvent::Resized(size), + } => { + if size.width != 0 && size.height != 0 { + // Some platforms like EGL require resizing GL surface to update the size + // Notable platforms here are Wayland and macOS, other don't require it + // and the function is no-op, but it's wise to resize it for portability + // reasons. + if let Some((gl_context, gl_surface, _)) = &state { + gl_surface.resize( + gl_context, + NonZeroU32::new(size.width).unwrap(), + NonZeroU32::new(size.height).unwrap(), + ); + // XXX: If there's a state for fill_screen(), this would need to be updated too. + } + } + } + Event::WindowEvent { + window_id: _, + event: WindowEvent::RedrawRequested, + } => { + if let (Some(exposed), Some((gl_context, gl_surface, window))) = + (&exposed, &state) + { + let inner_size = window.inner_size(); + + fill_screen(exposed, inner_size.width, inner_size.height); + + println!("Showing the window"); + gl_surface + .swap_buffers(gl_context) + .expect("Failed to swap buffers"); + } + } + _ => (), + } + }) + .expect("Couldn't run event loop"); } #[cfg(target_os = "emscripten")] @@ -117,10 +254,18 @@ fn main() { fill_screen(&exposed, 640, 400); } -#[cfg(any(windows, all(target_arch = "wasm32", not(target_os = "emscripten"))))] -fn main() {} +#[cfg(any( + all(target_arch = "wasm32", not(target_os = "emscripten")), + target_os = "ios" +))] +fn main() { + eprintln!("This example is not supported on Windows and non-emscripten wasm32") +} -#[cfg(any(not(any(windows, target_arch = "wasm32")), target_os = "emscripten"))] +#[cfg(not(any( + all(target_arch = "wasm32", not(target_os = "emscripten")), + target_os = "ios" +)))] fn fill_screen(exposed: &hal::ExposedAdapter, width: u32, height: u32) { use hal::{Adapter as _, CommandEncoder as _, Device as _, Queue as _}; diff --git 
a/wgpu-hal/examples/ray-traced-triangle/main.rs b/wgpu-hal/examples/ray-traced-triangle/main.rs index b1aceeb101..dd91843734 100644 --- a/wgpu-hal/examples/ray-traced-triangle/main.rs +++ b/wgpu-hal/examples/ray-traced-triangle/main.rs @@ -8,7 +8,9 @@ use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; use glam::{Affine3A, Mat4, Vec3}; use std::{ borrow::{Borrow, Cow}, - iter, mem, ptr, + iter, + mem::size_of, + ptr, time::Instant, }; use winit::window::WindowButtons; @@ -203,7 +205,7 @@ struct Example { uniform_buffer: A::Buffer, pipeline_layout: A::PipelineLayout, vertices_buffer: A::Buffer, - indices_buffer: A::Buffer, + indices_buffer: Option, texture: A::Texture, instances: [AccelerationStructureInstance; 3], instances_buffer: A::Buffer, @@ -215,6 +217,18 @@ struct Example { impl Example { fn init(window: &winit::window::Window) -> Result> { + let mut index_buffer = false; + + for arg in std::env::args() { + if arg == "index_buffer" { + index_buffer = true; + } + } + + if index_buffer { + log::info!("using index buffer") + } + let instance_desc = hal::InstanceDescriptor { name: "example", flags: wgt::InstanceFlags::default(), @@ -304,7 +318,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }, @@ -418,29 +432,34 @@ impl Example { vertices_buffer }; - let indices_buffer = unsafe { - let indices_buffer = device - .create_buffer(&hal::BufferDescriptor { - label: Some("indices buffer"), - size: indices_size_in_bytes as u64, - usage: hal::BufferUses::MAP_WRITE - | hal::BufferUses::BOTTOM_LEVEL_ACCELERATION_STRUCTURE_INPUT, - memory_flags: hal::MemoryFlags::TRANSIENT | hal::MemoryFlags::PREFER_COHERENT, - }) - .unwrap(); - - let mapping = device - .map_buffer(&indices_buffer, 0..indices_size_in_bytes as u64) - .unwrap(); - ptr::copy_nonoverlapping( - 
indices.as_ptr() as *const u8, - mapping.ptr.as_ptr(), - indices_size_in_bytes, - ); - device.unmap_buffer(&indices_buffer); - assert!(mapping.is_coherent); + let indices_buffer = if index_buffer { + unsafe { + let indices_buffer = device + .create_buffer(&hal::BufferDescriptor { + label: Some("indices buffer"), + size: indices_size_in_bytes as u64, + usage: hal::BufferUses::MAP_WRITE + | hal::BufferUses::BOTTOM_LEVEL_ACCELERATION_STRUCTURE_INPUT, + memory_flags: hal::MemoryFlags::TRANSIENT + | hal::MemoryFlags::PREFER_COHERENT, + }) + .unwrap(); - indices_buffer + let mapping = device + .map_buffer(&indices_buffer, 0..indices_size_in_bytes as u64) + .unwrap(); + ptr::copy_nonoverlapping( + indices.as_ptr() as *const u8, + mapping.ptr.as_ptr(), + indices_size_in_bytes, + ); + device.unmap_buffer(&indices_buffer); + assert!(mapping.is_coherent); + + Some((indices_buffer, indices.len())) + } + } else { + None }; let blas_triangles = vec![hal::AccelerationStructureTriangles { @@ -449,12 +468,15 @@ impl Example { vertex_format: wgt::VertexFormat::Float32x3, vertex_count: vertices.len() as u32, vertex_stride: 3 * 4, - indices: Some(hal::AccelerationStructureTriangleIndices { - buffer: Some(&indices_buffer), - format: wgt::IndexFormat::Uint32, - offset: 0, - count: indices.len() as u32, + indices: indices_buffer.as_ref().map(|(buf, len)| { + hal::AccelerationStructureTriangleIndices { + buffer: Some(buf), + format: wgt::IndexFormat::Uint32, + offset: 0, + count: *len as u32, + } }), + transform: None, flags: hal::AccelerationStructureGeometryFlags::OPAQUE, }]; @@ -516,7 +538,7 @@ impl Example { } }; - let uniforms_size = std::mem::size_of::(); + let uniforms_size = size_of::(); let uniform_buffer = unsafe { let uniform_buffer = device @@ -657,8 +679,7 @@ impl Example { ), ]; - let instances_buffer_size = - instances.len() * std::mem::size_of::(); + let instances_buffer_size = instances.len() * size_of::(); let instances_buffer = unsafe { let instances_buffer = device @@ 
-799,7 +820,7 @@ impl Example { tlas, scratch_buffer, time: 0.0, - indices_buffer, + indices_buffer: indices_buffer.map(|(buf, _)| buf), vertices_buffer, uniform_buffer, texture_view, @@ -828,7 +849,7 @@ impl Example { }; let instances_buffer_size = - self.instances.len() * std::mem::size_of::(); + self.instances.len() * size_of::(); let tlas_flags = hal::AccelerationStructureBuildFlags::PREFER_FAST_TRACE | hal::AccelerationStructureBuildFlags::ALLOW_UPDATE; @@ -1025,7 +1046,9 @@ impl Example { self.device.destroy_bind_group(self.bind_group); self.device.destroy_buffer(self.scratch_buffer); self.device.destroy_buffer(self.instances_buffer); - self.device.destroy_buffer(self.indices_buffer); + if let Some(buffer) = self.indices_buffer { + self.device.destroy_buffer(buffer); + } self.device.destroy_buffer(self.vertices_buffer); self.device.destroy_buffer(self.uniform_buffer); self.device.destroy_acceleration_structure(self.tlas); diff --git a/wgpu-hal/src/auxil/dxgi/conv.rs b/wgpu-hal/src/auxil/dxgi/conv.rs index 64d978531e..10a584d565 100644 --- a/wgpu-hal/src/auxil/dxgi/conv.rs +++ b/wgpu-hal/src/auxil/dxgi/conv.rs @@ -47,7 +47,7 @@ pub fn map_texture_format_failable( Tf::Rgb9e5Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP, Tf::Rgb10a2Uint => DXGI_FORMAT_R10G10B10A2_UINT, Tf::Rgb10a2Unorm => DXGI_FORMAT_R10G10B10A2_UNORM, - Tf::Rg11b10UFloat => DXGI_FORMAT_R11G11B10_FLOAT, + Tf::Rg11b10Ufloat => DXGI_FORMAT_R11G11B10_FLOAT, Tf::R64Uint => DXGI_FORMAT_R32G32_UINT, // R64 emulated by R32G32 Tf::Rg32Uint => DXGI_FORMAT_R32G32_UINT, Tf::Rg32Sint => DXGI_FORMAT_R32G32_SINT, diff --git a/wgpu-hal/src/auxil/dxgi/factory.rs b/wgpu-hal/src/auxil/dxgi/factory.rs index 6c68ffeea6..4b71abda37 100644 --- a/wgpu-hal/src/auxil/dxgi/factory.rs +++ b/wgpu-hal/src/auxil/dxgi/factory.rs @@ -4,12 +4,7 @@ use windows::{core::Interface as _, Win32::Graphics::Dxgi}; use crate::dx12::DxgiLib; -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum DxgiFactoryType { - Factory2, - Factory4, 
- Factory6, -} +// We can rely on the presence of DXGI 1.4 since D3D12 requires WDDM 2.0, Windows 10 (1507), and so does DXGI 1.4. fn should_keep_adapter(adapter: &Dxgi::IDXGIAdapter1) -> bool { let desc = unsafe { adapter.GetDesc1() }.unwrap(); @@ -52,75 +47,27 @@ fn should_keep_adapter(adapter: &Dxgi::IDXGIAdapter1) -> bool { } pub enum DxgiAdapter { - Adapter1(Dxgi::IDXGIAdapter1), - Adapter2(Dxgi::IDXGIAdapter2), + /// Provided by DXGI 1.4 Adapter3(Dxgi::IDXGIAdapter3), + /// Provided by DXGI 1.6 Adapter4(Dxgi::IDXGIAdapter4), } -impl windows::core::Param for &DxgiAdapter { - unsafe fn param(self) -> windows::core::ParamValue { - unsafe { self.deref().param() } - } -} - impl Deref for DxgiAdapter { - type Target = Dxgi::IDXGIAdapter; + type Target = Dxgi::IDXGIAdapter3; fn deref(&self) -> &Self::Target { match self { - DxgiAdapter::Adapter1(a) => a, - DxgiAdapter::Adapter2(a) => a, DxgiAdapter::Adapter3(a) => a, DxgiAdapter::Adapter4(a) => a, } } } -impl DxgiAdapter { - pub fn as_adapter2(&self) -> Option<&Dxgi::IDXGIAdapter2> { - match self { - Self::Adapter1(_) => None, - Self::Adapter2(f) => Some(f), - Self::Adapter3(f) => Some(f), - Self::Adapter4(f) => Some(f), - } - } - - pub fn unwrap_adapter2(&self) -> &Dxgi::IDXGIAdapter2 { - self.as_adapter2().unwrap() - } -} - pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { let mut adapters = Vec::with_capacity(8); for cur_index in 0.. 
{ - if let DxgiFactory::Factory6(ref factory6) = factory { - profiling::scope!("IDXGIFactory6::EnumAdapterByGpuPreference"); - // We're already at dxgi1.6, we can grab IDXGIAdapter4 directly - let adapter4: Dxgi::IDXGIAdapter4 = match unsafe { - factory6.EnumAdapterByGpuPreference( - cur_index, - Dxgi::DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE, - ) - } { - Ok(a) => a, - Err(e) if e.code() == Dxgi::DXGI_ERROR_NOT_FOUND => break, - Err(e) => { - log::error!("Failed enumerating adapters: {}", e); - break; - } - }; - - if !should_keep_adapter(&adapter4) { - continue; - } - - adapters.push(DxgiAdapter::Adapter4(adapter4)); - continue; - } - profiling::scope!("IDXGIFactory1::EnumAdapters1"); let adapter1: Dxgi::IDXGIAdapter1 = match unsafe { factory.EnumAdapters1(cur_index) } { Ok(a) => a, @@ -135,31 +82,12 @@ pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { continue; } - // Do the most aggressive casts first, skipping Adapter4 as we definitely don't have dxgi1_6. - - // Adapter1 -> Adapter3 - match adapter1.cast::() { - Ok(adapter3) => { - adapters.push(DxgiAdapter::Adapter3(adapter3)); - continue; - } - Err(err) => { - log::warn!("Failed casting Adapter1 to Adapter3: {}", err); - } - } - - // Adapter1 -> Adapter2 - match adapter1.cast::() { - Ok(adapter2) => { - adapters.push(DxgiAdapter::Adapter2(adapter2)); - continue; - } - Err(err) => { - log::warn!("Failed casting Adapter1 to Adapter2: {}", err); - } + if let Ok(adapter4) = adapter1.cast::() { + adapters.push(DxgiAdapter::Adapter4(adapter4)); + } else { + let adapter3 = adapter1.cast::().unwrap(); + adapters.push(DxgiAdapter::Adapter3(adapter3)); } - - adapters.push(DxgiAdapter::Adapter1(adapter1)); } adapters @@ -167,52 +95,37 @@ pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { #[derive(Clone, Debug)] pub enum DxgiFactory { - Factory1(Dxgi::IDXGIFactory1), - Factory2(Dxgi::IDXGIFactory2), + /// Provided by DXGI 1.4 Factory4(Dxgi::IDXGIFactory4), + /// Provided by DXGI 1.5 + 
Factory5(Dxgi::IDXGIFactory5), + /// Provided by DXGI 1.6 Factory6(Dxgi::IDXGIFactory6), } impl Deref for DxgiFactory { - type Target = Dxgi::IDXGIFactory1; + type Target = Dxgi::IDXGIFactory4; fn deref(&self) -> &Self::Target { match self { - DxgiFactory::Factory1(f) => f, - DxgiFactory::Factory2(f) => f, DxgiFactory::Factory4(f) => f, + DxgiFactory::Factory5(f) => f, DxgiFactory::Factory6(f) => f, } } } impl DxgiFactory { - pub fn as_factory2(&self) -> Option<&Dxgi::IDXGIFactory2> { - match self { - Self::Factory1(_) => None, - Self::Factory2(f) => Some(f), - Self::Factory4(f) => Some(f), - Self::Factory6(f) => Some(f), - } - } - - pub fn unwrap_factory2(&self) -> &Dxgi::IDXGIFactory2 { - self.as_factory2().unwrap() - } - pub fn as_factory5(&self) -> Option<&Dxgi::IDXGIFactory5> { match self { - Self::Factory1(_) | Self::Factory2(_) | Self::Factory4(_) => None, + Self::Factory4(_) => None, + Self::Factory5(f) => Some(f), Self::Factory6(f) => Some(f), } } } -/// Tries to create a [`Dxgi::IDXGIFactory6`], then a [`Dxgi::IDXGIFactory4`], then a [`Dxgi::IDXGIFactory2`], then a [`Dxgi::IDXGIFactory1`], -/// returning the one that succeeds, or if the required_factory_type fails to be -/// created. pub fn create_factory( - required_factory_type: DxgiFactoryType, instance_flags: wgt::InstanceFlags, ) -> Result<(DxgiLib, DxgiFactory), crate::InstanceError> { let lib_dxgi = DxgiLib::new().map_err(|e| { @@ -225,111 +138,31 @@ pub fn create_factory( // The `DXGI_CREATE_FACTORY_DEBUG` flag is only allowed to be passed to // `CreateDXGIFactory2` if the debug interface is actually available. So // we check for whether it exists first. 
- match lib_dxgi.debug_interface1() { - Ok(pair) => match pair { - Ok(_debug_controller) => { - factory_flags |= Dxgi::DXGI_CREATE_FACTORY_DEBUG; - } - Err(err) => { - log::warn!("Unable to enable DXGI debug interface: {}", err); - } - }, - Err(err) => { - log::warn!("Debug interface function for DXGI not found: {:?}", err); - } + if let Ok(Some(_)) = lib_dxgi.debug_interface1() { + factory_flags |= Dxgi::DXGI_CREATE_FACTORY_DEBUG; } // Intercept `OutputDebugString` calls super::exception::register_exception_handler(); } - // Try to create IDXGIFactory4 - let factory4 = match lib_dxgi.create_factory2(factory_flags) { - Ok(pair) => match pair { - Ok(factory) => Some(factory), - // We hard error here as we _should have_ been able to make a factory4 but couldn't. - Err(err) => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to create IDXGIFactory4: {err:?}" - ))); - } - }, - // If we require factory4, hard error. - Err(err) if required_factory_type == DxgiFactoryType::Factory4 => { + let factory4 = match lib_dxgi.create_factory4(factory_flags) { + Ok(factory) => factory, + Err(err) => { return Err(crate::InstanceError::with_source( - String::from("IDXGIFactory1 creation function not found"), + String::from("IDXGIFactory4 creation failed"), err, )); } - // If we don't print it to warn as all win7 will hit this case. - Err(err) => { - log::warn!("IDXGIFactory1 creation function not found: {err:?}"); - None - } }; - if let Some(factory4) = factory4 { - // Try to cast the IDXGIFactory4 into IDXGIFactory6 - let factory6 = factory4.cast::(); - match factory6 { - Ok(factory6) => { - return Ok((lib_dxgi, DxgiFactory::Factory6(factory6))); - } - // If we require factory6, hard error. 
- Err(err) if required_factory_type == DxgiFactoryType::Factory6 => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to cast IDXGIFactory4 to IDXGIFactory6: {err:?}" - ))); - } - // If we don't print it to warn. - Err(err) => { - log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); - return Ok((lib_dxgi, DxgiFactory::Factory4(factory4))); - } - } + if let Ok(factory6) = factory4.cast::() { + return Ok((lib_dxgi, DxgiFactory::Factory6(factory6))); } - // Try to create IDXGIFactory1 - let factory1 = match lib_dxgi.create_factory1() { - Ok(pair) => match pair { - Ok(factory) => factory, - Err(err) => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to create IDXGIFactory1: {err:?}" - ))); - } - }, - // We always require at least factory1, so hard error - Err(err) => { - return Err(crate::InstanceError::with_source( - String::from("IDXGIFactory1 creation function not found"), - err, - )); - } - }; - - // Try to cast the IDXGIFactory1 into IDXGIFactory2 - let factory2 = factory1.cast::(); - match factory2 { - Ok(factory2) => { - return Ok((lib_dxgi, DxgiFactory::Factory2(factory2))); - } - // If we require factory2, hard error. - Err(err) if required_factory_type == DxgiFactoryType::Factory2 => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to cast IDXGIFactory1 to IDXGIFactory2: {err:?}" - ))); - } - // If we don't print it to warn. - Err(err) => { - log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); - } + if let Ok(factory5) = factory4.cast::() { + return Ok((lib_dxgi, DxgiFactory::Factory5(factory5))); } - // We tried to create 4 and 2, but only succeeded with 1. 
- Ok((lib_dxgi, DxgiFactory::Factory1(factory1))) + Ok((lib_dxgi, DxgiFactory::Factory4(factory4))) } diff --git a/wgpu-hal/src/auxil/dxgi/result.rs b/wgpu-hal/src/auxil/dxgi/result.rs index 3bb88b5bf1..61e3c6acf0 100644 --- a/wgpu-hal/src/auxil/dxgi/result.rs +++ b/wgpu-hal/src/auxil/dxgi/result.rs @@ -1,56 +1,32 @@ -use std::borrow::Cow; - use windows::Win32::{Foundation, Graphics::Dxgi}; pub(crate) trait HResult { - fn into_result(self) -> Result>; fn into_device_result(self, description: &str) -> Result; } impl HResult for windows::core::Result { - fn into_result(self) -> Result> { - // TODO: use windows-rs built-in error formatting? - let description = match self { - Ok(t) => return Ok(t), - Err(e) if e.code() == Foundation::E_UNEXPECTED => "unexpected", - Err(e) if e.code() == Foundation::E_NOTIMPL => "not implemented", - Err(e) if e.code() == Foundation::E_OUTOFMEMORY => "out of memory", - Err(e) if e.code() == Foundation::E_INVALIDARG => "invalid argument", - Err(e) => return Err(Cow::Owned(format!("{e:?}"))), - }; - Err(Cow::Borrowed(description)) - } fn into_device_result(self, description: &str) -> Result { #![allow(unreachable_code)] - let err_code = if let Err(err) = &self { - Some(err.code()) - } else { - None - }; - self.into_result().map_err(|err| { + self.map_err(|err| { log::error!("{} failed: {}", description, err); - let Some(err_code) = err_code else { - unreachable!() - }; - - match err_code { + match err.code() { Foundation::E_OUTOFMEMORY => { #[cfg(feature = "oom_panic")] panic!("{description} failed: Out of memory"); - return crate::DeviceError::OutOfMemory; + crate::DeviceError::OutOfMemory } Dxgi::DXGI_ERROR_DEVICE_RESET | Dxgi::DXGI_ERROR_DEVICE_REMOVED => { #[cfg(feature = "device_lost_panic")] panic!("{description} failed: Device lost ({err})"); + crate::DeviceError::Lost } _ => { #[cfg(feature = "internal_error_panic")] panic!("{description} failed: {err}"); + crate::DeviceError::Unexpected } } - - crate::DeviceError::Lost }) } } diff 
--git a/wgpu-hal/src/auxil/dxgi/time.rs b/wgpu-hal/src/auxil/dxgi/time.rs index 08bc3cee03..1b312fd651 100644 --- a/wgpu-hal/src/auxil/dxgi/time.rs +++ b/wgpu-hal/src/auxil/dxgi/time.rs @@ -62,7 +62,7 @@ impl PresentationTimer { let kernelbase = libloading::os::windows::Library::open_already_loaded("kernelbase.dll").unwrap(); // No concerns about lifetimes here as kernelbase is always there. - let ptr = unsafe { kernelbase.get(b"QueryInterruptTimePrecise").unwrap() }; + let ptr = unsafe { kernelbase.get(b"QueryInterruptTimePrecise\0").unwrap() }; Self::IPresentationManager { fnQueryInterruptTimePrecise: *ptr, } diff --git a/wgpu-hal/src/dx12/adapter.rs b/wgpu-hal/src/dx12/adapter.rs index eb4102dbd2..4b510290d9 100644 --- a/wgpu-hal/src/dx12/adapter.rs +++ b/wgpu-hal/src/dx12/adapter.rs @@ -1,4 +1,9 @@ -use std::{mem, ptr, sync::Arc, thread}; +use std::{ + mem::{size_of, size_of_val}, + ptr, + sync::Arc, + thread, +}; use parking_lot::Mutex; use windows::{ @@ -59,19 +64,9 @@ impl super::Adapter { // Create the device so that we can get the capabilities. let device = { profiling::scope!("ID3D12Device::create_device"); - match library.create_device(&adapter, Direct3D::D3D_FEATURE_LEVEL_11_0) { - Ok(pair) => match pair { - Ok(device) => device, - Err(err) => { - log::warn!("Device creation failed: {}", err); - return None; - } - }, - Err(err) => { - log::warn!("Device creation function is not found: {:?}", err); - return None; - } - } + library + .create_device(&adapter, Direct3D::D3D_FEATURE_LEVEL_11_0) + .ok()?? }; profiling::scope!("feature queries"); @@ -92,7 +87,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FEATURE_LEVELS, <*mut _>::cast(&mut device_levels), - mem::size_of_val(&device_levels) as u32, + size_of_val(&device_levels) as u32, ) } .unwrap(); @@ -100,7 +95,7 @@ impl super::Adapter { // We have found a possible adapter. // Acquire the device information. 
- let desc = unsafe { adapter.unwrap_adapter2().GetDesc2() }.unwrap(); + let desc = unsafe { adapter.GetDesc2() }.unwrap(); let device_name = auxil::dxgi::conv::map_adapter_name(desc.Description); @@ -110,7 +105,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_ARCHITECTURE, <*mut _>::cast(&mut features_architecture), - mem::size_of_val(&features_architecture) as u32, + size_of_val(&features_architecture) as u32, ) } .unwrap(); @@ -154,7 +149,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS, <*mut _>::cast(&mut options), - mem::size_of_val(&options) as u32, + size_of_val(&options) as u32, ) } .unwrap(); @@ -165,7 +160,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS2, <*mut _>::cast(&mut features2), - mem::size_of_val(&features2) as u32, + size_of_val(&features2) as u32, ) } .is_ok() @@ -178,7 +173,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS3, <*mut _>::cast(&mut features3), - mem::size_of_val(&features3) as u32, + size_of_val(&features3) as u32, ) } .is_ok() @@ -194,7 +189,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS7, <*mut _>::cast(&mut features7), - mem::size_of_val(&features7) as u32, + size_of_val(&features7) as u32, ) } .is_ok() @@ -224,7 +219,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_SHADER_MODEL, <*mut _>::cast(&mut sm), - mem::size_of_val(&sm) as u32, + size_of_val(&sm) as u32, ) } .is_ok() @@ -354,7 +349,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, <*mut _>::cast(&mut bgra8unorm_info), - mem::size_of_val(&bgra8unorm_info) as u32, + size_of_val(&bgra8unorm_info) as u32, ) }; hr.is_ok() @@ -372,7 +367,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS1, <*mut _>::cast(&mut features1), - 
mem::size_of_val(&features1) as u32, + size_of_val(&features1) as u32, ) }; @@ -396,7 +391,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS9, <*mut _>::cast(&mut features9), - mem::size_of_val(&features9) as u32, + size_of_val(&features9) as u32, ) } .is_ok() @@ -526,6 +521,9 @@ impl super::Adapter { Direct3D12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT as u64, ) .unwrap(), + // Direct3D correctly bounds-checks all array accesses: + // https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#18.6.8.2%20Device%20Memory%20Reads + uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, downlevel, }, @@ -606,7 +604,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, <*mut _>::cast(&mut data), - mem::size_of_val(&data) as u32, + size_of_val(&data) as u32, ) } .unwrap(); @@ -624,7 +622,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, ptr::addr_of_mut!(data_srv_uav).cast(), - mem::size_of::() as u32, + size_of::() as u32, ) } .unwrap(); @@ -720,7 +718,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, <*mut _>::cast(&mut ms_levels), - mem::size_of_val(&ms_levels) as u32, + size_of_val(&ms_levels) as u32, ) } .is_ok() diff --git a/wgpu-hal/src/dx12/command.rs b/wgpu-hal/src/dx12/command.rs index 5f32480fdb..00f56cdb5f 100644 --- a/wgpu-hal/src/dx12/command.rs +++ b/wgpu-hal/src/dx12/command.rs @@ -53,7 +53,6 @@ impl crate::BufferTextureCopy { impl super::Temp { fn prepare_marker(&mut self, marker: &str) -> (&[u16], u32) { - // TODO: Store in HSTRING self.marker.clear(); self.marker.extend(marker.encode_utf16()); self.marker.push(0); @@ -153,7 +152,7 @@ impl super::CommandEncoder { self.update_root_elements(); } - //Note: we have to call this lazily before draw calls. 
Otherwise, D3D complains + // Note: we have to call this lazily before draw calls. Otherwise, D3D complains // about the root parameters being incompatible with root signature. fn update_root_elements(&mut self) { use super::{BufferViewKind as Bvk, PassKind as Pk}; @@ -265,7 +264,8 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn begin_encoding(&mut self, label: crate::Label) -> Result<(), crate::DeviceError> { let list = loop { if let Some(list) = self.free_lists.pop() { - let reset_result = unsafe { list.Reset(&self.allocator, None) }.into_result(); + // TODO: Is an error expected here and should we print it? + let reset_result = unsafe { list.Reset(&self.allocator, None) }; if reset_result.is_ok() { break Some(list); } @@ -314,7 +314,9 @@ impl crate::CommandEncoder for super::CommandEncoder { for cmd_buf in command_buffers { self.free_lists.push(cmd_buf.raw); } - let _todo_handle_error = unsafe { self.allocator.Reset() }; + if let Err(e) = unsafe { self.allocator.Reset() } { + log::error!("ID3D12CommandAllocator::Reset() failed with {e}"); + } } unsafe fn transition_buffers<'a, T>(&mut self, barriers: T) @@ -724,8 +726,7 @@ impl crate::CommandEncoder for super::CommandEncoder { cat.clear_value.b as f32, cat.clear_value.a as f32, ]; - // TODO: Empty slice vs None? 
- unsafe { list.ClearRenderTargetView(*rtv, &value, Some(&[])) }; + unsafe { list.ClearRenderTargetView(*rtv, &value, None) }; } if let Some(ref target) = cat.resolve_target { self.pass.resolves.push(super::PassResolve { @@ -754,12 +755,23 @@ impl crate::CommandEncoder for super::CommandEncoder { if let Some(ds_view) = ds_view { if flags != Direct3D12::D3D12_CLEAR_FLAGS::default() { unsafe { - list.ClearDepthStencilView( + // list.ClearDepthStencilView( + // ds_view, + // flags, + // ds.clear_value.0, + // ds.clear_value.1 as u8, + // None, + // ) + // TODO: Replace with the above in the next breaking windows-rs release, + // when https://github.com/microsoft/win32metadata/pull/1971 is in. + (windows_core::Interface::vtable(list).ClearDepthStencilView)( + windows_core::Interface::as_raw(list), ds_view, flags, ds.clear_value.0, ds.clear_value.1 as u8, - &[], + 0, + std::ptr::null(), ) } } @@ -796,7 +808,7 @@ impl crate::CommandEncoder for super::CommandEncoder { Type: Direct3D12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: Direct3D12::D3D12_RESOURCE_BARRIER_FLAG_NONE, Anonymous: Direct3D12::D3D12_RESOURCE_BARRIER_0 { - //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. + // Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResove`. Transition: mem::ManuallyDrop::new( Direct3D12::D3D12_RESOURCE_TRANSITION_BARRIER { @@ -813,7 +825,7 @@ impl crate::CommandEncoder for super::CommandEncoder { Type: Direct3D12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: Direct3D12::D3D12_RESOURCE_BARRIER_FLAG_NONE, Anonymous: Direct3D12::D3D12_RESOURCE_BARRIER_0 { - //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. + // Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResolve`. 
Transition: mem::ManuallyDrop::new( Direct3D12::D3D12_RESOURCE_TRANSITION_BARRIER { diff --git a/wgpu-hal/src/dx12/descriptor.rs b/wgpu-hal/src/dx12/descriptor.rs index ebb42ddcd1..f3b7f26f25 100644 --- a/wgpu-hal/src/dx12/descriptor.rs +++ b/wgpu-hal/src/dx12/descriptor.rs @@ -56,16 +56,18 @@ impl GeneralHeap { .into_device_result("Descriptor heap creation")? }; + let start = DualHandle { + cpu: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, + gpu: unsafe { raw.GetGPUDescriptorHandleForHeapStart() }, + count: 0, + }; + Ok(Self { - raw: raw.clone(), + raw, ty, handle_size: unsafe { device.GetDescriptorHandleIncrementSize(ty) } as u64, total_handles, - start: DualHandle { - cpu: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, - gpu: unsafe { raw.GetGPUDescriptorHandleForHeapStart() }, - count: 0, - }, + start, ranges: Mutex::new(RangeAllocator::new(0..total_handles)), }) } @@ -268,12 +270,14 @@ impl CpuHeap { let raw = unsafe { device.CreateDescriptorHeap::(&desc) } .into_device_result("CPU descriptor heap creation")?; + let start = unsafe { raw.GetCPUDescriptorHandleForHeapStart() }; + Ok(Self { inner: Mutex::new(CpuHeapInner { - _raw: raw.clone(), + _raw: raw, stage: Vec::new(), }), - start: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, + start, handle_size, total, }) @@ -297,7 +301,7 @@ impl fmt::Debug for CpuHeap { } pub(super) unsafe fn upload( - device: Direct3D12::ID3D12Device, + device: &Direct3D12::ID3D12Device, src: &CpuHeapInner, dst: &GeneralHeap, dummy_copy_counts: &[u32], diff --git a/wgpu-hal/src/dx12/device.rs b/wgpu-hal/src/dx12/device.rs index dd68160315..43425bb5f1 100644 --- a/wgpu-hal/src/dx12/device.rs +++ b/wgpu-hal/src/dx12/device.rs @@ -1,5 +1,6 @@ use std::{ - ffi, mem, + ffi, + mem::{self, size_of}, num::NonZeroU32, ptr, sync::Arc, @@ -84,7 +85,7 @@ impl super::Device { } .into_device_result("Zero buffer creation")?; - let zero_buffer = zero_buffer.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let zero_buffer = 
zero_buffer.ok_or(crate::DeviceError::Unexpected)?; // Note: without `D3D12_HEAP_FLAG_CREATE_NOT_ZEROED` // this resource is zeroed by default. @@ -113,7 +114,7 @@ impl super::Device { ) } .into_device_result("Command signature creation")?; - signature.ok_or(crate::DeviceError::ResourceCreationFailed) + signature.ok_or(crate::DeviceError::Unexpected) } let shared = super::DeviceShared { @@ -121,7 +122,7 @@ impl super::Device { cmd_signatures: super::CommandSignatures { draw: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW, ..Default::default() @@ -130,7 +131,7 @@ impl super::Device { )?, draw_indexed: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED, ..Default::default() @@ -139,7 +140,7 @@ impl super::Device { )?, dispatch: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH, ..Default::default() @@ -287,7 +288,7 @@ impl super::Device { }; let full_stage = format!( - "{}_{}\0", + "{}_{}", naga_stage.to_hlsl_str(), naga_options.shader_model.to_str() ); @@ -305,28 +306,33 @@ impl super::Device { let source_name = stage.module.raw_name.as_deref(); // Compile with DXC if available, otherwise fall back to FXC - let (result, log_level) = if let Some(ref dxc_container) = self.dxc_container { + let result = if let Some(ref dxc_container) = self.dxc_container { shader_compilation::compile_dxc( self, &source, source_name, raw_ep, stage_bit, - full_stage, + &full_stage, dxc_container, ) } else { - let full_stage = ffi::CStr::from_bytes_with_nul(full_stage.as_bytes()).unwrap(); shader_compilation::compile_fxc( self, &source, source_name, - &ffi::CString::new(raw_ep.as_str()).unwrap(), + raw_ep, stage_bit, - full_stage, + 
&full_stage, ) }; + let log_level = if result.is_ok() { + log::Level::Info + } else { + log::Level::Error + }; + log::log!( log_level, "Naga generated shader for {:?} at {:?}:\n{}", @@ -387,7 +393,6 @@ impl crate::Device for super::Device { &self, desc: &crate::BufferDescriptor, ) -> Result { - let mut resource = None; let mut size = desc.size; if desc.usage.contains(crate::BufferUses::UNIFORM) { let align_mask = Direct3D12::D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT as u64 - 1; @@ -410,10 +415,8 @@ impl crate::Device for super::Device { Flags: conv::map_buffer_usage_to_resource_flags(desc.usage), }; - let allocation = - super::suballocation::create_buffer_resource(self, desc, raw_desc, &mut resource)?; - - let resource = resource.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let (resource, allocation) = + super::suballocation::create_buffer_resource(self, desc, raw_desc)?; if let Some(label) = desc.label { unsafe { resource.SetName(&windows::core::HSTRING::from(label)) } @@ -441,6 +444,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -470,10 +477,6 @@ impl crate::Device for super::Device { &self, desc: &crate::TextureDescriptor, ) -> Result { - use super::suballocation::create_texture_resource; - - let mut resource = None; - let raw_desc = Direct3D12::D3D12_RESOURCE_DESC { Dimension: conv::map_texture_dimension(desc.dimension), Alignment: 0, @@ -495,9 +498,9 @@ impl crate::Device for super::Device { Flags: conv::map_texture_usage_to_resource_flags(desc.usage), }; - let allocation = create_texture_resource(self, desc, raw_desc, &mut resource)?; + let (resource, allocation) = + super::suballocation::create_texture_resource(self, desc, raw_desc)?; - let resource = resource.ok_or(crate::DeviceError::ResourceCreationFailed)?; if let Some(label) = desc.label { unsafe { 
resource.SetName(&windows::core::HSTRING::from(label)) } .into_device_result("SetName")?; @@ -532,6 +535,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, @@ -1295,7 +1302,7 @@ impl crate::Device for super::Device { Some(inner) => { let dual = unsafe { descriptor::upload( - self.raw.clone(), + &self.raw, &inner, &self.shared.heap_views, &desc.layout.copy_counts, @@ -1309,7 +1316,7 @@ impl crate::Device for super::Device { Some(inner) => { let dual = unsafe { descriptor::upload( - self.raw.clone(), + &self.raw, &inner, &self.shared.heap_samplers, &desc.layout.copy_counts, @@ -1643,7 +1650,7 @@ impl crate::Device for super::Device { } .into_device_result("Query heap creation")?; - let raw = raw.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let raw = raw.ok_or(crate::DeviceError::Unexpected)?; if let Some(label) = desc.label { unsafe { raw.SetName(&windows::core::HSTRING::from(label)) } diff --git a/wgpu-hal/src/dx12/instance.rs b/wgpu-hal/src/dx12/instance.rs index 0365616195..31d0511d39 100644 --- a/wgpu-hal/src/dx12/instance.rs +++ b/wgpu-hal/src/dx12/instance.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{mem::size_of_val, sync::Arc}; use parking_lot::RwLock; use windows::{ @@ -10,10 +10,7 @@ use windows::{ }; use super::SurfaceTarget; -use crate::{ - auxil::{self, dxgi::result::HResult as _}, - dx12::D3D12Lib, -}; +use crate::{auxil, dx12::D3D12Lib}; impl Drop for super::Instance { fn drop(&mut self) { @@ -37,55 +34,28 @@ impl crate::Instance for super::Instance { .intersects(wgt::InstanceFlags::VALIDATION | wgt::InstanceFlags::GPU_BASED_VALIDATION) { // Enable debug layer - match lib_main.debug_interface() { - Ok(pair) => match pair { - Ok(debug_controller) => { - if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) { - unsafe { 
debug_controller.EnableDebugLayer() } - } - if desc - .flags - .intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION) - { - #[allow(clippy::collapsible_if)] - if let Ok(debug1) = debug_controller.cast::() - { - unsafe { debug1.SetEnableGPUBasedValidation(true) } - } else { - log::warn!("Failed to enable GPU-based validation"); - } - } - } - Err(err) => { - log::warn!("Unable to enable D3D12 debug interface: {}", err); + if let Ok(Some(debug_controller)) = lib_main.debug_interface() { + if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) { + unsafe { debug_controller.EnableDebugLayer() } + } + if desc + .flags + .intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION) + { + #[allow(clippy::collapsible_if)] + if let Ok(debug1) = debug_controller.cast::() { + unsafe { debug1.SetEnableGPUBasedValidation(true) } + } else { + log::warn!("Failed to enable GPU-based validation"); } - }, - Err(err) => { - log::warn!("Debug interface function for D3D12 not found: {:?}", err); } } } - // Create DXGIFactory4 - let (lib_dxgi, factory) = auxil::dxgi::factory::create_factory( - auxil::dxgi::factory::DxgiFactoryType::Factory4, - desc.flags, - )?; + let (lib_dxgi, factory) = auxil::dxgi::factory::create_factory(desc.flags)?; // Create IDXGIFactoryMedia - let factory_media = match lib_dxgi.create_factory_media() { - Ok(pair) => match pair { - Ok(factory_media) => Some(factory_media), - Err(err) => { - log::error!("Failed to create IDXGIFactoryMedia: {}", err); - None - } - }, - Err(err) => { - log::warn!("IDXGIFactory1 creation function not found: {:?}", err); - None - } - }; + let factory_media = lib_dxgi.create_factory_media().ok(); let mut supports_allow_tearing = false; if let Some(factory5) = factory.as_factory5() { @@ -94,12 +64,12 @@ impl crate::Instance for super::Instance { factory5.CheckFeatureSupport( Dxgi::DXGI_FEATURE_PRESENT_ALLOW_TEARING, <*mut _>::cast(&mut allow_tearing), - std::mem::size_of_val(&allow_tearing) as u32, + size_of_val(&allow_tearing) as u32, ) }; - 
match hr.into_result() { - Err(err) => log::warn!("Unable to check for tearing support: {}", err), + match hr { + Err(err) => log::warn!("Unable to check for tearing support: {err}"), Ok(()) => supports_allow_tearing = true, } } diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index e4b9e74637..0efb418135 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -49,14 +49,13 @@ use std::{ffi, fmt, mem, num::NonZeroU32, ops::Deref, sync::Arc}; use arrayvec::ArrayVec; use parking_lot::{Mutex, RwLock}; use windows::{ - core::{Interface, Param as _}, + core::{Free, Interface}, Win32::{ Foundation, Graphics::{Direct3D, Direct3D12, DirectComposition, Dxgi}, System::Threading, }, }; -use windows_core::Free; use crate::auxil::{ self, @@ -66,21 +65,50 @@ use crate::auxil::{ }, }; +#[derive(Debug)] +struct DynLib { + inner: libloading::Library, +} + +impl DynLib { + unsafe fn new

(filename: P) -> Result + where + P: AsRef, + { + unsafe { libloading::Library::new(filename) }.map(|inner| Self { inner }) + } + + unsafe fn get( + &self, + symbol: &[u8], + ) -> Result, crate::DeviceError> { + unsafe { self.inner.get(symbol) }.map_err(|e| match e { + libloading::Error::GetProcAddress { .. } | libloading::Error::GetProcAddressUnknown => { + crate::DeviceError::Unexpected + } + libloading::Error::IncompatibleSize + | libloading::Error::CreateCString { .. } + | libloading::Error::CreateCStringWithTrailing { .. } => crate::hal_internal_error(e), + _ => crate::DeviceError::Unexpected, // could be unreachable!() but we prefer to be more robust + }) + } +} + #[derive(Debug)] struct D3D12Lib { - lib: libloading::Library, + lib: DynLib, } impl D3D12Lib { fn new() -> Result { - unsafe { libloading::Library::new("d3d12.dll").map(|lib| D3D12Lib { lib }) } + unsafe { DynLib::new("d3d12.dll").map(|lib| Self { lib }) } } fn create_device( &self, adapter: &DxgiAdapter, feature_level: Direct3D::D3D_FEATURE_LEVEL, - ) -> Result, libloading::Error> { + ) -> Result, crate::DeviceError> { // Calls windows::Win32::Graphics::Direct3D12::D3D12CreateDevice on d3d12.dll type Fun = extern "system" fn( padapter: *mut core::ffi::c_void, @@ -88,17 +116,30 @@ impl D3D12Lib { riid: *const windows_core::GUID, ppdevice: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12CreateDevice") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12CreateDevice\0") }?; - let mut result__ = None; - Ok((func)( - unsafe { adapter.param().abi() }, + let mut result__: Option = None; + + let res = (func)( + adapter.as_raw(), feature_level, // TODO: Generic? 
&Direct3D12::ID3D12Device::IID, <*mut _>::cast(&mut result__), ) - .map(|| result__.expect("D3D12CreateDevice succeeded but result is NULL?"))) + .ok(); + + if let Err(ref err) = res { + match err.code() { + Dxgi::DXGI_ERROR_UNSUPPORTED => return Ok(None), + Dxgi::DXGI_ERROR_DRIVER_INTERNAL_ERROR => return Err(crate::DeviceError::Lost), + _ => {} + } + } + + res.into_device_result("Device creation")?; + + result__.ok_or(crate::DeviceError::Unexpected).map(Some) } fn serialize_root_signature( @@ -115,11 +156,8 @@ impl D3D12Lib { ppblob: *mut *mut core::ffi::c_void, pperrorblob: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12SerializeRootSignature") } - .map_err(|e| { - log::error!("Unable to find serialization function: {:?}", e); - crate::DeviceError::Lost - })?; + let func: libloading::Symbol = + unsafe { self.lib.get(b"D3D12SerializeRootSignature\0") }?; let desc = Direct3D12::D3D12_ROOT_SIGNATURE_DESC { NumParameters: parameters.len() as _, @@ -138,8 +176,6 @@ impl D3D12Lib { <*mut _>::cast(&mut error), ) .ok() - // TODO: If there's a HRESULT, error may still be non-null and - // contain info. 
.into_device_result("Root signature serialization")?; if let Some(error) = error { @@ -148,104 +184,116 @@ impl D3D12Lib { "Root signature serialization error: {:?}", unsafe { error.as_c_str() }.unwrap().to_str().unwrap() ); - return Err(crate::DeviceError::Lost); + return Err(crate::DeviceError::Unexpected); // could be hal_usage_error or hal_internal_error } - Ok(D3DBlob(blob.expect( - "D3D12SerializeRootSignature succeeded but result is NULL?", - ))) + blob.ok_or(crate::DeviceError::Unexpected) } - fn debug_interface( - &self, - ) -> Result, libloading::Error> { + fn debug_interface(&self) -> Result, crate::DeviceError> { // Calls windows::Win32::Graphics::Direct3D12::D3D12GetDebugInterface on d3d12.dll type Fun = extern "system" fn( riid: *const windows_core::GUID, ppvdebug: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12GetDebugInterface") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12GetDebugInterface\0") }?; + + let mut result__ = None; + + let res = (func)(&Direct3D12::ID3D12Debug::IID, <*mut _>::cast(&mut result__)).ok(); + + if let Err(ref err) = res { + match err.code() { + Dxgi::DXGI_ERROR_SDK_COMPONENT_MISSING => return Ok(None), + _ => {} + } + } - let mut result__ = core::ptr::null_mut(); - Ok((func)(&Direct3D12::ID3D12Debug::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + res.into_device_result("GetDebugInterface")?; + + result__.ok_or(crate::DeviceError::Unexpected).map(Some) } } #[derive(Debug)] pub(super) struct DxgiLib { - lib: libloading::Library, + lib: DynLib, } impl DxgiLib { pub fn new() -> Result { - unsafe { libloading::Library::new("dxgi.dll").map(|lib| DxgiLib { lib }) } + unsafe { DynLib::new("dxgi.dll").map(|lib| Self { lib }) } } - pub fn debug_interface1( - &self, - ) -> Result, libloading::Error> { + /// Will error with crate::DeviceError::Unexpected if DXGI 1.3 is not available. 
+ pub fn debug_interface1(&self) -> Result, crate::DeviceError> { // Calls windows::Win32::Graphics::Dxgi::DXGIGetDebugInterface1 on dxgi.dll type Fun = extern "system" fn( flags: u32, riid: *const windows_core::GUID, pdebug: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"DXGIGetDebugInterface1") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"DXGIGetDebugInterface1\0") }?; - let mut result__ = core::ptr::null_mut(); - Ok((func)(0, &Dxgi::IDXGIInfoQueue::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) - } + let mut result__ = None; - pub fn create_factory1( - &self, - ) -> Result, libloading::Error> { - // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory1 on dxgi.dll - type Fun = extern "system" fn( - riid: *const windows_core::GUID, - ppfactory: *mut *mut core::ffi::c_void, - ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1") }?; + let res = (func)(0, &Dxgi::IDXGIInfoQueue::IID, <*mut _>::cast(&mut result__)).ok(); + + if let Err(ref err) = res { + match err.code() { + Dxgi::DXGI_ERROR_SDK_COMPONENT_MISSING => return Ok(None), + _ => {} + } + } - let mut result__ = core::ptr::null_mut(); - Ok((func)(&Dxgi::IDXGIFactory1::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + res.into_device_result("debug_interface1")?; + + result__.ok_or(crate::DeviceError::Unexpected).map(Some) } - pub fn create_factory2( + /// Will error with crate::DeviceError::Unexpected if DXGI 1.4 is not available. 
+ pub fn create_factory4( &self, factory_flags: Dxgi::DXGI_CREATE_FACTORY_FLAGS, - ) -> Result, libloading::Error> { + ) -> Result { // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory2 on dxgi.dll type Fun = extern "system" fn( flags: Dxgi::DXGI_CREATE_FACTORY_FLAGS, riid: *const windows_core::GUID, ppfactory: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory2") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory2\0") }?; + + let mut result__ = None; - let mut result__ = core::ptr::null_mut(); - Ok( - (func)(factory_flags, &Dxgi::IDXGIFactory4::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) }), + (func)( + factory_flags, + &Dxgi::IDXGIFactory4::IID, + <*mut _>::cast(&mut result__), ) + .ok() + .into_device_result("create_factory4")?; + + result__.ok_or(crate::DeviceError::Unexpected) } - pub fn create_factory_media( - &self, - ) -> Result, libloading::Error> { + /// Will error with crate::DeviceError::Unexpected if DXGI 1.3 is not available. 
+ pub fn create_factory_media(&self) -> Result { // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory1 on dxgi.dll type Fun = extern "system" fn( riid: *const windows_core::GUID, ppfactory: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1\0") }?; + + let mut result__ = None; - let mut result__ = core::ptr::null_mut(); // https://learn.microsoft.com/en-us/windows/win32/api/dxgi1_3/nn-dxgi1_3-idxgifactorymedia - Ok((func)(&Dxgi::IDXGIFactoryMedia::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + (func)(&Dxgi::IDXGIFactoryMedia::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("create_factory_media")?; + + result__.ok_or(crate::DeviceError::Unexpected) } } @@ -368,7 +416,7 @@ pub struct Instance { } impl Instance { - pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface { + pub unsafe fn create_surface_from_visual(&self, visual: *mut ffi::c_void) -> Surface { let visual = unsafe { DirectComposition::IDCompositionVisual::from_raw_borrowed(&visual) } .expect("COM pointer should not be NULL"); Surface { @@ -382,7 +430,7 @@ impl Instance { pub unsafe fn create_surface_from_surface_handle( &self, - surface_handle: *mut std::ffi::c_void, + surface_handle: *mut ffi::c_void, ) -> Surface { // TODO: We're not given ownership, so we shouldn't call HANDLE::free(). This puts an extra burden on the caller to keep it alive. // https://learn.microsoft.com/en-us/windows/win32/api/handleapi/nf-handleapi-duplicatehandle could help us, even though DirectComposition is not in the list? 
@@ -399,7 +447,7 @@ impl Instance { pub unsafe fn create_surface_from_swap_chain_panel( &self, - swap_chain_panel: *mut std::ffi::c_void, + swap_chain_panel: *mut ffi::c_void, ) -> Surface { let swap_chain_panel = unsafe { types::ISwapChainPanelNative::from_raw_borrowed(&swap_chain_panel) } @@ -622,7 +670,7 @@ struct PassState { #[test] fn test_dirty_mask() { - assert_eq!(MAX_ROOT_ELEMENTS, mem::size_of::() * 8); + assert_eq!(MAX_ROOT_ELEMENTS, u64::BITS as usize); } impl PassState { @@ -897,8 +945,7 @@ pub struct ShaderModule { impl crate::DynShaderModule for ShaderModule {} pub(super) enum CompiledShader { - #[allow(unused)] - Dxc(Vec), + Dxc(Direct3D::Dxc::IDxcBlob), Fxc(Direct3D::ID3DBlob), } @@ -906,8 +953,8 @@ impl CompiledShader { fn create_native_shader(&self) -> Direct3D12::D3D12_SHADER_BYTECODE { match self { CompiledShader::Dxc(shader) => Direct3D12::D3D12_SHADER_BYTECODE { - pShaderBytecode: shader.as_ptr().cast(), - BytecodeLength: shader.len(), + pShaderBytecode: unsafe { shader.GetBufferPointer() }, + BytecodeLength: unsafe { shader.GetBufferSize() }, }, CompiledShader::Fxc(shader) => Direct3D12::D3D12_SHADER_BYTECODE { pShaderBytecode: unsafe { shader.GetBufferPointer() }, @@ -1026,8 +1073,8 @@ impl crate::Surface for Surface { flags, ) }; - if let Err(err) = result.into_result() { - log::error!("ResizeBuffers failed: {}", err); + if let Err(err) = result { + log::error!("ResizeBuffers failed: {err}"); return Err(crate::SurfaceError::Other("window is in use")); } raw @@ -1053,11 +1100,13 @@ impl crate::Surface for Surface { }; let swap_chain1 = match self.target { SurfaceTarget::Visual(_) | SurfaceTarget::SwapChainPanel(_) => { - profiling::scope!("IDXGIFactory4::CreateSwapChainForComposition"); + profiling::scope!("IDXGIFactory2::CreateSwapChainForComposition"); unsafe { - self.factory - .unwrap_factory2() - .CreateSwapChainForComposition(&device.present_queue, &desc, None) + self.factory.CreateSwapChainForComposition( + &device.present_queue, + 
&desc, + None, + ) } } SurfaceTarget::SurfaceHandle(handle) => { @@ -1077,9 +1126,9 @@ impl crate::Surface for Surface { } } SurfaceTarget::WndHandle(hwnd) => { - profiling::scope!("IDXGIFactory4::CreateSwapChainForHwnd"); + profiling::scope!("IDXGIFactory2::CreateSwapChainForHwnd"); unsafe { - self.factory.unwrap_factory2().CreateSwapChainForHwnd( + self.factory.CreateSwapChainForHwnd( &device.present_queue, hwnd, &desc, @@ -1092,24 +1141,22 @@ impl crate::Surface for Surface { let swap_chain1 = swap_chain1.map_err(|err| { log::error!("SwapChain creation error: {}", err); - crate::SurfaceError::Other("swap chain creation") + crate::SurfaceError::Other("swapchain creation") })?; match &self.target { SurfaceTarget::WndHandle(_) | SurfaceTarget::SurfaceHandle(_) => {} SurfaceTarget::Visual(visual) => { - if let Err(err) = unsafe { visual.SetContent(&swap_chain1) }.into_result() { - log::error!("Unable to SetContent: {}", err); + if let Err(err) = unsafe { visual.SetContent(&swap_chain1) } { + log::error!("Unable to SetContent: {err}"); return Err(crate::SurfaceError::Other( "IDCompositionVisual::SetContent", )); } } SurfaceTarget::SwapChainPanel(swap_chain_panel) => { - if let Err(err) = - unsafe { swap_chain_panel.SetSwapChain(&swap_chain1) }.into_result() - { - log::error!("Unable to SetSwapChain: {}", err); + if let Err(err) = unsafe { swap_chain_panel.SetSwapChain(&swap_chain1) } { + log::error!("Unable to SetSwapChain: {err}"); return Err(crate::SurfaceError::Other( "ISwapChainPanelNative::SetSwapChain", )); @@ -1117,13 +1164,10 @@ impl crate::Surface for Surface { } } - match swap_chain1.cast::() { - Ok(swap_chain3) => swap_chain3, - Err(err) => { - log::error!("Unable to cast swap chain: {}", err); - return Err(crate::SurfaceError::Other("swap chain cast to 3")); - } - } + swap_chain1.cast::().map_err(|err| { + log::error!("Unable to cast swapchain: {err}"); + crate::SurfaceError::Other("swapchain cast to version 3") + })? 
} }; diff --git a/wgpu-hal/src/dx12/shader_compilation.rs b/wgpu-hal/src/dx12/shader_compilation.rs index 8385082e35..e3d4d03c6c 100644 --- a/wgpu-hal/src/dx12/shader_compilation.rs +++ b/wgpu-hal/src/dx12/shader_compilation.rs @@ -1,13 +1,11 @@ -use std::ffi::CStr; -use std::ptr; - -pub(super) use dxc::{compile_dxc, get_dxc_container, DxcContainer}; -use windows::Win32::Graphics::Direct3D; - use crate::auxil::dxgi::result::HResult; +use std::ffi::CStr; +use std::path::PathBuf; +use windows::{ + core::{Interface, PCSTR, PCWSTR}, + Win32::Graphics::Direct3D::{Dxc, Fxc}, +}; -// This exists so that users who don't want to use dxc can disable the dxc_shader_compiler feature -// and not have to compile hassle_rs. // Currently this will use Dxc if it is chosen as the dx12 compiler at `Instance` creation time, and will // fallback to FXC if the Dxc libraries (dxil.dll and dxcompiler.dll) are not found, or if Fxc is chosen at' // `Instance` creation time. @@ -16,40 +14,41 @@ pub(super) fn compile_fxc( device: &super::Device, source: &str, source_name: Option<&CStr>, - raw_ep: &CStr, + raw_ep: &str, stage_bit: wgt::ShaderStages, - full_stage: &CStr, -) -> ( - Result, - log::Level, -) { + full_stage: &str, +) -> Result { profiling::scope!("compile_fxc"); let mut shader_data = None; - let mut compile_flags = Direct3D::Fxc::D3DCOMPILE_ENABLE_STRICTNESS; + let mut compile_flags = Fxc::D3DCOMPILE_ENABLE_STRICTNESS; if device .private_caps .instance_flags .contains(wgt::InstanceFlags::DEBUG) { - compile_flags |= - Direct3D::Fxc::D3DCOMPILE_DEBUG | Direct3D::Fxc::D3DCOMPILE_SKIP_OPTIMIZATION; + compile_flags |= Fxc::D3DCOMPILE_DEBUG | Fxc::D3DCOMPILE_SKIP_OPTIMIZATION; } + let raw_ep = std::ffi::CString::new(raw_ep).unwrap(); + let full_stage = std::ffi::CString::new(full_stage).unwrap(); + // If no name has been set, D3DCompile wants the null pointer. 
- let source_name = source_name.map(|cstr| cstr.as_ptr()).unwrap_or(ptr::null()); + let source_name = source_name + .map(|cstr| cstr.as_ptr().cast()) + .unwrap_or(core::ptr::null()); let mut error = None; let hr = unsafe { - profiling::scope!("Direct3D::Fxc::D3DCompile"); - Direct3D::Fxc::D3DCompile( + profiling::scope!("Fxc::D3DCompile"); + Fxc::D3DCompile( // TODO: Update low-level bindings to accept a slice here source.as_ptr().cast(), source.len(), - windows::core::PCSTR(source_name.cast()), + PCSTR(source_name), None, None, - windows::core::PCSTR(raw_ep.as_ptr().cast()), - windows::core::PCSTR(full_stage.as_ptr().cast()), + PCSTR(raw_ep.as_ptr().cast()), + PCSTR(full_stage.as_ptr().cast()), compile_flags, 0, &mut shader_data, @@ -57,13 +56,10 @@ pub(super) fn compile_fxc( ) }; - match hr.into_result() { + match hr { Ok(()) => { let shader_data = shader_data.unwrap(); - ( - Ok(super::CompiledShader::Fxc(shader_data)), - log::Level::Info, - ) + Ok(super::CompiledShader::Fxc(shader_data)) } Err(e) => { let mut full_msg = format!("FXC D3DCompile error ({e})"); @@ -77,234 +73,237 @@ pub(super) fn compile_fxc( }; let _ = write!(full_msg, ": {}", String::from_utf8_lossy(message)); } - ( - Err(crate::PipelineError::Linkage(stage_bit, full_msg)), - log::Level::Warn, - ) + Err(crate::PipelineError::Linkage(stage_bit, full_msg)) } } } -// The Dxc implementation is behind a feature flag so that users who don't want to use dxc can disable the feature. -#[cfg(feature = "dxc_shader_compiler")] -mod dxc { - use std::ffi::CStr; - use std::path::PathBuf; - - // Destructor order should be fine since _dxil and _dxc don't rely on each other. - pub(crate) struct DxcContainer { - compiler: hassle_rs::DxcCompiler, - library: hassle_rs::DxcLibrary, - validator: hassle_rs::DxcValidator, - // Has to be held onto for the lifetime of the device otherwise shaders will fail to compile. 
- _dxc: hassle_rs::Dxc, - // Also Has to be held onto for the lifetime of the device otherwise shaders will fail to validate. - _dxil: hassle_rs::Dxil, - } +trait DxcObj: Interface { + const CLSID: windows::core::GUID; +} +impl DxcObj for Dxc::IDxcCompiler3 { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcCompiler; +} +impl DxcObj for Dxc::IDxcUtils { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcUtils; +} +impl DxcObj for Dxc::IDxcValidator { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcValidator; +} - pub(crate) fn get_dxc_container( - dxc_path: Option, - dxil_path: Option, - ) -> Result, crate::DeviceError> { - // Make sure that dxil.dll exists. - let dxil = match hassle_rs::Dxil::new(dxil_path) { - Ok(dxil) => dxil, - Err(e) => { - log::warn!("Failed to load dxil.dll. Defaulting to FXC instead: {}", e); - return Ok(None); - } - }; +#[derive(Debug)] +struct DxcLib { + lib: crate::dx12::DynLib, +} - // Needed for explicit validation. - let validator = dxil.create_validator()?; - - let dxc = match hassle_rs::Dxc::new(dxc_path) { - Ok(dxc) => dxc, - Err(e) => { - log::warn!( - "Failed to load dxcompiler.dll. 
Defaulting to FXC instead: {}", - e - ); - return Ok(None); +impl DxcLib { + fn new(lib_path: Option, lib_name: &'static str) -> Result { + let lib_path = if let Some(lib_path) = lib_path { + if lib_path.is_file() { + lib_path + } else { + lib_path.join(lib_name) } + } else { + PathBuf::from(lib_name) }; - let compiler = dxc.create_compiler()?; - let library = dxc.create_library()?; - - Ok(Some(DxcContainer { - _dxc: dxc, - compiler, - library, - _dxil: dxil, - validator, - })) + unsafe { crate::dx12::DynLib::new(lib_path).map(|lib| Self { lib }) } } - pub(crate) fn compile_dxc( - device: &crate::dx12::Device, - source: &str, - source_name: Option<&CStr>, - raw_ep: &str, - stage_bit: wgt::ShaderStages, - full_stage: String, - dxc_container: &DxcContainer, - ) -> ( - Result, - log::Level, - ) { - profiling::scope!("compile_dxc"); - let mut compile_flags = arrayvec::ArrayVec::<&str, 6>::new_const(); - compile_flags.push("-Ges"); // Direct3D::Fxc::D3DCOMPILE_ENABLE_STRICTNESS - compile_flags.push("-Vd"); // Disable implicit validation to work around bugs when dxil.dll isn't in the local directory. - compile_flags.push("-HV"); // Use HLSL 2018, Naga doesn't supported 2021 yet. 
- compile_flags.push("2018"); - - if device - .private_caps - .instance_flags - .contains(wgt::InstanceFlags::DEBUG) - { - compile_flags.push("-Zi"); // Direct3D::Fxc::D3DCOMPILE_SKIP_OPTIMIZATION - compile_flags.push("-Od"); // Direct3D::Fxc::D3DCOMPILE_DEBUG + pub fn create_instance(&self) -> Result { + type Fun = extern "system" fn( + rclsid: *const windows_core::GUID, + riid: *const windows_core::GUID, + ppv: *mut *mut core::ffi::c_void, + ) -> windows_core::HRESULT; + let func: libloading::Symbol = unsafe { self.lib.get(b"DxcCreateInstance\0") }?; + + let mut result__ = None; + (func)(&T::CLSID, &T::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("DxcCreateInstance")?; + result__.ok_or(crate::DeviceError::Unexpected) + } +} + +// Destructor order should be fine since _dxil and _dxc don't rely on each other. +pub(super) struct DxcContainer { + compiler: Dxc::IDxcCompiler3, + utils: Dxc::IDxcUtils, + validator: Dxc::IDxcValidator, + // Has to be held onto for the lifetime of the device otherwise shaders will fail to compile. + _dxc: DxcLib, + // Also Has to be held onto for the lifetime of the device otherwise shaders will fail to validate. + _dxil: DxcLib, +} + +pub(super) fn get_dxc_container( + dxc_path: Option, + dxil_path: Option, +) -> Result, crate::DeviceError> { + let dxc = match DxcLib::new(dxc_path, "dxcompiler.dll") { + Ok(dxc) => dxc, + Err(e) => { + log::warn!( + "Failed to load dxcompiler.dll. Defaulting to FXC instead: {}", + e + ); + return Ok(None); } + }; - let blob = match dxc_container - .library - .create_blob_with_encoding_from_str(source) - .map_err(|e| crate::PipelineError::Linkage(stage_bit, format!("DXC blob error: {e}"))) - { - Ok(blob) => blob, - Err(e) => return (Err(e), log::Level::Error), - }; + let dxil = match DxcLib::new(dxil_path, "dxil.dll") { + Ok(dxil) => dxil, + Err(e) => { + log::warn!("Failed to load dxil.dll. 
Defaulting to FXC instead: {}", e); + return Ok(None); + } + }; - let source_name = source_name - .and_then(|cstr| cstr.to_str().ok()) - .unwrap_or(""); + let compiler = dxc.create_instance::()?; + let utils = dxc.create_instance::()?; + let validator = dxil.create_instance::()?; - let compiled = dxc_container.compiler.compile( - &blob, - source_name, - raw_ep, - &full_stage, - &compile_flags, - None, - &[], - ); - - let (result, log_level) = match compiled { - Ok(dxc_result) => match dxc_result.get_result() { - Ok(dxc_blob) => { - // Validate the shader. - match dxc_container.validator.validate(dxc_blob) { - Ok(validated_blob) => ( - Ok(crate::dx12::CompiledShader::Dxc(validated_blob.to_vec())), - log::Level::Info, - ), - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!( - "DXC validation error: {:?}\n{:?}", - get_error_string_from_dxc_result(&dxc_container.library, &e.0) - .unwrap_or_default(), - e.1 - ), - )), - log::Level::Error, - ), - } - } - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!("DXC compile error: {e}"), - )), - log::Level::Error, - ), - }, - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!( - "DXC compile error: {}", - get_error_string_from_dxc_result(&dxc_container.library, &e.0) - .unwrap_or_default() - ), - )), - log::Level::Error, - ), - }; + Ok(Some(DxcContainer { + compiler, + utils, + validator, + _dxc: dxc, + _dxil: dxil, + })) +} - (result, log_level) - } +/// Owned PCWSTR +#[allow(clippy::upper_case_acronyms)] +struct OPCWSTR { + inner: Vec, +} - impl From for crate::DeviceError { - fn from(value: hassle_rs::HassleError) -> Self { - match value { - hassle_rs::HassleError::Win32Error(e) => { - // TODO: This returns an HRESULT, should we try and use the associated Windows error message? 
- log::error!("Win32 error: {e:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::LoadLibraryError { filename, inner } => { - log::error!("Failed to load dxc library {filename:?}. Inner error: {inner:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::LibLoadingError(e) => { - log::error!("Failed to load dxc library. {e:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::WindowsOnly(e) => { - log::error!("Signing with dxil.dll is only supported on Windows. {e:?}"); - crate::DeviceError::Lost - } - // `ValidationError` and `CompileError` should never happen in a context involving `DeviceError` - hassle_rs::HassleError::ValidationError(_e) => unimplemented!(), - hassle_rs::HassleError::CompileError(_e) => unimplemented!(), - } - } +impl OPCWSTR { + fn new(s: &str) -> Self { + let mut inner: Vec<_> = s.encode_utf16().collect(); + inner.push(0); + Self { inner } } - fn get_error_string_from_dxc_result( - library: &hassle_rs::DxcLibrary, - error: &hassle_rs::DxcOperationResult, - ) -> Result { - error - .get_error_buffer() - .and_then(|error| library.get_blob_as_string(&hassle_rs::DxcBlob::from(error))) + fn ptr(&self) -> PCWSTR { + PCWSTR(self.inner.as_ptr()) } } -// These are stubs for when the `dxc_shader_compiler` feature is disabled. -#[cfg(not(feature = "dxc_shader_compiler"))] -mod dxc { - use std::ffi::CStr; - use std::path::PathBuf; - - pub(crate) struct DxcContainer {} - - pub(crate) fn get_dxc_container( - _dxc_path: Option, - _dxil_path: Option, - ) -> Result, crate::DeviceError> { - // Falls back to Fxc and logs an error. - log::error!("DXC shader compiler was requested on Instance creation, but the DXC feature is disabled. 
Enable the `dxc_shader_compiler` feature on wgpu_hal to use DXC."); - Ok(None) +fn get_output( + res: &Dxc::IDxcResult, + kind: Dxc::DXC_OUT_KIND, +) -> Result { + let mut result__: Option = None; + unsafe { res.GetOutput::(kind, &mut None, <*mut _>::cast(&mut result__)) } + .into_device_result("GetOutput")?; + result__.ok_or(crate::DeviceError::Unexpected) +} + +fn as_err_str(blob: &Dxc::IDxcBlobUtf8) -> Result<&str, crate::DeviceError> { + let ptr = unsafe { blob.GetStringPointer() }; + let len = unsafe { blob.GetStringLength() }; + core::str::from_utf8(unsafe { core::slice::from_raw_parts(ptr.0, len) }) + .map_err(|_| crate::DeviceError::Unexpected) +} + +pub(super) fn compile_dxc( + device: &crate::dx12::Device, + source: &str, + source_name: Option<&CStr>, + raw_ep: &str, + stage_bit: wgt::ShaderStages, + full_stage: &str, + dxc_container: &DxcContainer, +) -> Result { + profiling::scope!("compile_dxc"); + + let source_name = source_name.and_then(|cstr| cstr.to_str().ok()); + + let source_name = source_name.map(OPCWSTR::new); + let raw_ep = OPCWSTR::new(raw_ep); + let full_stage = OPCWSTR::new(full_stage); + + let mut compile_args = arrayvec::ArrayVec::::new_const(); + + if let Some(source_name) = source_name.as_ref() { + compile_args.push(source_name.ptr()) + } + + compile_args.extend([ + windows::core::w!("-E"), + raw_ep.ptr(), + windows::core::w!("-T"), + full_stage.ptr(), + windows::core::w!("-HV"), + windows::core::w!("2018"), // Use HLSL 2018, Naga doesn't supported 2021 yet. + windows::core::w!("-no-warnings"), + Dxc::DXC_ARG_ENABLE_STRICTNESS, + Dxc::DXC_ARG_SKIP_VALIDATION, // Disable implicit validation to work around bugs when dxil.dll isn't in the local directory. 
+ ]); + + if device + .private_caps + .instance_flags + .contains(wgt::InstanceFlags::DEBUG) + { + compile_args.push(Dxc::DXC_ARG_DEBUG); + compile_args.push(Dxc::DXC_ARG_SKIP_OPTIMIZATIONS); + } + + let buffer = Dxc::DxcBuffer { + Ptr: source.as_ptr().cast(), + Size: source.len(), + Encoding: Dxc::DXC_CP_UTF8.0, + }; + + let compile_res: Dxc::IDxcResult = unsafe { + dxc_container + .compiler + .Compile(&buffer, Some(&compile_args), None) + } + .into_device_result("Compile")?; + + drop(compile_args); + drop(source_name); + drop(raw_ep); + drop(full_stage); + + let err_blob = get_output::(&compile_res, Dxc::DXC_OUT_ERRORS)?; + + let len = unsafe { err_blob.GetStringLength() }; + if len != 0 { + let err = as_err_str(&err_blob)?; + return Err(crate::PipelineError::Linkage( + stage_bit, + format!("DXC compile error: {err}"), + )); } - // It shouldn't be possible that this gets called with the `dxc_shader_compiler` feature disabled. - pub(crate) fn compile_dxc( - _device: &crate::dx12::Device, - _source: &str, - _source_name: Option<&CStr>, - _raw_ep: &str, - _stage_bit: wgt::ShaderStages, - _full_stage: String, - _dxc_container: &DxcContainer, - ) -> ( - Result, - log::Level, - ) { - unimplemented!("Something went really wrong, please report this. Attempted to compile shader with DXC, but the DXC feature is disabled. Enable the `dxc_shader_compiler` feature on wgpu_hal to use DXC."); + let blob = get_output::(&compile_res, Dxc::DXC_OUT_OBJECT)?; + + let err_blob = { + let res = unsafe { + dxc_container + .validator + .Validate(&blob, Dxc::DxcValidatorFlags_InPlaceEdit) + } + .into_device_result("Validate")?; + + unsafe { res.GetErrorBuffer() }.into_device_result("GetErrorBuffer")? 
+ }; + + let size = unsafe { err_blob.GetBufferSize() }; + if size != 0 { + let err_blob = unsafe { dxc_container.utils.GetBlobAsUtf8(&err_blob) } + .into_device_result("GetBlobAsUtf8")?; + let err = as_err_str(&err_blob)?; + return Err(crate::PipelineError::Linkage( + stage_bit, + format!("DXC validation error: {err}"), + )); } + + Ok(crate::dx12::CompiledShader::Dxc(blob)) } diff --git a/wgpu-hal/src/dx12/suballocation.rs b/wgpu-hal/src/dx12/suballocation.rs index d840e118f1..bdb3e85129 100644 --- a/wgpu-hal/src/dx12/suballocation.rs +++ b/wgpu-hal/src/dx12/suballocation.rs @@ -52,14 +52,14 @@ pub(crate) fn create_buffer_resource( device: &crate::dx12::Device, desc: &crate::BufferDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result, crate::DeviceError> { +) -> Result<(Direct3D12::ID3D12Resource, Option), crate::DeviceError> { let is_cpu_read = desc.usage.contains(crate::BufferUses::MAP_READ); let is_cpu_write = desc.usage.contains(crate::BufferUses::MAP_WRITE); // Workaround for Intel Xe drivers if !device.private_caps.suballocation_supported { - return create_committed_buffer_resource(device, desc, raw_desc, resource).map(|()| None); + return create_committed_buffer_resource(device, desc, raw_desc) + .map(|resource| (resource, None)); } let location = match (is_cpu_read, is_cpu_write) { @@ -80,6 +80,7 @@ pub(crate) fn create_buffer_resource( location, ); let allocation = allocator.allocator.allocate(&allocation_desc)?; + let mut resource = None; unsafe { device.raw.CreatePlacedResource( @@ -88,32 +89,30 @@ pub(crate) fn create_buffer_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, - resource, + &mut resource, ) } .into_device_result("Placed buffer creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } + let resource = resource.ok_or(crate::DeviceError::Unexpected)?; device .counters .buffer_memory .add(allocation.size() as isize); - Ok(Some(AllocationWrapper 
{ allocation })) + Ok((resource, Some(AllocationWrapper { allocation }))) } pub(crate) fn create_texture_resource( device: &crate::dx12::Device, desc: &crate::TextureDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result, crate::DeviceError> { +) -> Result<(Direct3D12::ID3D12Resource, Option), crate::DeviceError> { // Workaround for Intel Xe drivers if !device.private_caps.suballocation_supported { - return create_committed_texture_resource(device, desc, raw_desc, resource).map(|()| None); + return create_committed_texture_resource(device, desc, raw_desc) + .map(|resource| (resource, None)); } let location = MemoryLocation::GpuOnly; @@ -128,6 +127,7 @@ pub(crate) fn create_texture_resource( location, ); let allocation = allocator.allocator.allocate(&allocation_desc)?; + let mut resource = None; unsafe { device.raw.CreatePlacedResource( @@ -136,21 +136,19 @@ pub(crate) fn create_texture_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, // clear value - resource, + &mut resource, ) } .into_device_result("Placed texture creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } + let resource = resource.ok_or(crate::DeviceError::Unexpected)?; device .counters .texture_memory .add(allocation.size() as isize); - Ok(Some(AllocationWrapper { allocation })) + Ok((resource, Some(AllocationWrapper { allocation }))) } pub(crate) fn free_buffer_allocation( @@ -226,8 +224,7 @@ pub(crate) fn create_committed_buffer_resource( device: &crate::dx12::Device, desc: &crate::BufferDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result<(), crate::DeviceError> { +) -> Result { let is_cpu_read = desc.usage.contains(crate::BufferUses::MAP_READ); let is_cpu_write = desc.usage.contains(crate::BufferUses::MAP_WRITE); @@ -250,6 +247,8 @@ pub(crate) fn create_committed_buffer_resource( VisibleNodeMask: 0, }; + let mut resource = None; + unsafe { 
device.raw.CreateCommittedResource( &heap_properties, @@ -261,24 +260,19 @@ pub(crate) fn create_committed_buffer_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, - resource, + &mut resource, ) } .into_device_result("Committed buffer creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } - - Ok(()) + resource.ok_or(crate::DeviceError::Unexpected) } pub(crate) fn create_committed_texture_resource( device: &crate::dx12::Device, _desc: &crate::TextureDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result<(), crate::DeviceError> { +) -> Result { let heap_properties = Direct3D12::D3D12_HEAP_PROPERTIES { Type: Direct3D12::D3D12_HEAP_TYPE_CUSTOM, CPUPageProperty: Direct3D12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, @@ -290,6 +284,8 @@ pub(crate) fn create_committed_texture_resource( VisibleNodeMask: 0, }; + let mut resource = None; + unsafe { device.raw.CreateCommittedResource( &heap_properties, @@ -301,14 +297,10 @@ pub(crate) fn create_committed_texture_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, // clear value - resource, + &mut resource, ) } .into_device_result("Committed texture creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } - - Ok(()) + resource.ok_or(crate::DeviceError::Unexpected) } diff --git a/wgpu-hal/src/dynamic/command.rs b/wgpu-hal/src/dynamic/command.rs index 6c0f1cb02d..4ecdf74723 100644 --- a/wgpu-hal/src/dynamic/command.rs +++ b/wgpu-hal/src/dynamic/command.rs @@ -61,7 +61,7 @@ pub trait DynCommandEncoder: DynResource + std::fmt::Debug { &mut self, layout: &dyn DynPipelineLayout, index: u32, - group: &dyn DynBindGroup, + group: Option<&dyn DynBindGroup>, dynamic_offsets: &[wgt::DynamicOffset], ); @@ -282,9 +282,15 @@ impl DynCommandEncoder for C { &mut self, layout: &dyn DynPipelineLayout, index: u32, - group: &dyn DynBindGroup, + group: Option<&dyn DynBindGroup>, 
dynamic_offsets: &[wgt::DynamicOffset], ) { + if group.is_none() { + // TODO: Handle group None correctly. + return; + } + let group = group.unwrap(); + let layout = layout.expect_downcast_ref(); let group = group.expect_downcast_ref(); unsafe { C::set_bind_group(self, layout, index, group, dynamic_offsets) }; diff --git a/wgpu-hal/src/dynamic/device.rs b/wgpu-hal/src/dynamic/device.rs index 1386196d60..f044a001de 100644 --- a/wgpu-hal/src/dynamic/device.rs +++ b/wgpu-hal/src/dynamic/device.rs @@ -24,6 +24,7 @@ pub trait DynDevice: DynResource { ) -> Result, DeviceError>; unsafe fn destroy_buffer(&self, buffer: Box); + unsafe fn add_raw_buffer(&self, buffer: &dyn DynBuffer); unsafe fn map_buffer( &self, @@ -41,6 +42,8 @@ pub trait DynDevice: DynResource { desc: &TextureDescriptor, ) -> Result, DeviceError>; unsafe fn destroy_texture(&self, texture: Box); + unsafe fn add_raw_texture(&self, texture: &dyn DynTexture); + unsafe fn create_texture_view( &self, texture: &dyn DynTexture, @@ -177,6 +180,10 @@ impl DynDevice for D { unsafe fn destroy_buffer(&self, buffer: Box) { unsafe { D::destroy_buffer(self, buffer.unbox()) }; } + unsafe fn add_raw_buffer(&self, buffer: &dyn DynBuffer) { + let buffer = buffer.expect_downcast_ref(); + unsafe { D::add_raw_buffer(self, buffer) }; + } unsafe fn map_buffer( &self, @@ -217,6 +224,11 @@ impl DynDevice for D { unsafe { D::destroy_texture(self, texture.unbox()) }; } + unsafe fn add_raw_texture(&self, texture: &dyn DynTexture) { + let texture = texture.expect_downcast_ref(); + unsafe { D::add_raw_texture(self, texture) }; + } + unsafe fn create_texture_view( &self, texture: &dyn DynTexture, diff --git a/wgpu-hal/src/empty.rs b/wgpu-hal/src/empty.rs index 4d8868c360..72d9784d65 100644 --- a/wgpu-hal/src/empty.rs +++ b/wgpu-hal/src/empty.rs @@ -168,6 +168,8 @@ impl crate::Device for Context { Ok(Resource) } unsafe fn destroy_buffer(&self, buffer: Resource) {} + unsafe fn add_raw_buffer(&self, _buffer: &Resource) {} + unsafe fn 
map_buffer( &self, buffer: &Resource, @@ -183,6 +185,8 @@ impl crate::Device for Context { Ok(Resource) } unsafe fn destroy_texture(&self, texture: Resource) {} + unsafe fn add_raw_texture(&self, _texture: &Resource) {} + unsafe fn create_texture_view( &self, texture: &Resource, diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index 97d692c989..a5bf358763 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -841,6 +841,16 @@ impl super::Adapter { alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(4).unwrap(), buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(), + // #6151: `wgpu_hal::gles` doesn't ask Naga to inject bounds + // checks in GLSL, and it doesn't request extensions like + // `KHR_robust_buffer_access_behavior` that would provide + // them, so we can't really implement the checks promised by + // [`crate::BufferBinding`]. + // + // Since this is a pre-existing condition, for the time + // being, provide 1 as the value here, to cause as little + // trouble as possible. 
+ uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, }, }) @@ -1097,7 +1107,7 @@ impl crate::Adapter for super::Adapter { Tf::Rgba8Sint => renderable | storage, Tf::Rgb10a2Uint => renderable, Tf::Rgb10a2Unorm => filterable_renderable, - Tf::Rg11b10UFloat => filterable | float_renderable, + Tf::Rg11b10Ufloat => filterable | float_renderable, Tf::R64Uint => empty, Tf::Rg32Uint => renderable, Tf::Rg32Sint => renderable, diff --git a/wgpu-hal/src/gles/command.rs b/wgpu-hal/src/gles/command.rs index c002e76c1b..2df3c1a991 100644 --- a/wgpu-hal/src/gles/command.rs +++ b/wgpu-hal/src/gles/command.rs @@ -1,6 +1,9 @@ use super::{conv, Command as C}; use arrayvec::ArrayVec; -use std::{mem, ops::Range}; +use std::{ + mem::{self, size_of, size_of_val}, + ops::Range, +}; #[derive(Clone, Copy, Debug, Default)] struct TextureSlotDesc { @@ -82,7 +85,7 @@ impl super::CommandBuffer { fn add_push_constant_data(&mut self, data: &[u32]) -> Range { let data_raw = - unsafe { std::slice::from_raw_parts(data.as_ptr().cast(), mem::size_of_val(data)) }; + unsafe { std::slice::from_raw_parts(data.as_ptr().cast(), size_of_val(data)) }; let start = self.data_bytes.len(); assert!(start < u32::MAX as usize); self.data_bytes.extend_from_slice(data_raw); @@ -1083,7 +1086,7 @@ impl crate::CommandEncoder for super::CommandEncoder { self.prepare_draw(0); for draw in 0..draw_count as wgt::BufferAddress { let indirect_offset = - offset + draw * mem::size_of::() as wgt::BufferAddress; + offset + draw * size_of::() as wgt::BufferAddress; #[allow(clippy::clone_on_copy)] // False positive when cloning glow::UniformLocation self.cmd_buffer.commands.push(C::DrawIndirect { topology: self.state.topology, @@ -1105,8 +1108,8 @@ impl crate::CommandEncoder for super::CommandEncoder { wgt::IndexFormat::Uint32 => glow::UNSIGNED_INT, }; for draw in 0..draw_count as wgt::BufferAddress { - let indirect_offset = offset - + draw * mem::size_of::() as wgt::BufferAddress; + let indirect_offset = + offset + 
draw * size_of::() as wgt::BufferAddress; #[allow(clippy::clone_on_copy)] // False positive when cloning glow::UniformLocation self.cmd_buffer.commands.push(C::DrawIndexedIndirect { topology: self.state.topology, diff --git a/wgpu-hal/src/gles/conv.rs b/wgpu-hal/src/gles/conv.rs index 7e2050a62e..be6cfea203 100644 --- a/wgpu-hal/src/gles/conv.rs +++ b/wgpu-hal/src/gles/conv.rs @@ -45,7 +45,7 @@ impl super::AdapterShared { glow::RGBA, glow::UNSIGNED_INT_2_10_10_10_REV, ), - Tf::Rg11b10UFloat => ( + Tf::Rg11b10Ufloat => ( glow::R11F_G11F_B10F, glow::RGB, glow::UNSIGNED_INT_10F_11F_11F_REV, diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index ad092307e9..15292d95c5 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -110,22 +110,21 @@ impl super::Device { /// /// - `name` must be created respecting `desc` /// - `name` must be a texture - /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the texture. If `drop_guard` is - /// [`Some`], the texture must be valid until the drop implementation - /// of the drop guard is called. + /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the texture. If + /// `drop_callback` is [`Some`], the texture must be valid until the callback is called. 
#[cfg(any(native, Emscripten))] pub unsafe fn texture_from_raw( &self, name: std::num::NonZeroU32, desc: &crate::TextureDescriptor, - drop_guard: Option, + drop_callback: Option, ) -> super::Texture { super::Texture { inner: super::TextureInner::Texture { raw: glow::NativeTexture(name), target: super::Texture::get_info_from_desc(desc), }, - drop_guard, + drop_guard: crate::DropGuard::from_option(drop_callback), mip_level_count: desc.mip_level_count, array_layer_count: desc.array_layer_count(), format: desc.format, @@ -138,21 +137,20 @@ impl super::Device { /// /// - `name` must be created respecting `desc` /// - `name` must be a renderbuffer - /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the renderbuffer. If `drop_guard` is - /// [`Some`], the renderbuffer must be valid until the drop implementation - /// of the drop guard is called. + /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the renderbuffer. If + /// `drop_callback` is [`Some`], the renderbuffer must be valid until the callback is called. 
#[cfg(any(native, Emscripten))] pub unsafe fn texture_from_raw_renderbuffer( &self, name: std::num::NonZeroU32, desc: &crate::TextureDescriptor, - drop_guard: Option, + drop_callback: Option, ) -> super::Texture { super::Texture { inner: super::TextureInner::Renderbuffer { raw: glow::NativeRenderbuffer(name), }, - drop_guard, + drop_guard: crate::DropGuard::from_option(drop_callback), mip_level_count: desc.mip_level_count, array_layer_count: desc.array_layer_count(), format: desc.format, @@ -649,6 +647,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -984,6 +986,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index 89baa52a6f..42aec2b253 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -1,8 +1,10 @@ use glow::HasContext; use once_cell::sync::Lazy; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::{MappedMutexGuard, Mutex, MutexGuard, RwLock}; -use std::{collections::HashMap, ffi, os::raw, ptr, rc::Rc, sync::Arc, time::Duration}; +use std::{ + collections::HashMap, ffi, mem::ManuallyDrop, os::raw, ptr, rc::Rc, sync::Arc, time::Duration, +}; /// The amount of time to wait while trying to obtain a lock to the adapter context const CONTEXT_LOCK_TIMEOUT_SECS: u64 = 1; @@ -141,7 +143,7 @@ impl Drop for DisplayOwner { match self.display { DisplayRef::X11(ptr) => unsafe { let func: libloading::Symbol = - self.library.get(b"XCloseDisplay").unwrap(); + self.library.get(b"XCloseDisplay\0").unwrap(); func(ptr.as_ptr()); }, DisplayRef::Wayland => {} @@ -153,7 +155,7 @@ fn open_x_display() -> Option { 
log::debug!("Loading X11 library to get the current display"); unsafe { let library = find_library(&["libX11.so.6", "libX11.so"])?; - let func: libloading::Symbol = library.get(b"XOpenDisplay").unwrap(); + let func: libloading::Symbol = library.get(b"XOpenDisplay\0").unwrap(); let result = func(ptr::null()); ptr::NonNull::new(result).map(|ptr| DisplayOwner { display: DisplayRef::X11(ptr), @@ -180,9 +182,9 @@ fn test_wayland_display() -> Option { let library = unsafe { let client_library = find_library(&["libwayland-client.so.0", "libwayland-client.so"])?; let wl_display_connect: libloading::Symbol = - client_library.get(b"wl_display_connect").unwrap(); + client_library.get(b"wl_display_connect\0").unwrap(); let wl_display_disconnect: libloading::Symbol = - client_library.get(b"wl_display_disconnect").unwrap(); + client_library.get(b"wl_display_disconnect\0").unwrap(); let display = ptr::NonNull::new(wl_display_connect(ptr::null()))?; wl_display_disconnect(display.as_ptr()); find_library(&["libwayland-egl.so.1", "libwayland-egl.so"])? @@ -295,6 +297,7 @@ impl EglContext { .make_current(self.display, self.pbuffer, self.pbuffer, Some(self.raw)) .unwrap(); } + fn unmake_current(&self) { self.instance .make_current(self.display, None, None, None) @@ -305,7 +308,7 @@ impl EglContext { /// A wrapper around a [`glow::Context`] and the required EGL context that uses locking to guarantee /// exclusive access when shared with multiple threads. pub struct AdapterContext { - glow: Mutex, + glow: Mutex>, egl: Option, } @@ -346,14 +349,39 @@ impl AdapterContext { } } +impl Drop for AdapterContext { + fn drop(&mut self) { + struct CurrentGuard<'a>(&'a EglContext); + impl Drop for CurrentGuard<'_> { + fn drop(&mut self) { + self.0.unmake_current(); + } + } + + // Context must be current when dropped. See safety docs on + // `glow::HasContext`. 
+ // + // NOTE: This is only set to `None` by `Adapter::new_external` which + // requires the context to be current when anything that may be holding + // the `Arc` is dropped. + let _guard = self.egl.as_ref().map(|egl| { + egl.make_current(); + CurrentGuard(egl) + }); + let glow = self.glow.get_mut(); + // SAFETY: Field not used after this. + unsafe { ManuallyDrop::drop(glow) }; + } +} + struct EglContextLock<'a> { instance: &'a Arc, display: khronos_egl::Display, } -/// A guard containing a lock to an [`AdapterContext`] +/// A guard containing a lock to an [`AdapterContext`], while the GL context is kept current. pub struct AdapterContextLock<'a> { - glow: MutexGuard<'a, glow::Context>, + glow: MutexGuard<'a, ManuallyDrop>, egl: Option>, } @@ -387,10 +415,12 @@ impl AdapterContext { /// /// > **Note:** Calling this function **will** still lock the [`glow::Context`] which adds an /// > extra safe-guard against accidental concurrent access to the context. - pub unsafe fn get_without_egl_lock(&self) -> MutexGuard { - self.glow + pub unsafe fn get_without_egl_lock(&self) -> MappedMutexGuard { + let guard = self + .glow .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS)) - .expect("Could not lock adapter context. This is most-likely a deadlock.") + .expect("Could not lock adapter context. This is most-likely a deadlock."); + MutexGuard::map(guard, |glow| &mut **glow) } /// Obtain a lock to the EGL context and get handle to the [`glow::Context`] that can be used to @@ -1052,6 +1082,10 @@ impl crate::Instance for Instance { unsafe { gl.debug_message_callback(super::gl_debug_message_callback) }; } + // Wrap in ManuallyDrop to make it easier to "current" the GL context before dropping this + // GLOW context, which could also happen if a panic occurs after we uncurrent the context + // below but before AdapterContext is constructed. 
+ let gl = ManuallyDrop::new(gl); inner.egl.unmake_current(); unsafe { @@ -1073,13 +1107,15 @@ impl super::Adapter { /// - The underlying OpenGL ES context must be current. /// - The underlying OpenGL ES context must be current when interfacing with any objects returned by /// wgpu-hal from this adapter. + /// - The underlying OpenGL ES context must be current when dropping this adapter and when + /// dropping any objects returned from this adapter. pub unsafe fn new_external( fun: impl FnMut(&str) -> *const ffi::c_void, ) -> Option> { let context = unsafe { glow::Context::from_loader_function(fun) }; unsafe { Self::expose(AdapterContext { - glow: Mutex::new(context), + glow: Mutex::new(ManuallyDrop::new(context)), egl: None, }) } @@ -1260,7 +1296,7 @@ impl crate::Surface for Surface { (WindowKind::Wayland, Rwh::Wayland(handle)) => { let library = &self.wsi.display_owner.as_ref().unwrap().library; let wl_egl_window_create: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_create") }.unwrap(); + unsafe { library.get(b"wl_egl_window_create\0") }.unwrap(); let window = unsafe { wl_egl_window_create(handle.surface.as_ptr(), 640, 480) } .cast(); @@ -1369,7 +1405,7 @@ impl crate::Surface for Surface { if let Some(window) = wl_window { let library = &self.wsi.display_owner.as_ref().unwrap().library; let wl_egl_window_resize: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_resize") }.unwrap(); + unsafe { library.get(b"wl_egl_window_resize\0") }.unwrap(); unsafe { wl_egl_window_resize( window, @@ -1441,7 +1477,7 @@ impl crate::Surface for Surface { .expect("unsupported window") .library; let wl_egl_window_destroy: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_destroy") }.unwrap(); + unsafe { library.get(b"wl_egl_window_destroy\0") }.unwrap(); unsafe { wl_egl_window_destroy(window) }; } } diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 3752b0869d..39315f72b7 100644 --- a/wgpu-hal/src/gles/queue.rs +++ 
b/wgpu-hal/src/gles/queue.rs @@ -2,7 +2,8 @@ use super::{conv::is_layered_target, Command as C, PrivateCapabilities}; use arrayvec::ArrayVec; use glow::HasContext; use std::{ - mem, slice, + mem::size_of, + slice, sync::{atomic::Ordering, Arc}, }; @@ -501,6 +502,22 @@ impl super::Queue { v, ); }, + #[cfg(web_sys_unstable_apis)] + wgt::ExternalImageSource::VideoFrame(ref v) => unsafe { + gl.tex_sub_image_3d_with_video_frame( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + z_offset as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + v, + ) + }, wgt::ExternalImageSource::ImageData(ref i) => unsafe { gl.tex_sub_image_3d_with_image_data( dst_target, @@ -576,6 +593,20 @@ impl super::Queue { v, ) }, + #[cfg(web_sys_unstable_apis)] + wgt::ExternalImageSource::VideoFrame(ref v) => unsafe { + gl.tex_sub_image_2d_with_video_frame_and_width_and_height( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + v, + ) + }, wgt::ExternalImageSource::ImageData(ref i) => unsafe { gl.tex_sub_image_2d_with_image_data_and_width_and_height( dst_target, @@ -1012,7 +1043,7 @@ impl super::Queue { let query_data = unsafe { slice::from_raw_parts( temp_query_results.as_ptr().cast::(), - temp_query_results.len() * mem::size_of::(), + temp_query_results.len() * size_of::(), ) }; match dst.raw { @@ -1576,7 +1607,7 @@ impl super::Queue { // // This function is absolutely sketchy and we really should be using bytemuck. 
unsafe fn get_data(data: &[u8], offset: u32) -> &[T; COUNT] { - let data_required = mem::size_of::() * COUNT; + let data_required = size_of::() * COUNT; let raw = &data[(offset as usize)..][..data_required]; diff --git a/wgpu-hal/src/gles/wgl.rs b/wgpu-hal/src/gles/wgl.rs index 68bedb11d2..2d6c91aee0 100644 --- a/wgpu-hal/src/gles/wgl.rs +++ b/wgpu-hal/src/gles/wgl.rs @@ -1,15 +1,7 @@ -use glow::HasContext; -use glutin_wgl_sys::wgl_extra::{ - Wgl, CONTEXT_CORE_PROFILE_BIT_ARB, CONTEXT_DEBUG_BIT_ARB, CONTEXT_FLAGS_ARB, - CONTEXT_PROFILE_MASK_ARB, -}; -use once_cell::sync::Lazy; -use parking_lot::{Mutex, MutexGuard, RwLock}; -use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; use std::{ collections::HashSet, ffi::{c_void, CStr, CString}, - mem, + mem::{self, size_of, size_of_val, ManuallyDrop}, os::raw::c_int, ptr, sync::{ @@ -19,6 +11,15 @@ use std::{ thread, time::Duration, }; + +use glow::HasContext; +use glutin_wgl_sys::wgl_extra::{ + Wgl, CONTEXT_CORE_PROFILE_BIT_ARB, CONTEXT_DEBUG_BIT_ARB, CONTEXT_FLAGS_ARB, + CONTEXT_PROFILE_MASK_ARB, +}; +use once_cell::sync::Lazy; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; use wgt::InstanceFlags; use windows::{ core::{Error, PCSTR}, @@ -48,7 +49,10 @@ impl AdapterContext { } pub fn raw_context(&self) -> *mut c_void { - self.inner.lock().context.context.0 + match self.inner.lock().context { + Some(ref wgl) => wgl.context.0, + None => ptr::null_mut(), + } } /// Obtain a lock to the WGL context and get handle to the [`glow::Context`] that can be used to @@ -62,7 +66,9 @@ impl AdapterContext { .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS)) .expect("Could not lock adapter context. 
This is most-likely a deadlock."); - inner.context.make_current(inner.device.dc).unwrap(); + if let Some(wgl) = &inner.context { + wgl.make_current(inner.device.dc).unwrap() + }; AdapterContextLock { inner } } @@ -79,14 +85,15 @@ impl AdapterContext { .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS)) .expect("Could not lock adapter context. This is most-likely a deadlock."); - inner - .context - .make_current(device) - .map(|()| AdapterContextLock { inner }) + if let Some(wgl) = &inner.context { + wgl.make_current(device)?; + } + + Ok(AdapterContextLock { inner }) } } -/// A guard containing a lock to an [`AdapterContext`] +/// A guard containing a lock to an [`AdapterContext`], while the GL context is kept current. pub struct AdapterContextLock<'a> { inner: MutexGuard<'a, Inner>, } @@ -101,7 +108,9 @@ impl<'a> std::ops::Deref for AdapterContextLock<'a> { impl<'a> Drop for AdapterContextLock<'a> { fn drop(&mut self) { - self.inner.context.unmake_current().unwrap(); + if let Some(wgl) = &self.inner.context { + wgl.unmake_current().unwrap() + } } } @@ -134,9 +143,33 @@ unsafe impl Send for WglContext {} unsafe impl Sync for WglContext {} struct Inner { - gl: glow::Context, + gl: ManuallyDrop, device: InstanceDevice, - context: WglContext, + context: Option, +} + +impl Drop for Inner { + fn drop(&mut self) { + struct CurrentGuard<'a>(&'a WglContext); + impl Drop for CurrentGuard<'_> { + fn drop(&mut self) { + self.0.unmake_current().unwrap(); + } + } + + // Context must be current when dropped. See safety docs on + // `glow::HasContext`. + // + // NOTE: This is only set to `None` by `Adapter::new_external` which + // requires the context to be current when anything that may be holding + // the `Arc` is dropped. + let _guard = self.context.as_ref().map(|wgl| { + wgl.make_current(self.device.dc).unwrap(); + CurrentGuard(wgl) + }); + // SAFETY: Field not used after this. 
+ unsafe { ManuallyDrop::drop(&mut self.gl) }; + } } unsafe impl Send for Inner {} @@ -178,7 +211,7 @@ unsafe fn setup_pixel_format(dc: Gdi::HDC) -> Result<(), crate::InstanceError> { { let format = OpenGL::PIXELFORMATDESCRIPTOR { nVersion: 1, - nSize: mem::size_of::() as u16, + nSize: size_of::() as u16, dwFlags: OpenGL::PFD_DRAW_TO_WINDOW | OpenGL::PFD_SUPPORT_OPENGL | OpenGL::PFD_DOUBLEBUFFER, @@ -214,12 +247,7 @@ unsafe fn setup_pixel_format(dc: Gdi::HDC) -> Result<(), crate::InstanceError> { } let mut format = Default::default(); if unsafe { - OpenGL::DescribePixelFormat( - dc, - index, - mem::size_of_val(&format) as u32, - Some(&mut format), - ) + OpenGL::DescribePixelFormat(dc, index, size_of_val(&format) as u32, Some(&mut format)) } == 0 { return Err(crate::InstanceError::with_source( @@ -262,7 +290,7 @@ fn create_global_window_class() -> Result { } let window_class = WindowsAndMessaging::WNDCLASSEXA { - cbSize: mem::size_of::() as u32, + cbSize: size_of::() as u32, style: WindowsAndMessaging::CS_OWNDC, lpfnWndProc: Some(wnd_proc), cbClsExtra: 0, @@ -497,6 +525,10 @@ impl crate::Instance for Instance { unsafe { gl.debug_message_callback(super::gl_debug_message_callback) }; } + // Wrap in ManuallyDrop to make it easier to "current" the GL context before dropping this + // GLOW context, which could also happen if a panic occurs after we uncurrent the context + // below but before Inner is constructed. + let gl = ManuallyDrop::new(gl); context.unmake_current().map_err(|e| { crate::InstanceError::with_source( String::from("unable to unset the current WGL context"), @@ -508,7 +540,7 @@ impl crate::Instance for Instance { inner: Arc::new(Mutex::new(Inner { device, gl, - context, + context: Some(context), })), srgb_capable, }) @@ -550,6 +582,43 @@ impl crate::Instance for Instance { } } +impl super::Adapter { + /// Creates a new external adapter using the specified loader function. 
+ /// + /// # Safety + /// + /// - The underlying OpenGL ES context must be current. + /// - The underlying OpenGL ES context must be current when interfacing with any objects returned by + /// wgpu-hal from this adapter. + /// - The underlying OpenGL ES context must be current when dropping this adapter and when + /// dropping any objects returned from this adapter. + pub unsafe fn new_external( + fun: impl FnMut(&str) -> *const c_void, + ) -> Option> { + let context = unsafe { glow::Context::from_loader_function(fun) }; + unsafe { + Self::expose(AdapterContext { + inner: Arc::new(Mutex::new(Inner { + gl: ManuallyDrop::new(context), + device: create_instance_device().ok()?, + context: None, + })), + }) + } + } + + pub fn adapter_context(&self) -> &AdapterContext { + &self.shared.context + } +} + +impl super::Device { + /// Returns the underlying WGL context. + pub fn context(&self) -> &AdapterContext { + &self.shared.context + } +} + struct DeviceContextHandle { device: Gdi::HDC, window: Foundation::HWND, diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index b62a6b5962..6578252c1a 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -51,8 +51,8 @@ //! must use [`CommandEncoder::transition_buffers`] between those two //! operations. //! -//! - Pipeline layouts are *explicitly specified* when setting bind -//! group. Incompatible layouts disturb groups bound at higher indices. +//! - Pipeline layouts are *explicitly specified* when setting bind groups. +//! Incompatible layouts disturb groups bound at higher indices. //! //! - The API *accepts collections as iterators*, to avoid forcing the user to //! store data in particular containers. The implementation doesn't guarantee @@ -303,8 +303,35 @@ pub type MemoryRange = Range; pub type FenceValue = u64; pub type AtomicFenceValue = std::sync::atomic::AtomicU64; -/// Drop guard to signal wgpu-hal is no longer using an externally created object. 
-pub type DropGuard = Box; +/// A callback to signal that wgpu is no longer using a resource. +#[cfg(any(gles, vulkan))] +pub type DropCallback = Box; + +#[cfg(any(gles, vulkan))] +pub struct DropGuard { + callback: DropCallback, +} + +#[cfg(all(any(gles, vulkan), any(native, Emscripten)))] +impl DropGuard { + fn from_option(callback: Option) -> Option { + callback.map(|callback| Self { callback }) + } +} + +#[cfg(any(gles, vulkan))] +impl Drop for DropGuard { + fn drop(&mut self) { + (self.callback)(); + } +} + +#[cfg(any(gles, vulkan))] +impl fmt::Debug for DropGuard { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DropGuard").finish() + } +} #[derive(Clone, Debug, PartialEq, Eq, Error)] pub enum DeviceError { @@ -318,6 +345,18 @@ pub enum DeviceError { Unexpected, } +#[allow(dead_code)] // may be unused on some platforms +#[cold] +fn hal_usage_error(txt: T) -> ! { + panic!("wgpu-hal invariant was violated (usage error): {txt}") +} + +#[allow(dead_code)] // may be unused on some platforms +#[cold] +fn hal_internal_error(txt: T) -> ! { + panic!("wgpu-hal ran into a preventable internal error: {txt}") +} + #[derive(Clone, Debug, Eq, PartialEq, Error)] pub enum ShaderError { #[error("Compilation failed: {0:?}")] @@ -684,6 +723,9 @@ pub trait Device: WasmNotSendSync { /// - The given `buffer` must not currently be mapped. unsafe fn destroy_buffer(&self, buffer: ::Buffer); + /// A hook for when a wgpu-core buffer is created from a raw wgpu-hal buffer. + unsafe fn add_raw_buffer(&self, buffer: &::Buffer); + /// Return a pointer to CPU memory mapping the contents of `buffer`. /// /// Buffer mappings are persistent: the buffer may remain mapped on the CPU @@ -775,6 +817,10 @@ pub trait Device: WasmNotSendSync { desc: &TextureDescriptor, ) -> Result<::Texture, DeviceError>; unsafe fn destroy_texture(&self, texture: ::Texture); + + /// A hook for when a wgpu-core texture is created from a raw wgpu-hal texture. 
+ unsafe fn add_raw_texture(&self, texture: &::Texture); + unsafe fn create_texture_view( &self, texture: &::Texture, @@ -1203,8 +1249,40 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // pass common - /// Sets the bind group at `index` to `group`, assuming the layout - /// of all the preceding groups to be taken from `layout`. + /// Sets the bind group at `index` to `group`. + /// + /// If this is not the first call to `set_bind_group` within the current + /// render or compute pass: + /// + /// - If `layout` contains `n` bind group layouts, then any previously set + /// bind groups at indices `n` or higher are cleared. + /// + /// - If the first `m` bind group layouts of `layout` are equal to those of + /// the previously passed layout, but no more, then any previously set + /// bind groups at indices `m` or higher are cleared. + /// + /// It follows from the above that passing the same layout as before doesn't + /// clear any bind groups. + /// + /// # Safety + /// + /// - This [`CommandEncoder`] must be within a render or compute pass. + /// + /// - `index` must be the valid index of some bind group layout in `layout`. + /// Call this the "relevant bind group layout". + /// + /// - The layout of `group` must be equal to the relevant bind group layout. + /// + /// - The length of `dynamic_offsets` must match the number of buffer + /// bindings [with dynamic offsets][hdo] in the relevant bind group + /// layout. + /// + /// - If those buffer bindings are ordered by increasing [`binding` number] + /// and paired with elements from `dynamic_offsets`, then each offset must + /// be a valid offset for the binding's corresponding buffer in `group`. 
+ /// + /// [hdo]: wgt::BindingType::Buffer::has_dynamic_offset + /// [`binding` number]: wgt::BindGroupLayoutEntry::binding unsafe fn set_bind_group( &mut self, layout: &::PipelineLayout, @@ -1256,11 +1334,43 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // render passes - // Begins a render pass, clears all active bindings. + /// Begin a new render pass, clearing all active bindings. + /// + /// This clears any bindings established by the following calls: + /// + /// - [`set_bind_group`](CommandEncoder::set_bind_group) + /// - [`set_push_constants`](CommandEncoder::set_push_constants) + /// - [`begin_query`](CommandEncoder::begin_query) + /// - [`set_render_pipeline`](CommandEncoder::set_render_pipeline) + /// - [`set_index_buffer`](CommandEncoder::set_index_buffer) + /// - [`set_vertex_buffer`](CommandEncoder::set_vertex_buffer) + /// + /// # Safety + /// + /// - All prior calls to [`begin_render_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_render_pass`]. + /// + /// - All prior calls to [`begin_compute_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_compute_pass`]. + /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn begin_render_pass( &mut self, desc: &RenderPassDescriptor<::QuerySet, ::TextureView>, ); + + /// End the current render pass. + /// + /// # Safety + /// + /// - There must have been a prior call to [`begin_render_pass`] on this [`CommandEncoder`] + /// that has not been followed by a call to [`end_render_pass`]. 
+ /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass unsafe fn end_render_pass(&mut self); unsafe fn set_render_pipeline(&mut self, pipeline: &::RenderPipeline); @@ -1326,11 +1436,41 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // compute passes - // Begins a compute pass, clears all active bindings. + /// Begin a new compute pass, clearing all active bindings. + /// + /// This clears any bindings established by the following calls: + /// + /// - [`set_bind_group`](CommandEncoder::set_bind_group) + /// - [`set_push_constants`](CommandEncoder::set_push_constants) + /// - [`begin_query`](CommandEncoder::begin_query) + /// - [`set_compute_pipeline`](CommandEncoder::set_compute_pipeline) + /// + /// # Safety + /// + /// - All prior calls to [`begin_render_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_render_pass`]. + /// + /// - All prior calls to [`begin_compute_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_compute_pass`]. + /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn begin_compute_pass( &mut self, desc: &ComputePassDescriptor<::QuerySet>, ); + + /// End the current compute pass. + /// + /// # Safety + /// + /// - There must have been a prior call to [`begin_compute_pass`] on this [`CommandEncoder`] + /// that has not been followed by a call to [`end_compute_pass`]. 
+ /// + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn end_compute_pass(&mut self); unsafe fn set_compute_pipeline(&mut self, pipeline: &::ComputePipeline); @@ -1608,9 +1748,27 @@ pub struct InstanceDescriptor<'a> { pub struct Alignments { /// The alignment of the start of the buffer used as a GPU copy source. pub buffer_copy_offset: wgt::BufferSize, + /// The alignment of the row pitch of the texture data stored in a buffer that is /// used in a GPU copy operation. pub buffer_copy_pitch: wgt::BufferSize, + + /// The finest alignment of bound range checking for uniform buffers. + /// + /// When `wgpu_hal` restricts shader references to the [accessible + /// region][ar] of a [`Uniform`] buffer, the size of the accessible region + /// is the bind group binding's stated [size], rounded up to the next + /// multiple of this value. + /// + /// We don't need an analogous field for storage buffer bindings, because + /// all our backends promise to enforce the size at least to a four-byte + /// alignment, and `wgpu_hal` requires bound range lengths to be a multiple + /// of four anyway. + /// + /// [ar]: struct.BufferBinding.html#accessible-region + /// [`Uniform`]: wgt::BufferBindingType::Uniform + /// [size]: BufferBinding::size + pub uniform_bounds_check_alignment: wgt::BufferSize, } #[derive(Clone, Debug)] @@ -1780,6 +1938,40 @@ pub struct PipelineLayoutDescriptor<'a, B: DynBindGroupLayout + ?Sized> { pub push_constant_ranges: &'a [wgt::PushConstantRange], } +/// A region of a buffer made visible to shaders via a [`BindGroup`]. +/// +/// [`BindGroup`]: Api::BindGroup +/// +/// ## Accessible region +/// +/// `wgpu_hal` guarantees that shaders compiled with +/// [`ShaderModuleDescriptor::runtime_checks`] set to `true` cannot read or +/// write data via this binding outside the *accessible region* of [`buffer`]: +/// +/// - The accessible region starts at [`offset`]. 
+/// +/// - For [`Storage`] bindings, the size of the accessible region is [`size`], +/// which must be a multiple of 4. +/// +/// - For [`Uniform`] bindings, the size of the accessible region is [`size`] +/// rounded up to the next multiple of +/// [`Alignments::uniform_bounds_check_alignment`]. +/// +/// Note that this guarantee is stricter than WGSL's requirements for +/// [out-of-bounds accesses][woob], as WGSL allows them to return values from +/// elsewhere in the buffer. But this guarantee is necessary anyway, to permit +/// `wgpu-core` to avoid clearing uninitialized regions of buffers that will +/// never be read by the application before they are overwritten. This +/// optimization consults bind group buffer binding regions to determine which +/// parts of which buffers shaders might observe. This optimization is only +/// sound if shader access is bounds-checked. +/// +/// [`buffer`]: BufferBinding::buffer +/// [`offset`]: BufferBinding::offset +/// [`size`]: BufferBinding::size +/// [`Storage`]: wgt::BufferBindingType::Storage +/// [`Uniform`]: wgt::BufferBindingType::Uniform +/// [woob]: https://gpuweb.github.io/gpuweb/wgsl/#out-of-bounds-access-sec #[derive(Debug)] pub struct BufferBinding<'a, B: DynBuffer + ?Sized> { /// The buffer being bound. @@ -1898,6 +2090,26 @@ pub enum ShaderInput<'a> { pub struct ShaderModuleDescriptor<'a> { pub label: Label<'a>, + + /// Enforce bounds checks in shaders, even if the underlying driver doesn't + /// support doing so natively. + /// + /// When this is `true`, `wgpu_hal` promises that shaders can only read or + /// write the [accessible region][ar] of a bindgroup's buffer bindings. If + /// the underlying graphics platform cannot implement these bounds checks + /// itself, `wgpu_hal` will inject bounds checks before presenting the + /// shader to the platform. + /// + /// When this is `false`, `wgpu_hal` only enforces such bounds checks if the + /// underlying platform provides a way to do so itself. 
`wgpu_hal` does not + /// itself add any bounds checks to generated shader code. + /// + /// Note that `wgpu_hal` users may try to initialize only those portions of + /// buffers that they anticipate might be read from. Passing `false` here + /// may allow shaders to see wider regions of the buffers than expected, + /// making such deferred initialization visible to the application. + /// + /// [ar]: struct.BufferBinding.html#accessible-region pub runtime_checks: bool, } diff --git a/wgpu-hal/src/metal/adapter.rs b/wgpu-hal/src/metal/adapter.rs index a4b7d2b55b..136476114a 100644 --- a/wgpu-hal/src/metal/adapter.rs +++ b/wgpu-hal/src/metal/adapter.rs @@ -178,7 +178,7 @@ impl crate::Adapter for super::Adapter { flags.set(Tfc::STORAGE, pc.format_rgb10a2_unorm_all); flags } - Tf::Rg11b10UFloat => { + Tf::Rg11b10Ufloat => { let mut flags = all_caps; flags.set(Tfc::STORAGE, pc.format_rg11b10_all); flags @@ -998,6 +998,10 @@ impl super::PrivateCapabilities { alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(self.buffer_alignment).unwrap(), buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(), + // This backend has Naga incorporate bounds checks into the + // Metal Shading Language it generates, so from `wgpu_hal`'s + // users' point of view, references are tightly checked. 
+ uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, downlevel, } @@ -1037,7 +1041,7 @@ impl super::PrivateCapabilities { Tf::Rgba8Sint => RGBA8Sint, Tf::Rgb10a2Uint => RGB10A2Uint, Tf::Rgb10a2Unorm => RGB10A2Unorm, - Tf::Rg11b10UFloat => RG11B10Float, + Tf::Rg11b10UFloat => RG11B10float, Tf::R64Uint => RG32Uint, Tf::Rg32Uint => RG32Uint, Tf::Rg32Sint => RG32Sint, diff --git a/wgpu-hal/src/metal/command.rs b/wgpu-hal/src/metal/command.rs index 7eea069a81..069013570f 100644 --- a/wgpu-hal/src/metal/command.rs +++ b/wgpu-hal/src/metal/command.rs @@ -1,6 +1,6 @@ use super::{conv, AsNative, TimestampQuerySupport}; use crate::CommandEncoder as _; -use std::{borrow::Cow, mem, ops::Range}; +use std::{borrow::Cow, mem::size_of, ops::Range}; // has to match `Temp::binding_sizes` const WORD_SIZE: usize = 4; @@ -1083,7 +1083,7 @@ impl crate::CommandEncoder for super::CommandEncoder { let encoder = self.state.render.as_ref().unwrap(); for _ in 0..draw_count { encoder.draw_primitives_indirect(self.state.raw_primitive_type, &buffer.raw, offset); - offset += mem::size_of::() as wgt::BufferAddress; + offset += size_of::() as wgt::BufferAddress; } } @@ -1104,7 +1104,7 @@ impl crate::CommandEncoder for super::CommandEncoder { &buffer.raw, offset, ); - offset += mem::size_of::() as wgt::BufferAddress; + offset += size_of::() as wgt::BufferAddress; } } diff --git a/wgpu-hal/src/metal/device.rs b/wgpu-hal/src/metal/device.rs index 077c10f517..347a97a086 100644 --- a/wgpu-hal/src/metal/device.rs +++ b/wgpu-hal/src/metal/device.rs @@ -356,6 +356,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -436,6 +440,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe 
fn create_texture_view( &self, texture: &super::Texture, diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index 62d409a8ff..1935e843ec 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -96,9 +96,7 @@ crate::impl_dyn_resource!( TextureView ); -pub struct Instance { - managed_metal_layer_delegate: surface::HalManagedMetalLayerDelegate, -} +pub struct Instance {} impl Instance { pub fn create_surface_from_layer(&self, layer: &metal::MetalLayerRef) -> Surface { @@ -113,9 +111,7 @@ impl crate::Instance for Instance { profiling::scope!("Init Metal Backend"); // We do not enable metal validation based on the validation flags as it affects the entire // process. Instead, we enable the validation inside the test harness itself in tests/src/native.rs. - Ok(Instance { - managed_metal_layer_delegate: surface::HalManagedMetalLayerDelegate::new(), - }) + Ok(Instance {}) } unsafe fn create_surface( @@ -126,16 +122,12 @@ impl crate::Instance for Instance { match window_handle { #[cfg(target_os = "ios")] raw_window_handle::RawWindowHandle::UiKit(handle) => { - let _ = &self.managed_metal_layer_delegate; - Ok(unsafe { Surface::from_view(handle.ui_view.as_ptr(), None) }) + Ok(unsafe { Surface::from_view(handle.ui_view.cast()) }) } #[cfg(target_os = "macos")] - raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(unsafe { - Surface::from_view( - handle.ns_view.as_ptr(), - Some(&self.managed_metal_layer_delegate), - ) - }), + raw_window_handle::RawWindowHandle::AppKit(handle) => { + Ok(unsafe { Surface::from_view(handle.ns_view.cast()) }) + } _ => Err(crate::InstanceError::new(format!( "window handle {window_handle:?} is not a Metal-compatible handle" ))), @@ -367,7 +359,6 @@ pub struct Device { } pub struct Surface { - view: Option>, render_layer: Mutex, swapchain_format: RwLock>, extent: RwLock, diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 115e4208a5..668b602474 100644 --- 
a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -1,26 +1,30 @@ #![allow(clippy::let_unit_value)] // `let () =` being used to constrain result type -use std::{os::raw::c_void, ptr::NonNull, sync::Once, thread}; +use std::ffi::c_uint; +use std::mem::ManuallyDrop; +use std::ptr::NonNull; +use std::sync::Once; +use std::thread; use core_graphics_types::{ base::CGFloat, geometry::{CGRect, CGSize}, }; +use metal::foreign_types::ForeignType; use objc::{ class, declare::ClassDecl, msg_send, - rc::autoreleasepool, + rc::{autoreleasepool, StrongPtr}, runtime::{Class, Object, Sel, BOOL, NO, YES}, sel, sel_impl, }; use parking_lot::{Mutex, RwLock}; -#[cfg(target_os = "macos")] #[link(name = "QuartzCore", kind = "framework")] extern "C" { #[allow(non_upper_case_globals)] - static kCAGravityTopLeft: *mut Object; + static kCAGravityResize: *mut Object; } extern "C" fn layer_should_inherit_contents_scale_from_window( @@ -46,6 +50,7 @@ impl HalManagedMetalLayerDelegate { type Fun = extern "C" fn(&Class, Sel, *mut Object, CGFloat, *mut Object) -> BOOL; let mut decl = ClassDecl::new(&class_name, class!(NSObject)).unwrap(); unsafe { + // decl.add_class_method::( sel!(layer:shouldInheritContentsScale:fromWindow:), layer_should_inherit_contents_scale_from_window, @@ -58,9 +63,8 @@ impl HalManagedMetalLayerDelegate { } impl super::Surface { - fn new(view: Option>, layer: metal::MetalLayer) -> Self { + fn new(layer: metal::MetalLayer) -> Self { Self { - view, render_layer: Mutex::new(layer), swapchain_format: RwLock::new(None), extent: RwLock::new(wgt::Extent3d::default()), @@ -71,86 +75,183 @@ impl super::Surface { /// If not called on the main thread, this will panic. 
#[allow(clippy::transmute_ptr_to_ref)] - pub unsafe fn from_view( - view: *mut c_void, - delegate: Option<&HalManagedMetalLayerDelegate>, - ) -> Self { - let view = view.cast::(); - let render_layer = { - let layer = unsafe { Self::get_metal_layer(view, delegate) }; - let layer = layer.cast::(); - // SAFETY: This pointer… - // - // - …is properly aligned. - // - …is dereferenceable to a `MetalLayerRef` as an invariant of the `metal` - // field. - // - …points to an _initialized_ `MetalLayerRef`. - // - …is only ever aliased via an immutable reference that lives within this - // lexical scope. - unsafe { &*layer } - } - .to_owned(); - let _: *mut c_void = msg_send![view, retain]; - Self::new(NonNull::new(view), render_layer) + pub unsafe fn from_view(view: NonNull) -> Self { + let layer = unsafe { Self::get_metal_layer(view) }; + let layer = ManuallyDrop::new(layer); + // SAFETY: The layer is an initialized instance of `CAMetalLayer`, and + // we transfer the retain count to `MetalLayer` using `ManuallyDrop`. + let layer = unsafe { metal::MetalLayer::from_ptr(layer.cast()) }; + Self::new(layer) } pub unsafe fn from_layer(layer: &metal::MetalLayerRef) -> Self { let class = class!(CAMetalLayer); let proper_kind: BOOL = msg_send![layer, isKindOfClass: class]; assert_eq!(proper_kind, YES); - Self::new(None, layer.to_owned()) + Self::new(layer.to_owned()) } - /// If not called on the main thread, this will panic. - pub(crate) unsafe fn get_metal_layer( - view: *mut Object, - delegate: Option<&HalManagedMetalLayerDelegate>, - ) -> *mut Object { - if view.is_null() { - panic!("window does not have a valid contentView"); - } - + /// Get or create a new `CAMetalLayer` associated with the given `NSView` + /// or `UIView`. + /// + /// # Panics + /// + /// If called from a thread that is not the main thread, this will panic. + /// + /// # Safety + /// + /// The `view` must be a valid instance of `NSView` or `UIView`. 
+ pub(crate) unsafe fn get_metal_layer(view: NonNull) -> StrongPtr { let is_main_thread: BOOL = msg_send![class!(NSThread), isMainThread]; if is_main_thread == NO { panic!("get_metal_layer cannot be called in non-ui thread."); } - let main_layer: *mut Object = msg_send![view, layer]; - let class = class!(CAMetalLayer); - let is_valid_layer: BOOL = msg_send![main_layer, isKindOfClass: class]; + // Ensure that the view is layer-backed. + // Views are always layer-backed in UIKit. + #[cfg(target_os = "macos")] + let () = msg_send![view.as_ptr(), setWantsLayer: YES]; - if is_valid_layer == YES { - main_layer + let root_layer: *mut Object = msg_send![view.as_ptr(), layer]; + // `-[NSView layer]` can return `NULL`, while `-[UIView layer]` should + // always be available. + assert!(!root_layer.is_null(), "failed making the view layer-backed"); + + // NOTE: We explicitly do not touch properties such as + // `layerContentsPlacement`, `needsDisplayOnBoundsChange` and + // `contentsGravity` etc. on the root layer, both since we would like + // to give the user full control over them, and because the default + // values suit us pretty well (especially the contents placement being + // `NSViewLayerContentsRedrawDuringViewResize`, which allows the view + // to receive `drawRect:`/`updateLayer` calls). + + let is_metal_layer: BOOL = msg_send![root_layer, isKindOfClass: class!(CAMetalLayer)]; + if is_metal_layer == YES { + // The view has a `CAMetalLayer` as the root layer, which can + // happen for example if user overwrote `-[NSView layerClass]` or + // the view is `MTKView`. + // + // This is easily handled: We take "ownership" over the layer, and + // render directly into that; after all, the user passed a view + // with an explicit Metal layer to us, so this is very likely what + // they expect us to do. + unsafe { StrongPtr::retain(root_layer) } } else { - // If the main layer is not a CAMetalLayer, we create a CAMetalLayer and use it. 
- let new_layer: *mut Object = msg_send![class, new]; - let frame: CGRect = msg_send![main_layer, bounds]; + // The view does not have a `CAMetalLayer` as the root layer (this + // is the default for most views). + // + // This case is trickier! We cannot use the existing layer with + // Metal, so we must do something else. There are a few options: + // + // 1. Panic here, and require the user to pass a view with a + // `CAMetalLayer` layer. + // + // While this would "work", it doesn't solve the problem, and + // instead passes the ball onwards to the user and ecosystem to + // figure it out. + // + // 2. Override the existing layer with a newly created layer. + // + // If we overlook that this does not work in UIKit since + // `UIView`'s `layer` is `readonly`, and that as such we will + // need to do something different there anyhow, this is + // actually a fairly good solution, and was what the original + // implementation did. + // + // It has some problems though, due to: + // + // a. `wgpu` in our API design choosing not to register a + // callback with `-[CALayerDelegate displayLayer:]`, but + // instead leaves it up to the user to figure out when to + // redraw. That is, we rely on other libraries' callbacks + // telling us when to render. + // + // (If this were an API only for Metal, we would probably + // make the user provide a `render` closure that we'd call + // in the right situations. But alas, we have to be + // cross-platform here). + // + // b. Overwriting the `layer` on `NSView` makes the view + // "layer-hosting", see [wantsLayer], which disables drawing + // functionality on the view like `drawRect:`/`updateLayer`. + // + // These two in combination makes it basically impossible for + // crates like Winit to provide a robust rendering callback + // that integrates with the system's built-in mechanisms for + // redrawing, exactly because overwriting the layer would be + // implicitly disabling those mechanisms! 
+ // + // [wantsLayer]: https://developer.apple.com/documentation/appkit/nsview/1483695-wantslayer?language=objc + // + // 3. Create a sublayer. + // + // `CALayer` has the concept of "sublayers", which we can use + // instead of overriding the layer. + // + // This is also the recommended solution on UIKit, so it's nice + // that we can use (almost) the same implementation for these. + // + // It _might_, however, perform ever so slightly worse than + // overriding the layer directly. + // + // 4. Create a new `MTKView` (or a custom view), and add it as a + // subview. + // + // Similar to creating a sublayer (see above), but also + // provides a bunch of event handling that we don't need. + // + // Option 3 seems like the most robust solution, so this is what + // we're going to do. + + // Create a new sublayer. + let new_layer: *mut Object = msg_send![class!(CAMetalLayer), new]; + let () = msg_send![root_layer, addSublayer: new_layer]; + + // Automatically resize the sublayer's frame to match the + // superlayer's bounds. + // + // Note that there is a somewhat hidden design decision in this: + // We define the `width` and `height` in `configure` to control + // the `drawableSize` of the layer, while `bounds` and `frame` are + // outside of the user's direct control - instead, though, they + // can control the size of the view (or root layer), and get the + // desired effect that way. + // + // We _could_ also let `configure` set the `bounds` size, however + // that would be inconsistent with using the root layer directly + // (as we may do, see above). + let width_sizable = 1 << 1; // kCALayerWidthSizable + let height_sizable = 1 << 4; // kCALayerHeightSizable + let mask: c_uint = width_sizable | height_sizable; + let () = msg_send![new_layer, setAutoresizingMask: mask]; + + // Specify the relative size that the auto resizing mask above + // will keep (i.e. tell it to fill out its superlayer). 
+ let frame: CGRect = msg_send![root_layer, bounds]; let () = msg_send![new_layer, setFrame: frame]; - #[cfg(target_os = "ios")] - { - // Unlike NSView, UIView does not allow to replace main layer. - let () = msg_send![main_layer, addSublayer: new_layer]; - // On iOS, "from_view" may be called before the application initialization is complete, - // `msg_send![view, window]` and `msg_send![window, screen]` will get null. - let screen: *mut Object = msg_send![class!(UIScreen), mainScreen]; - let scale_factor: CGFloat = msg_send![screen, nativeScale]; - let () = msg_send![view, setContentScaleFactor: scale_factor]; - }; - #[cfg(target_os = "macos")] - { - let () = msg_send![view, setLayer: new_layer]; - let () = msg_send![view, setWantsLayer: YES]; - let () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityTopLeft }]; - let window: *mut Object = msg_send![view, window]; - if !window.is_null() { - let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; - let () = msg_send![new_layer, setContentsScale: scale_factor]; - } - }; - if let Some(delegate) = delegate { - let () = msg_send![new_layer, setDelegate: delegate.0]; - } - new_layer + + // The gravity to use when the layer's `drawableSize` isn't the + // same as the bounds rectangle. + // + // The desired content gravity is `kCAGravityResize`, because it + // masks / alleviates issues with resizing when + // `present_with_transaction` is disabled, and behaves better when + // moving the window between monitors. + // + // Unfortunately, it also makes it harder to see changes to + // `width` and `height` in `configure`. When debugging resize + // issues, swap this for `kCAGravityTopLeft` instead. + let _: () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityResize }]; + + // Set initial scale factor of the layer. This is kept in sync by + // `configure` (on UIKit), and the delegate below (on AppKit). 
+ let scale_factor: CGFloat = msg_send![root_layer, contentsScale]; + let () = msg_send![new_layer, setContentsScale: scale_factor]; + + let delegate = HalManagedMetalLayerDelegate::new(); + let () = msg_send![new_layer, setDelegate: delegate.0]; + + unsafe { StrongPtr::new(new_layer) } } } @@ -171,16 +272,6 @@ impl super::Surface { } } -impl Drop for super::Surface { - fn drop(&mut self) { - if let Some(view) = self.view { - unsafe { - let () = msg_send![view.as_ptr(), release]; - } - } - } -} - impl crate::Surface for super::Surface { type A = super::Api; @@ -210,19 +301,30 @@ impl crate::Surface for super::Surface { _ => (), } - let device_raw = device.shared.device.lock(); - // On iOS, unless the user supplies a view with a CAMetalLayer, we - // create one as a sublayer. However, when the view changes size, - // its sublayers are not automatically resized, and we must resize - // it here. The drawable size and the layer size don't correlate - #[cfg(target_os = "ios")] + // AppKit / UIKit automatically sets the correct scale factor for + // layers attached to a view. Our layer, however, may not be directly + // attached to a view; in those cases, we need to set the scale + // factor ourselves. + // + // For AppKit, we do so by adding a delegate on the layer with the + // `layer:shouldInheritContentsScale:fromWindow:` method returning + // `true` - this tells the system to automatically update the scale + // factor when it changes. + // + // For UIKit, we manually update the scale factor from the super layer + // here, if there is one. + // + // TODO: Is there a way that we could listen to such changes instead? 
+ #[cfg(not(target_os = "macos"))] { - if let Some(view) = self.view { - let main_layer: *mut Object = msg_send![view.as_ptr(), layer]; - let bounds: CGRect = msg_send![main_layer, bounds]; - let () = msg_send![*render_layer, setFrame: bounds]; + let superlayer: *mut Object = msg_send![render_layer.as_ptr(), superlayer]; + if !superlayer.is_null() { + let scale_factor: CGFloat = msg_send![superlayer, contentsScale]; + let () = msg_send![render_layer.as_ptr(), setContentsScale: scale_factor]; } } + + let device_raw = device.shared.device.lock(); render_layer.set_device(&device_raw); render_layer.set_pixel_format(caps.map_format(config.format)); render_layer.set_framebuffer_only(framebuffer_only); diff --git a/wgpu-hal/src/vulkan/adapter.rs b/wgpu-hal/src/vulkan/adapter.rs index f323456eaa..ab6ae02c6f 100644 --- a/wgpu-hal/src/vulkan/adapter.rs +++ b/wgpu-hal/src/vulkan/adapter.rs @@ -1,6 +1,6 @@ use super::conv; -use ash::{amd, ext, khr, vk}; +use ash::{amd, ext, google, khr, vk}; use parking_lot::Mutex; use std::{collections::BTreeMap, ffi::CStr, sync::Arc}; @@ -342,9 +342,6 @@ impl PhysicalDeviceFeatures { None }, robustness2: if enabled_extensions.contains(&ext::robustness2::NAME) { - // Note: enabling `robust_buffer_access2` isn't requires, strictly speaking - // since we can enable `robust_buffer_access` all the time. But it improves - // program portability, so we opt into it if they are supported. Some( vk::PhysicalDeviceRobustness2FeaturesEXT::default() .robust_buffer_access2(private_caps.robust_buffer_access2) @@ -771,6 +768,11 @@ impl PhysicalDeviceFeatures { ); } + features.set( + F::VULKAN_GOOGLE_DISPLAY_TIMING, + caps.supports_extension(google::display_timing::NAME), + ); + (features, dl_flags) } @@ -837,6 +839,10 @@ pub struct PhysicalDeviceProperties { /// `VK_EXT_subgroup_size_control` extension, promoted to Vulkan 1.3. subgroup_size_control: Option>, + /// Additional `vk::PhysicalDevice` properties from the + /// `VK_EXT_robustness2` extension. 
+ robustness2: Option>, + /// The device API version. /// /// Which is the version of Vulkan supported for device-level functionality. @@ -1004,6 +1010,11 @@ impl PhysicalDeviceProperties { extensions.push(khr::shader_atomic_int64::NAME); } + // Require VK_GOOGLE_display_timing if the associated feature was requested + if requested_features.contains(wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING) { + extensions.push(google::display_timing::NAME); + } + extensions } @@ -1087,13 +1098,38 @@ impl PhysicalDeviceProperties { } } - fn to_hal_alignments(&self) -> crate::Alignments { + /// Return a `wgpu_hal::Alignments` structure describing this adapter. + /// + /// The `using_robustness2` argument says how this adapter will implement + /// `wgpu_hal`'s guarantee that shaders can only read the [accessible + /// region][ar] of bindgroup's buffer bindings: + /// + /// - If this adapter will depend on `VK_EXT_robustness2`'s + /// `robustBufferAccess2` feature to apply bounds checks to shader buffer + /// access, `using_robustness2` must be `true`. + /// + /// - Otherwise, this adapter must use Naga to inject bounds checks on + /// buffer accesses, and `using_robustness2` must be `false`. + /// + /// [ar]: ../../struct.BufferBinding.html#accessible-region + fn to_hal_alignments(&self, using_robustness2: bool) -> crate::Alignments { let limits = &self.properties.limits; crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(limits.optimal_buffer_copy_offset_alignment) .unwrap(), buffer_copy_pitch: wgt::BufferSize::new(limits.optimal_buffer_copy_row_pitch_alignment) .unwrap(), + uniform_bounds_check_alignment: { + let alignment = if using_robustness2 { + self.robustness2 + .unwrap() // if we're using it, we should have its properties + .robust_uniform_buffer_access_size_alignment + } else { + // If the `robustness2` properties are unavailable, then `robustness2` is not available either. Naga-injected bounds checks are precise. 
+ 1 + }; + wgt::BufferSize::new(alignment).unwrap() + }, } } } @@ -1123,6 +1159,7 @@ impl super::InstanceShared { let supports_subgroup_size_control = capabilities.device_api_version >= vk::API_VERSION_1_3 || capabilities.supports_extension(ext::subgroup_size_control::NAME); + let supports_robustness2 = capabilities.supports_extension(ext::robustness2::NAME); let supports_acceleration_structure = capabilities.supports_extension(khr::acceleration_structure::NAME); @@ -1170,6 +1207,13 @@ impl super::InstanceShared { properties2 = properties2.push_next(next); } + if supports_robustness2 { + let next = capabilities + .robustness2 + .insert(vk::PhysicalDeviceRobustness2PropertiesEXT::default()); + properties2 = properties2.push_next(next); + } + unsafe { get_device_properties.get_physical_device_properties2(phd, &mut properties2) }; @@ -1181,6 +1225,7 @@ impl super::InstanceShared { capabilities .supported_extensions .retain(|&x| x.extension_name_as_c_str() != Ok(ext::robustness2::NAME)); + capabilities.robustness2 = None; } }; capabilities @@ -1497,7 +1542,7 @@ impl super::Instance { }; let capabilities = crate::Capabilities { limits: phd_capabilities.to_wgpu_limits(), - alignments: phd_capabilities.to_hal_alignments(), + alignments: phd_capabilities.to_hal_alignments(private_caps.robust_buffer_access2), downlevel: wgt::DownlevelCapabilities { flags: downlevel_flags, limits: wgt::DownlevelLimits {}, @@ -1593,11 +1638,13 @@ impl super::Adapter { /// - `raw_device` must be created from this adapter. /// - `raw_device` must be created using `family_index`, `enabled_extensions` and `physical_device_features()` /// - `enabled_extensions` must be a superset of `required_device_extensions()`. + /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of `raw_device`. If + /// `drop_callback` is [`Some`], `raw_device` must be valid until the callback is called. 
#[allow(clippy::too_many_arguments)] pub unsafe fn device_from_raw( &self, raw_device: ash::Device, - handle_is_owned: bool, + drop_callback: Option, enabled_extensions: &[&'static CStr], features: wgt::Features, memory_hints: &wgt::MemoryHints, @@ -1767,7 +1814,7 @@ impl super::Adapter { capabilities: Some(capabilities.iter().cloned().collect()), bounds_check_policies: naga::proc::BoundsCheckPolicies { index: naga::proc::BoundsCheckPolicy::Restrict, - buffer: if self.private_caps.robust_buffer_access { + buffer: if self.private_caps.robust_buffer_access2 { naga::proc::BoundsCheckPolicy::Unchecked } else { naga::proc::BoundsCheckPolicy::Restrict @@ -1812,12 +1859,14 @@ impl super::Adapter { 0, 0, 0, 0, ]; + let drop_guard = crate::DropGuard::from_option(drop_callback); + let shared = Arc::new(super::DeviceShared { raw: raw_device, family_index, queue_index, raw_queue, - handle_is_owned, + drop_guard, instance: Arc::clone(&self.instance), physical_device: self.raw, enabled_extensions: enabled_extensions.into(), @@ -1996,7 +2045,7 @@ impl crate::Adapter for super::Adapter { vk::Result::ERROR_TOO_MANY_OBJECTS => crate::DeviceError::OutOfMemory, vk::Result::ERROR_INITIALIZATION_FAILED => crate::DeviceError::Lost, vk::Result::ERROR_EXTENSION_NOT_PRESENT | vk::Result::ERROR_FEATURE_NOT_PRESENT => { - super::hal_usage_error(err) + crate::hal_usage_error(err) } other => super::map_host_device_oom_and_lost_err(other), } @@ -2005,7 +2054,7 @@ impl crate::Adapter for super::Adapter { unsafe { self.device_from_raw( raw_device, - true, + None, &enabled_extensions, features, memory_hints, diff --git a/wgpu-hal/src/vulkan/command.rs b/wgpu-hal/src/vulkan/command.rs index dd89f7dae2..6b02e35f4e 100644 --- a/wgpu-hal/src/vulkan/command.rs +++ b/wgpu-hal/src/vulkan/command.rs @@ -3,7 +3,11 @@ use super::conv; use arrayvec::ArrayVec; use ash::vk; -use std::{mem, ops::Range, slice}; +use std::{ + mem::{self, size_of}, + ops::Range, + slice, +}; const ALLOCATION_GRANULARITY: u32 = 16; 
const DST_IMAGE_LAYOUT: vk::ImageLayout = vk::ImageLayout::TRANSFER_DST_OPTIMAL; @@ -499,6 +503,9 @@ impl crate::CommandEncoder for super::CommandEncoder { for triangles in in_geometries { let mut triangle_data = vk::AccelerationStructureGeometryTrianglesDataKHR::default() + // IndexType::NONE_KHR is not set by default (due to being provided by VK_KHR_acceleration_structure) but unless there is an + // index buffer we need to have IndexType::NONE_KHR as our index type. + .index_type(vk::IndexType::NONE_KHR) .vertex_data(vk::DeviceOrHostAddressConstKHR { device_address: get_device_address(triangles.vertex_buffer), }) @@ -1012,7 +1019,7 @@ impl crate::CommandEncoder for super::CommandEncoder { buffer.raw, offset, draw_count, - mem::size_of::() as u32, + size_of::() as u32, ) }; } @@ -1028,7 +1035,7 @@ impl crate::CommandEncoder for super::CommandEncoder { buffer.raw, offset, draw_count, - mem::size_of::() as u32, + size_of::() as u32, ) }; } @@ -1040,7 +1047,7 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - let stride = mem::size_of::() as u32; + let stride = size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { unsafe { @@ -1066,7 +1073,7 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - let stride = mem::size_of::() as u32; + let stride = size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { unsafe { diff --git a/wgpu-hal/src/vulkan/conv.rs b/wgpu-hal/src/vulkan/conv.rs index 5576f34c00..f7cce466cb 100644 --- a/wgpu-hal/src/vulkan/conv.rs +++ b/wgpu-hal/src/vulkan/conv.rs @@ -36,7 +36,7 @@ impl super::PrivateCapabilities { Tf::Rgba8Sint => F::R8G8B8A8_SINT, Tf::Rgb10a2Uint => F::A2B10G10R10_UINT_PACK32, Tf::Rgb10a2Unorm => F::A2B10G10R10_UNORM_PACK32, - Tf::Rg11b10UFloat => F::B10G11R11_UFLOAT_PACK32, + Tf::Rg11b10Ufloat => F::B10G11R11_UFLOAT_PACK32, Tf::R64Uint 
=> F::R64_UINT, Tf::Rg32Uint => F::R32G32_UINT, Tf::Rg32Sint => F::R32G32_SINT, diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs index 70136bdfb5..6c3bfc5ed2 100644 --- a/wgpu-hal/src/vulkan/device.rs +++ b/wgpu-hal/src/vulkan/device.rs @@ -15,6 +15,13 @@ use std::{ }; impl super::DeviceShared { + /// Set the name of `object` to `name`. + /// + /// If `name` contains an interior null byte, then the name set will be truncated to that byte. + /// + /// # Safety + /// + /// It must be valid to set `object`'s debug name pub(super) unsafe fn set_object_name(&self, object: impl vk::Handle, name: &str) { let Some(extension) = self.extension_fns.debug_utils.as_ref() else { return; @@ -44,7 +51,7 @@ impl super::DeviceShared { &buffer_vec }; - let name = unsafe { CStr::from_bytes_with_nul_unchecked(name_bytes) }; + let name = CStr::from_bytes_until_nul(name_bytes).expect("We have added a null byte"); let _result = unsafe { extension.set_debug_utils_object_name( @@ -290,7 +297,7 @@ impl super::DeviceShared { for &raw in self.framebuffers.lock().values() { unsafe { self.raw.destroy_framebuffer(raw, None) }; } - if self.handle_is_owned { + if self.drop_guard.is_none() { unsafe { self.raw.destroy_device(None) }; } } @@ -642,19 +649,20 @@ impl super::Device { view_formats: wgt_view_formats, surface_semaphores, next_semaphore_index: 0, + next_present_time: None, }) } /// # Safety /// /// - `vk_image` must be created respecting `desc` - /// - If `drop_guard` is `Some`, the application must manually destroy the image handle. This - /// can be done inside the `Drop` impl of `drop_guard`. + /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of `vk_image`. If + /// `drop_callback` is [`Some`], `vk_image` must be valid until the callback is called. /// - If the `ImageCreateFlags` does not contain `MUTABLE_FORMAT`, the `view_formats` of `desc` must be empty. 
pub unsafe fn texture_from_raw( vk_image: vk::Image, desc: &crate::TextureDescriptor, - drop_guard: Option, + drop_callback: Option, ) -> super::Texture { let mut raw_flags = vk::ImageCreateFlags::empty(); let mut view_formats = vec![]; @@ -673,6 +681,8 @@ impl super::Device { raw_flags |= vk::ImageCreateFlags::MUTABLE_FORMAT; } + let drop_guard = crate::DropGuard::from_option(drop_callback); + super::Texture { raw: vk_image, drop_guard, @@ -759,6 +769,7 @@ impl super::Device { temp_options.debug_info = Some(naga::back::spv::DebugInfo { source_code: &debug.source_code, file_name: debug.file_name.as_ref().as_ref(), + language: naga::back::spv::SourceLanguage::WGSL, }) } if !stage.zero_initialize_workgroup_memory { @@ -953,6 +964,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -967,14 +982,14 @@ impl crate::Device for super::Device { .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT); Ok(crate::BufferMapping { ptr, is_coherent }) } else { - super::hal_usage_error("tried to map external buffer") + crate::hal_usage_error("tried to map external buffer") } } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) { if let Some(ref block) = buffer.block { unsafe { block.lock().unmap(&*self.shared) }; } else { - super::hal_usage_error("tried to unmap external buffer") + crate::hal_usage_error("tried to unmap external buffer") } } @@ -1124,6 +1139,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, @@ -1724,6 +1743,7 @@ impl crate::Device for super::Device { .map(|d| naga::back::spv::DebugInfo { source_code: d.source_code.as_ref(), file_name: d.file_name.as_ref().as_ref(), + language: 
naga::back::spv::SourceLanguage::WGSL, }); if !desc.runtime_checks { naga_options.bounds_check_policies = naga::proc::BoundsCheckPolicies { @@ -2517,7 +2537,7 @@ impl super::DeviceShared { } } None => { - super::hal_usage_error(format!( + crate::hal_usage_error(format!( "no signals reached value {}", wait_value )); @@ -2534,7 +2554,7 @@ impl From for crate::DeviceError { use gpu_alloc::AllocationError as Ae; match error { Ae::OutOfDeviceMemory | Ae::OutOfHostMemory | Ae::TooManyObjects => Self::OutOfMemory, - Ae::NoCompatibleMemoryTypes => super::hal_usage_error(error), + Ae::NoCompatibleMemoryTypes => crate::hal_usage_error(error), } } } @@ -2543,7 +2563,7 @@ impl From for crate::DeviceError { use gpu_alloc::MapError as Me; match error { Me::OutOfDeviceMemory | Me::OutOfHostMemory | Me::MapFailed => Self::OutOfMemory, - Me::NonHostVisible | Me::AlreadyMapped => super::hal_usage_error(error), + Me::NonHostVisible | Me::AlreadyMapped => crate::hal_usage_error(error), } } } diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index 832c74b030..5673859e45 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ b/wgpu-hal/src/vulkan/instance.rs @@ -310,6 +310,8 @@ impl super::Instance { /// - `extensions` must be a superset of `desired_extensions()` and must be created from the /// same entry, `instance_api_version`` and flags. /// - `android_sdk_version` is ignored and can be `0` for all platforms besides Android + /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of `raw_instance`. If + /// `drop_callback` is [`Some`], `raw_instance` must be valid until the callback is called. /// /// If `debug_utils_user_data` is `Some`, then the validation layer is /// available, so create a [`vk::DebugUtilsMessengerEXT`]. 
@@ -323,7 +325,7 @@ impl super::Instance { extensions: Vec<&'static CStr>, flags: wgt::InstanceFlags, has_nv_optimus: bool, - drop_guard: Option, + drop_callback: Option, ) -> Result { log::debug!("Instance version: 0x{:x}", instance_api_version); @@ -364,6 +366,8 @@ impl super::Instance { None }; + let drop_guard = crate::DropGuard::from_option(drop_callback); + Ok(Self { shared: Arc::new(super::InstanceShared { raw: raw_instance, @@ -510,7 +514,7 @@ impl super::Instance { #[cfg(metal)] fn create_surface_from_view( &self, - view: *mut c_void, + view: std::ptr::NonNull, ) -> Result { if !self.shared.extensions.contains(&ext::metal_surface::NAME) { return Err(crate::InstanceError::new(String::from( @@ -518,16 +522,17 @@ impl super::Instance { ))); } - let layer = unsafe { - crate::metal::Surface::get_metal_layer(view.cast::(), None) - }; + let layer = unsafe { crate::metal::Surface::get_metal_layer(view.cast()) }; + // NOTE: The layer is retained by Vulkan's `vkCreateMetalSurfaceEXT`, + // so no need to retain it beyond the scope of this function. 
+ let layer_ptr = (*layer).cast(); let surface = { let metal_loader = ext::metal_surface::Instance::new(&self.shared.entry, &self.shared.raw); let vk_info = vk::MetalSurfaceCreateInfoEXT::default() .flags(vk::MetalSurfaceCreateFlagsEXT::empty()) - .layer(layer.cast()); + .layer(layer_ptr); unsafe { metal_loader.create_metal_surface(&vk_info, None).unwrap() } }; @@ -550,12 +555,11 @@ impl Drop for super::InstanceShared { fn drop(&mut self) { unsafe { // Keep du alive since destroy_instance may also log - let _du = self.debug_utils.take().map(|du| { + let _du = self.debug_utils.take().inspect(|du| { du.extension .destroy_debug_utils_messenger(du.messenger, None); - du }); - if let Some(_drop_guard) = self.drop_guard.take() { + if self.drop_guard.is_none() { self.raw.destroy_instance(None); } } @@ -829,7 +833,7 @@ impl crate::Instance for super::Instance { extensions, desc.flags, has_nv_optimus, - Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance + None, ) } } @@ -870,13 +874,13 @@ impl crate::Instance for super::Instance { (Rwh::AppKit(handle), _) if self.shared.extensions.contains(&ext::metal_surface::NAME) => { - self.create_surface_from_view(handle.ns_view.as_ptr()) + self.create_surface_from_view(handle.ns_view) } #[cfg(all(target_os = "ios", feature = "metal"))] (Rwh::UiKit(handle), _) if self.shared.extensions.contains(&ext::metal_surface::NAME) => { - self.create_surface_from_view(handle.ui_view.as_ptr()) + self.create_surface_from_view(handle.ui_view) } (_, _) => Err(crate::InstanceError::new(format!( "window handle {window_handle:?} is not a Vulkan-compatible handle" diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs index 26186d5fa8..843b836f46 100644 --- a/wgpu-hal/src/vulkan/mod.rs +++ b/wgpu-hal/src/vulkan/mod.rs @@ -355,6 +355,13 @@ struct Swapchain { /// index as the image index, but we need to specify the semaphore as an argument /// to the acquire_next_image function which is what tells us 
which image to use. next_semaphore_index: usize, + /// The present timing information which will be set in the next call to [`present()`](crate::Queue::present()). + /// + /// # Safety + /// + /// This must only be set if [`wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING`] is enabled, and + /// so the VK_GOOGLE_display_timing extension is present. + next_present_time: Option, } impl Swapchain { @@ -375,6 +382,47 @@ pub struct Surface { swapchain: RwLock>, } +impl Surface { + /// Get the raw Vulkan swapchain associated with this surface. + /// + /// Returns [`None`] if the surface is not configured. + pub fn raw_swapchain(&self) -> Option { + let read = self.swapchain.read(); + read.as_ref().map(|it| it.raw) + } + + /// Set the present timing information which will be used for the next [presentation](crate::Queue::present()) of this surface, + /// using [VK_GOOGLE_display_timing]. + /// + /// This can be used to give an id to presentations, for future use of [`vk::PastPresentationTimingGOOGLE`]. + /// Note that `wgpu-hal` does *not* provide a way to use that API - you should manually access this through [`ash`]. + /// + /// This can also be used to add a "not before" timestamp to the presentation. + /// + /// The exact semantics of the fields are also documented in the [specification](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPresentTimeGOOGLE.html) for the extension. + /// + /// # Panics + /// + /// - If the surface hasn't been configured. + /// - If the device doesn't [support present timing](wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING). 
+ /// + /// [VK_GOOGLE_display_timing]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_GOOGLE_display_timing.html + #[track_caller] + pub fn set_next_present_time(&self, present_timing: vk::PresentTimeGOOGLE) { + let mut swapchain = self.swapchain.write(); + let swapchain = swapchain + .as_mut() + .expect("Surface should have been configured"); + let features = wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING; + if swapchain.device.features.contains(features) { + swapchain.next_present_time = Some(present_timing); + } else { + // Ideally we'd use something like `device.required_features` here, but that's in `wgpu-core`, which we are a dependency of + panic!("Tried to set display timing properties without the corresponding feature ({features:?}) enabled."); + } + } +} + #[derive(Debug)] pub struct SurfaceTexture { index: u32, @@ -445,9 +493,36 @@ struct PrivateCapabilities { /// Ability to present contents to any screen. Only needed to work around broken platform configurations. can_present: bool, non_coherent_map_mask: wgt::BufferAddress, + + /// True if this adapter advertises the [`robustBufferAccess`][vrba] feature. + /// + /// Note that Vulkan's `robustBufferAccess` is not sufficient to implement + /// `wgpu_hal`'s guarantee that shaders will not access buffer contents via + /// a given bindgroup binding outside that binding's [accessible + /// region][ar]. Enabling `robustBufferAccess` does ensure that + /// out-of-bounds reads and writes are not undefined behavior (that's good), + /// but still permits out-of-bounds reads to return data from anywhere + /// within the buffer, not just the accessible region. 
+ /// + /// [ar]: ../struct.BufferBinding.html#accessible-region + /// [vrba]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#features-robustBufferAccess robust_buffer_access: bool, + robust_image_access: bool, + + /// True if this adapter supports the [`VK_EXT_robustness2`] extension's + /// [`robustBufferAccess2`] feature. + /// + /// This is sufficient to implement `wgpu_hal`'s [required bounds-checking][ar] of + /// shader accesses to buffer contents. If this feature is not available, + /// this backend must have Naga inject bounds checks in the generated + /// SPIR-V. + /// + /// [`VK_EXT_robustness2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_robustness2.html + /// [`robustBufferAccess2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPhysicalDeviceRobustness2FeaturesEXT.html#features-robustBufferAccess2 + /// [ar]: ../struct.BufferBinding.html#accessible-region robust_buffer_access2: bool, + robust_image_access2: bool, zero_initialize_workgroup_memory: bool, image_format_list: bool, @@ -547,7 +622,7 @@ struct DeviceShared { family_index: u32, queue_index: u32, raw_queue: vk::Queue, - handle_is_owned: bool, + drop_guard: Option, instance: Arc, physical_device: vk::PhysicalDevice, enabled_extensions: Vec<&'static CStr>, @@ -1158,6 +1233,23 @@ impl crate::Queue for Queue { .image_indices(&image_indices) .wait_semaphores(swapchain_semaphores.get_present_wait_semaphores()); + let mut display_timing; + let present_times; + let vk_info = if let Some(present_time) = ssc.next_present_time.take() { + debug_assert!( + ssc.device + .features + .contains(wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING), + "`next_present_time` should only be set if `VULKAN_GOOGLE_DISPLAY_TIMING` is enabled" + ); + present_times = [present_time]; + display_timing = vk::PresentTimesInfoGOOGLE::default().times(&present_times); + // SAFETY: We know that VK_GOOGLE_display_timing is present because of the safety 
contract on `next_present_time`. + vk_info.push_next(&mut display_timing) + } else { + vk_info + }; + let suboptimal = { profiling::scope!("vkQueuePresentKHR"); unsafe { self.swapchain_fn.queue_present(self.raw, &vk_info) }.map_err(|error| { @@ -1295,8 +1387,3 @@ fn get_lost_err() -> crate::DeviceError { #[allow(unreachable_code)] crate::DeviceError::Lost } - -#[cold] -fn hal_usage_error(txt: T) -> ! { - panic!("wgpu-hal invariant was violated (usage error): {txt}") -} diff --git a/wgpu-info/src/texture.rs b/wgpu-info/src/texture.rs index ea4cb0ea20..64325f0e5b 100644 --- a/wgpu-info/src/texture.rs +++ b/wgpu-info/src/texture.rs @@ -32,7 +32,7 @@ pub const TEXTURE_FORMAT_LIST: [wgpu::TextureFormat; 117] = [ wgpu::TextureFormat::Rgb9e5Ufloat, wgpu::TextureFormat::Rgb10a2Uint, wgpu::TextureFormat::Rgb10a2Unorm, - wgpu::TextureFormat::Rg11b10UFloat, + wgpu::TextureFormat::Rg11b10Ufloat, wgpu::TextureFormat::R64Uint, wgpu::TextureFormat::Rg32Uint, wgpu::TextureFormat::Rg32Sint, diff --git a/wgpu-macros/src/lib.rs b/wgpu-macros/src/lib.rs index 0b0812507f..19eea678a1 100644 --- a/wgpu-macros/src/lib.rs +++ b/wgpu-macros/src/lib.rs @@ -37,7 +37,7 @@ pub fn gpu_test(_attr: TokenStream, item: TokenStream) -> TokenStream { // Allow any type that can be converted to a GpuTestConfiguration let test_config = ::wgpu_test::GpuTestConfiguration::from(#expr).name_from_init_function_typename::(#ident_lower); - ::wgpu_test::execute_test(test_config, None, 0).await; + ::wgpu_test::execute_test(None, test_config, None).await; } } .into() diff --git a/wgpu-types/Cargo.toml b/wgpu-types/Cargo.toml index e79b301342..38bda98bc2 100644 --- a/wgpu-types/Cargo.toml +++ b/wgpu-types/Cargo.toml @@ -43,9 +43,11 @@ js-sys.workspace = true web-sys = { workspace = true, features = [ "ImageBitmap", "ImageData", + "HtmlImageElement", "HtmlVideoElement", "HtmlCanvasElement", "OffscreenCanvas", + "VideoFrame", ] } [dev-dependencies] diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index 
1fe12d3349..545dbe8b2a 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -13,6 +13,7 @@ use serde::Deserialize; #[cfg(any(feature = "serde", test))] use serde::Serialize; use std::hash::{Hash, Hasher}; +use std::mem::size_of; use std::path::PathBuf; use std::{num::NonZeroU32, ops::Range}; @@ -401,7 +402,7 @@ bitflags::bitflags! { const SHADER_F16 = 1 << 8; - /// Allows for usage of textures of format [`TextureFormat::Rg11b10UFloat`] as a render target + /// Allows for usage of textures of format [`TextureFormat::Rg11b10Ufloat`] as a render target /// /// Supported platforms: /// - Vulkan @@ -567,7 +568,7 @@ bitflags::bitflags! { /// may also create uniform arrays of storage textures. /// /// ex. - /// - `var textures: array, 10>` (WGSL) + /// - `var textures: array, 10>` (WGSL) /// - `uniform image2D textures[10]` (GLSL) /// /// This capability allows them to exist and to be indexed by dynamically uniform @@ -952,6 +953,22 @@ bitflags::bitflags! { /// /// This is a native only feature. const SHADER_INT64_ATOMIC_ALL_OPS = 1 << 61; + /// Allows using the [VK_GOOGLE_display_timing] Vulkan extension. + /// + /// This is used for frame pacing to reduce latency, and is generally only available on Android. + /// + /// This feature does not have a `wgpu`-level API, and so users of wgpu wishing + /// to use this functionality must access it using various `as_hal` functions, + /// primarily [`Surface::as_hal()`], to then use. + /// + /// Supported platforms: + /// - Vulkan (with [VK_GOOGLE_display_timing]) + /// + /// This is a native only feature. + /// + /// [VK_GOOGLE_display_timing]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_GOOGLE_display_timing.html + /// [`Surface::as_hal()`]: https://docs.rs/wgpu/latest/wgpu/struct.Surface.html#method.as_hal + const VULKAN_GOOGLE_DISPLAY_TIMING = 1 << 62; /// Enables R64Uint texture atomic min and max. /// /// Supported platforms: @@ -960,7 +977,7 @@ bitflags::bitflags! 
{ /// - Metal (with MSL 3.1+ emulated via RG32Uint texture) /// /// This is a native only feature. - const TEXTURE_INT64_ATOMIC = 1 << 62; + const TEXTURE_INT64_ATOMIC = 1 << 63; } } @@ -1951,12 +1968,13 @@ impl TextureViewDimension { /// Alpha blend factor. /// -/// Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information. -/// /// Corresponds to [WebGPU `GPUBlendFactor`]( -/// https://gpuweb.github.io/gpuweb/#enumdef-gpublendfactor). -/// Values using S1 requires [`Features::DUAL_SOURCE_BLENDING`] and can only be -/// used with the first render target. +/// https://gpuweb.github.io/gpuweb/#enumdef-gpublendfactor). Values using `Src1` +/// require [`Features::DUAL_SOURCE_BLENDING`] and can only be used with the first +/// render target. +/// +/// For further details on how the blend factors are applied, see the analogous +/// functionality in OpenGL: . #[repr(C)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -2016,10 +2034,11 @@ impl BlendFactor { /// Alpha blend operation. /// -/// Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information. -/// /// Corresponds to [WebGPU `GPUBlendOperation`]( /// https://gpuweb.github.io/gpuweb/#enumdef-gpublendoperation). +/// +/// For further details on how the blend operations are applied, see +/// the analogous functionality in OpenGL: . #[repr(C)] #[derive(Copy, Clone, Debug, Default, Hash, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -2094,8 +2113,6 @@ impl Default for BlendComponent { /// Describe the blend state of a render pipeline, /// within [`ColorTargetState`]. /// -/// See the OpenGL or Vulkan spec for more information. -/// /// Corresponds to [WebGPU `GPUBlendState`]( /// https://gpuweb.github.io/gpuweb/#dictdef-gpublendstate). #[repr(C)] @@ -2546,7 +2563,7 @@ pub enum TextureFormat { /// Red, green, blue, and alpha channels. 
10 bit integer for RGB channels, 2 bit integer for alpha channel. [0, 1023] ([0, 3] for alpha) converted to/from float [0, 1] in shader. Rgb10a2Unorm, /// Red, green, and blue channels. 11 bit float with no sign bit for RG channels. 10 bit float with no sign bit for blue channel. Float in shader. - Rg11b10UFloat, + Rg11b10Ufloat, // Normal 64 bit formats /// Red channel only. 64 bit integer per channel. Unsigned in shader. @@ -2838,7 +2855,7 @@ impl<'de> Deserialize<'de> for TextureFormat { "bgra8unorm-srgb" => TextureFormat::Bgra8UnormSrgb, "rgb10a2uint" => TextureFormat::Rgb10a2Uint, "rgb10a2unorm" => TextureFormat::Rgb10a2Unorm, - "rg11b10ufloat" => TextureFormat::Rg11b10UFloat, + "rg11b10ufloat" => TextureFormat::Rg11b10Ufloat, "r64uint" => TextureFormat::R64Uint, "rg32uint" => TextureFormat::Rg32Uint, "rg32sint" => TextureFormat::Rg32Sint, @@ -2967,7 +2984,7 @@ impl Serialize for TextureFormat { TextureFormat::Bgra8UnormSrgb => "bgra8unorm-srgb", TextureFormat::Rgb10a2Uint => "rgb10a2uint", TextureFormat::Rgb10a2Unorm => "rgb10a2unorm", - TextureFormat::Rg11b10UFloat => "rg11b10ufloat", + TextureFormat::Rg11b10Ufloat => "rg11b10ufloat", TextureFormat::R64Uint => "r64uint", TextureFormat::Rg32Uint => "rg32uint", TextureFormat::Rg32Sint => "rg32sint", @@ -3210,7 +3227,7 @@ impl TextureFormat { | Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat + | Self::Rg11b10Ufloat | Self::R64Uint | Self::Rg32Uint | Self::Rg32Sint @@ -3319,7 +3336,7 @@ impl TextureFormat { | Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat + | Self::Rg11b10Ufloat | Self::Rg32Uint | Self::Rg32Sint | Self::Rg32Float @@ -3440,7 +3457,7 @@ impl TextureFormat { Self::Bgra8UnormSrgb => (msaa_resolve, attachment), Self::Rgb10a2Uint => ( msaa, attachment), Self::Rgb10a2Unorm => (msaa_resolve, attachment), - Self::Rg11b10UFloat => ( msaa, rg11b10f), + Self::Rg11b10Ufloat => ( msaa, rg11b10f), Self::R64Uint => ( noaa, attachment), 
Self::Rg32Uint => ( noaa, all_flags), Self::Rg32Sint => ( noaa, all_flags), @@ -3552,7 +3569,7 @@ impl TextureFormat { | Self::Rg16Float | Self::Rgba16Float | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(float), + | Self::Rg11b10Ufloat => Some(float), Self::R32Float | Self::Rg32Float | Self::Rgba32Float => Some(float32_sample_type), @@ -3685,7 +3702,7 @@ impl TextureFormat { | Self::Rg16Sint | Self::Rg16Float => Some(4), Self::R32Uint | Self::R32Sint | Self::R32Float => Some(4), - Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm | Self::Rg11b10UFloat => { + Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm | Self::Rg11b10Ufloat => { Some(4) } @@ -3789,7 +3806,7 @@ impl TextureFormat { | Self::Rg32Float | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(8), + | Self::Rg11b10Ufloat => Some(8), Self::Rgba32Uint | Self::Rgba32Sint | Self::Rgba32Float => Some(16), Self::Stencil8 | Self::Depth16Unorm @@ -3873,7 +3890,7 @@ impl TextureFormat { | Self::Rgba32Float | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(4), + | Self::Rg11b10Ufloat => Some(4), Self::Stencil8 | Self::Depth16Unorm | Self::Depth24Plus @@ -3965,7 +3982,7 @@ impl TextureFormat { | Self::Rgba32Sint | Self::Rgba32Float => 4, - Self::Rgb9e5Ufloat | Self::Rg11b10UFloat => 3, + Self::Rgb9e5Ufloat | Self::Rg11b10Ufloat => 3, Self::Rgb10a2Uint | Self::Rgb10a2Unorm => 4, Self::Stencil8 | Self::Depth16Unorm | Self::Depth24Plus | Self::Depth32Float => 1, @@ -4183,7 +4200,7 @@ fn texture_format_serialize() { "\"rgb10a2unorm\"".to_string() ); assert_eq!( - serde_json::to_string(&TextureFormat::Rg11b10UFloat).unwrap(), + serde_json::to_string(&TextureFormat::Rg11b10Ufloat).unwrap(), "\"rg11b10ufloat\"".to_string() ); assert_eq!( @@ -4484,7 +4501,7 @@ fn texture_format_deserialize() { ); assert_eq!( serde_json::from_str::("\"rg11b10ufloat\"").unwrap(), - TextureFormat::Rg11b10UFloat + TextureFormat::Rg11b10Ufloat ); assert_eq!( 
serde_json::from_str::("\"r64uint\"").unwrap(), @@ -5561,8 +5578,18 @@ pub struct SurfaceConfiguration { /// `Bgra8Unorm` and `Bgra8UnormSrgb` pub format: TextureFormat, /// Width of the swap chain. Must be the same size as the surface, and nonzero. + /// + /// If this is not the same size as the underlying surface (e.g. if it is + /// set once, and the window is later resized), the behaviour is defined + /// but platform-specific, and may change in the future (currently macOS + /// scales the surface, other platforms may do something else). pub width: u32, /// Height of the swap chain. Must be the same size as the surface, and nonzero. + /// + /// If this is not the same size as the underlying surface (e.g. if it is + /// set once, and the window is later resized), the behaviour is defined + /// but platform-specific, and may change in the future (currently macOS + /// scales the surface, other platforms may do something else). pub height: u32, /// Presentation mode of the swap chain. Fifo is the only mode guaranteed to be supported. 
/// FifoRelaxed, Immediate, and Mailbox will crash if unsupported, while AutoVsync and @@ -6547,7 +6574,7 @@ pub enum StorageTextureAccess { /// Example WGSL syntax: /// ```rust,ignore /// @group(0) @binding(0) - /// var my_storage_image: texture_storage_2d; + /// var my_storage_image: texture_storage_2d; /// ``` /// /// Example GLSL syntax: @@ -6564,7 +6591,7 @@ pub enum StorageTextureAccess { /// Example WGSL syntax: /// ```rust,ignore /// @group(0) @binding(0) - /// var my_storage_image: texture_storage_2d; + /// var my_storage_image: texture_storage_2d; /// ``` /// /// Example GLSL syntax: @@ -6581,7 +6608,7 @@ pub enum StorageTextureAccess { /// Example WGSL syntax: /// ```rust,ignore /// @group(0) @binding(0) - /// var my_storage_image: texture_storage_2d; + /// var my_storage_image: texture_storage_2d; /// ``` /// /// Example GLSL syntax: @@ -6705,8 +6732,8 @@ pub enum BindingType { /// Dimension of the texture view that is going to be sampled. view_dimension: TextureViewDimension, /// True if the texture has a sample count greater than 1. If this is true, - /// the texture must be read from shaders with `texture1DMS`, `texture2DMS`, or `texture3DMS`, - /// depending on `dimension`. + /// the texture must be declared as `texture_multisampled_2d` or + /// `texture_depth_multisampled_2d` in the shader, and read using `textureLoad`. multisampled: bool, }, /// A storage texture. @@ -6714,15 +6741,16 @@ pub enum BindingType { /// Example WGSL syntax: /// ```rust,ignore /// @group(0) @binding(0) - /// var my_storage_image: texture_storage_2d; + /// var my_storage_image: texture_storage_2d; /// ``` /// /// Example GLSL syntax: /// ```cpp,ignore /// layout(set=0, binding=0, r32f) writeonly uniform image2D myStorageImage; /// ``` - /// Note that the texture format must be specified in the shader as well. 
- /// A list of valid formats can be found in the specification here: + /// Note that the texture format must be specified in the shader, along with the + /// access mode. For WGSL, the format must be one of the enumerants in the list + /// of [storage texel formats](https://gpuweb.github.io/gpuweb/wgsl/#storage-texel-formats). /// /// Corresponds to [WebGPU `GPUStorageTextureBindingLayout`]( /// https://gpuweb.github.io/gpuweb/#dictdef-gpustoragetexturebindinglayout). @@ -6886,6 +6914,9 @@ pub enum ExternalImageSource { /// /// Requires [`DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES`] OffscreenCanvas(web_sys::OffscreenCanvas), + /// Copy from a video frame. + #[cfg(web_sys_unstable_apis)] + VideoFrame(web_sys::VideoFrame), } #[cfg(target_arch = "wasm32")] @@ -6899,6 +6930,8 @@ impl ExternalImageSource { ExternalImageSource::ImageData(i) => i.width(), ExternalImageSource::HTMLCanvasElement(c) => c.width(), ExternalImageSource::OffscreenCanvas(c) => c.width(), + #[cfg(web_sys_unstable_apis)] + ExternalImageSource::VideoFrame(v) => v.display_width(), } } @@ -6911,6 +6944,8 @@ impl ExternalImageSource { ExternalImageSource::ImageData(i) => i.height(), ExternalImageSource::HTMLCanvasElement(c) => c.height(), ExternalImageSource::OffscreenCanvas(c) => c.height(), + #[cfg(web_sys_unstable_apis)] + ExternalImageSource::VideoFrame(v) => v.display_height(), } } } @@ -6927,6 +6962,8 @@ impl std::ops::Deref for ExternalImageSource { Self::ImageData(i) => i, Self::HTMLCanvasElement(c) => c, Self::OffscreenCanvas(c) => c, + #[cfg(web_sys_unstable_apis)] + Self::VideoFrame(v) => v, } } } @@ -7234,7 +7271,7 @@ impl DrawIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7265,7 +7302,7 @@ impl DrawIndexedIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7290,7 
+7327,7 @@ impl DispatchIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7315,8 +7352,15 @@ impl ShaderBoundChecks { /// Creates a new configuration where the shader isn't bound checked. /// /// # Safety - /// The caller MUST ensure that all shaders built with this configuration don't perform any - /// out of bounds reads or writes. + /// + /// The caller MUST ensure that all shaders built with this configuration + /// don't perform any out of bounds reads or writes. + /// + /// Note that `wgpu_core`, in particular, initializes only those portions of + /// buffers that it expects might be read, and it does not expect contents + /// outside the ranges bound in bindgroups to be accessible, so using this + /// configuration with ill-behaved shaders could expose uninitialized GPU + /// memory contents to the application. #[must_use] pub unsafe fn unchecked() -> Self { ShaderBoundChecks { @@ -7339,10 +7383,6 @@ impl Default for ShaderBoundChecks { /// Selects which DX12 shader compiler to use. /// -/// If the `wgpu-hal/dx12-shader-compiler` feature isn't enabled then this will fall back -/// to the Fxc compiler at runtime and log an error. -/// This feature is always enabled when using `wgpu`. -/// /// If the `Dxc` option is selected, but `dxcompiler.dll` and `dxil.dll` files aren't found, /// then this will fall back to the Fxc compiler at runtime and log an error. /// @@ -7359,6 +7399,10 @@ pub enum Dx12Compiler { /// /// However, it requires both `dxcompiler.dll` and `dxil.dll` to be shipped with the application. /// These files can be downloaded from . + /// + /// Minimum supported version: [v1.5.2010](https://github.com/microsoft/DirectXShaderCompiler/releases/tag/v1.5.2010) + /// + /// It also requires WDDM 2.1 (Windows 10 version 1607). Dxc { /// Path to the `dxil.dll` file, or path to the directory containing `dxil.dll` file. 
Passing `None` will use standard platform specific dll loading rules. dxil_path: Option, @@ -7538,10 +7582,4 @@ pub enum DeviceLostReason { /// exactly once before it is dropped, which helps with managing the /// memory owned by the callback. ReplacedCallback = 3, - /// When setting the callback, but the device is already invalid - /// - /// As above, when the callback is provided, wgpu guarantees that it - /// will eventually be called. If the device is already invalid, wgpu - /// will call the callback immediately, with this reason. - DeviceInvalid = 4, } diff --git a/wgpu/Cargo.toml b/wgpu/Cargo.toml index 2512840a64..9569281eec 100644 --- a/wgpu/Cargo.toml +++ b/wgpu/Cargo.toml @@ -161,10 +161,7 @@ hal = { workspace = true } hal = { workspace = true, features = ["renderdoc"] } [target.'cfg(windows)'.dependencies] -hal = { workspace = true, features = [ - "dxc_shader_compiler", - "renderdoc", -] } +hal = { workspace = true, features = ["renderdoc"] } [target.'cfg(target_arch = "wasm32")'.dependencies.hal] workspace = true diff --git a/wgpu/src/api/adapter.rs b/wgpu/src/api/adapter.rs index 5f43a461f1..034ea37abe 100644 --- a/wgpu/src/api/adapter.rs +++ b/wgpu/src/api/adapter.rs @@ -1,6 +1,6 @@ use std::{future::Future, sync::Arc, thread}; -use crate::context::{DeviceRequest, DynContext, ObjectId}; +use crate::context::{DeviceRequest, DynContext}; use crate::*; /// Handle to a physical graphics and/or compute device. 
@@ -14,7 +14,6 @@ use crate::*; #[derive(Debug)] pub struct Adapter { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -23,7 +22,7 @@ static_assertions::assert_impl_all!(Adapter: Send, Sync); impl Drop for Adapter { fn drop(&mut self) { if !thread::panicking() { - self.context.adapter_drop(&self.id, self.data.as_ref()) + self.context.adapter_drop(self.data.as_ref()) } } } @@ -40,14 +39,6 @@ pub type RequestAdapterOptions<'a, 'b> = RequestAdapterOptionsBase<&'a Surface<' static_assertions::assert_impl_all!(RequestAdapterOptions<'_, '_>: Send, Sync); impl Adapter { - /// Returns a globally-unique identifier for this `Adapter`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Requests a connection to a physical device, creating a logical device. /// /// Returns the [`Device`] together with a [`Queue`] that executes command buffers. @@ -80,7 +71,6 @@ impl Adapter { let context = Arc::clone(&self.context); let device = DynContext::adapter_request_device( &*self.context, - &self.id, self.data.as_ref(), desc, trace_path, @@ -88,20 +78,16 @@ impl Adapter { async move { device.await.map( |DeviceRequest { - device_id, device_data, - queue_id, queue_data, }| { ( Device { context: Arc::clone(&context), - id: device_id, data: device_data, }, Queue { context, - id: queue_id, data: queue_data, }, ) @@ -131,18 +117,21 @@ impl Adapter { // Part of the safety requirements is that the device was generated from the same adapter. // Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices. 
.unwrap() - .create_device_from_hal(&self.id.into(), hal_device, desc, trace_path) + .create_device_from_hal( + crate::context::downcast_ref(self.data.as_ref()), + hal_device, + desc, + trace_path, + ) } .map(|(device, queue)| { ( Device { context: Arc::clone(&context), - id: device.id().into(), data: Box::new(device), }, Queue { context, - id: queue.id().into(), data: Box::new(queue), }, ) @@ -178,7 +167,12 @@ impl Adapter { .as_any() .downcast_ref::() { - unsafe { ctx.adapter_as_hal::(self.id.into(), hal_adapter_callback) } + unsafe { + ctx.adapter_as_hal::( + crate::context::downcast_ref(self.data.as_ref()), + hal_adapter_callback, + ) + } } else { hal_adapter_callback(None) } @@ -188,31 +182,29 @@ impl Adapter { pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool { DynContext::adapter_is_surface_supported( &*self.context, - &self.id, self.data.as_ref(), - &surface.id, surface.surface_data.as_ref(), ) } /// The features which can be used to create devices on this adapter. pub fn features(&self) -> Features { - DynContext::adapter_features(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_features(&*self.context, self.data.as_ref()) } /// The best limits which can be used to create devices on this adapter. pub fn limits(&self) -> Limits { - DynContext::adapter_limits(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_limits(&*self.context, self.data.as_ref()) } /// Get info about the adapter itself. pub fn get_info(&self) -> AdapterInfo { - DynContext::adapter_get_info(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_get_info(&*self.context, self.data.as_ref()) } /// Get info about the adapter itself. 
pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities { - DynContext::adapter_downlevel_capabilities(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref()) } /// Returns the features supported for a given texture format by this adapter. @@ -220,12 +212,7 @@ impl Adapter { /// Note that the WebGPU spec further restricts the available usages/features. /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature. pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures { - DynContext::adapter_get_texture_format_features( - &*self.context, - &self.id, - self.data.as_ref(), - format, - ) + DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format) } /// Generates a timestamp using the clock used by the presentation engine. @@ -250,6 +237,6 @@ impl Adapter { // /// [Instant]: std::time::Instant pub fn get_presentation_timestamp(&self) -> PresentationTimestamp { - DynContext::adapter_get_presentation_timestamp(&*self.context, &self.id, self.data.as_ref()) + DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref()) } } diff --git a/wgpu/src/api/bind_group.rs b/wgpu/src/api/bind_group.rs index 51c1efac74..42a774b295 100644 --- a/wgpu/src/api/bind_group.rs +++ b/wgpu/src/api/bind_group.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a binding group. @@ -14,26 +13,17 @@ use crate::*; #[derive(Debug)] pub struct BindGroup { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroup: Send, Sync); -impl BindGroup { - /// Returns a globally-unique identifier for this `BindGroup`. - /// - /// Calling this method multiple times on the same object will always return the same value. 
- /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(BindGroup); impl Drop for BindGroup { fn drop(&mut self) { if !thread::panicking() { - self.context.bind_group_drop(&self.id, self.data.as_ref()); + self.context.bind_group_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/bind_group_layout.rs b/wgpu/src/api/bind_group_layout.rs index 1268c664f1..db335689ca 100644 --- a/wgpu/src/api/bind_group_layout.rs +++ b/wgpu/src/api/bind_group_layout.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a binding group layout. @@ -17,27 +16,17 @@ use crate::*; #[derive(Debug)] pub struct BindGroupLayout { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync); -impl BindGroupLayout { - /// Returns a globally-unique identifier for this `BindGroupLayout`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(BindGroupLayout); impl Drop for BindGroupLayout { fn drop(&mut self) { if !thread::panicking() { - self.context - .bind_group_layout_drop(&self.id, self.data.as_ref()); + self.context.bind_group_layout_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/buffer.rs b/wgpu/src/api/buffer.rs index 6f54637994..9d490616d3 100644 --- a/wgpu/src/api/buffer.rs +++ b/wgpu/src/api/buffer.rs @@ -7,7 +7,7 @@ use std::{ use parking_lot::Mutex; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Handle to a GPU-accessible buffer. 
@@ -173,7 +173,6 @@ use crate::*; #[derive(Debug)] pub struct Buffer { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) map_context: Mutex, pub(crate) size: wgt::BufferAddress, @@ -183,15 +182,9 @@ pub struct Buffer { #[cfg(send_sync)] static_assertions::assert_impl_all!(Buffer: Send, Sync); -impl Buffer { - /// Returns a globally-unique identifier for this `Buffer`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(Buffer); +impl Buffer { /// Return the binding view of the entire buffer. pub fn as_entire_binding(&self) -> BindingResource<'_> { BindingResource::Buffer(self.as_entire_buffer_binding()) @@ -217,14 +210,17 @@ impl Buffer { &self, hal_buffer_callback: F, ) -> R { - let id = self.id; - if let Some(ctx) = self .context .as_any() .downcast_ref::() { - unsafe { ctx.buffer_as_hal::(id.into(), hal_buffer_callback) } + unsafe { + ctx.buffer_as_hal::( + crate::context::downcast_ref(self.data.as_ref()), + hal_buffer_callback, + ) + } } else { hal_buffer_callback(None) } @@ -256,12 +252,12 @@ impl Buffer { /// Flushes any pending write operations and unmaps the buffer from host memory. pub fn unmap(&self) { self.map_context.lock().reset(); - DynContext::buffer_unmap(&*self.context, &self.id, self.data.as_ref()); + DynContext::buffer_unmap(&*self.context, self.data.as_ref()); } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::buffer_destroy(&*self.context, &self.id, self.data.as_ref()); + DynContext::buffer_destroy(&*self.context, self.data.as_ref()); } /// Returns the length of the buffer allocation in bytes. 
@@ -343,12 +339,7 @@ impl<'a> BufferSlice<'a> { callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static, ) { let mut mc = self.buffer.map_context.lock(); - assert_eq!( - mc.initial_range, - 0..0, - "Buffer {:?} is already mapped", - self.buffer.id - ); + assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped"); let end = match self.size { Some(s) => self.offset + s.get(), None => mc.total_size, @@ -357,7 +348,6 @@ impl<'a> BufferSlice<'a> { DynContext::buffer_map_async( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), mode, self.offset..end, @@ -383,7 +373,6 @@ impl<'a> BufferSlice<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); let data = DynContext::buffer_get_mapped_range( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), self.offset..end, ); @@ -429,7 +418,6 @@ impl<'a> BufferSlice<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); let data = DynContext::buffer_get_mapped_range( &*self.buffer.context, - &self.buffer.id, self.buffer.data.as_ref(), self.offset..end, ); @@ -680,7 +668,7 @@ impl Drop for BufferViewMut<'_> { impl Drop for Buffer { fn drop(&mut self) { if !thread::panicking() { - self.context.buffer_drop(&self.id, self.data.as_ref()); + self.context.buffer_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/command_buffer.rs b/wgpu/src/api/command_buffer.rs index 4d56fe9b2f..6c519ed65a 100644 --- a/wgpu/src/api/command_buffer.rs +++ b/wgpu/src/api/command_buffer.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a command buffer on the GPU. 
@@ -13,7 +12,6 @@ use crate::*; #[derive(Debug)] pub struct CommandBuffer { pub(crate) context: Arc, - pub(crate) id: Option, pub(crate) data: Option>, } #[cfg(send_sync)] @@ -22,9 +20,8 @@ static_assertions::assert_impl_all!(CommandBuffer: Send, Sync); impl Drop for CommandBuffer { fn drop(&mut self) { if !thread::panicking() { - if let Some(id) = self.id.take() { - self.context - .command_buffer_drop(&id, self.data.take().unwrap().as_ref()); + if let Some(data) = self.data.take() { + self.context.command_buffer_drop(data.as_ref()); } } } diff --git a/wgpu/src/api/command_encoder.rs b/wgpu/src/api/command_encoder.rs index d8e8594a89..a45564b45d 100644 --- a/wgpu/src/api/command_encoder.rs +++ b/wgpu/src/api/command_encoder.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Encodes a series of GPU operations. @@ -15,7 +15,6 @@ use crate::*; #[derive(Debug)] pub struct CommandEncoder { pub(crate) context: Arc, - pub(crate) id: Option, pub(crate) data: Box, } #[cfg(send_sync)] @@ -24,9 +23,7 @@ static_assertions::assert_impl_all!(CommandEncoder: Send, Sync); impl Drop for CommandEncoder { fn drop(&mut self) { if !thread::panicking() { - if let Some(id) = self.id.take() { - self.context.command_encoder_drop(&id, self.data.as_ref()); - } + self.context.command_encoder_drop(self.data.as_ref()); } } } @@ -71,14 +68,9 @@ static_assertions::assert_impl_all!(ImageCopyTexture<'_>: Send, Sync); impl CommandEncoder { /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution. 
pub fn finish(mut self) -> CommandBuffer { - let (id, data) = DynContext::command_encoder_finish( - &*self.context, - self.id.take().unwrap(), - self.data.as_mut(), - ); + let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut()); CommandBuffer { context: Arc::clone(&self.context), - id: Some(id), data: Some(data), } } @@ -97,16 +89,10 @@ impl CommandEncoder { &'encoder mut self, desc: &RenderPassDescriptor<'_>, ) -> RenderPass<'encoder> { - let id = self.id.as_ref().unwrap(); - let (id, data) = DynContext::command_encoder_begin_render_pass( - &*self.context, - id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc); RenderPass { inner: RenderPassInner { - id, data, context: self.context.clone(), }, @@ -128,16 +114,13 @@ impl CommandEncoder { &'encoder mut self, desc: &ComputePassDescriptor<'_>, ) -> ComputePass<'encoder> { - let id = self.id.as_ref().unwrap(); - let (id, data) = DynContext::command_encoder_begin_compute_pass( + let data = DynContext::command_encoder_begin_compute_pass( &*self.context, - id, self.data.as_ref(), desc, ); ComputePass { inner: ComputePassInner { - id, data, context: self.context.clone(), }, @@ -162,12 +145,9 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_buffer_to_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - &source.id, source.data.as_ref(), source_offset, - &destination.id, destination.data.as_ref(), destination_offset, copy_size, @@ -183,7 +163,6 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_buffer_to_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, @@ -200,7 +179,6 @@ impl CommandEncoder { ) { DynContext::command_encoder_copy_texture_to_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, @@ -223,7 +201,6 @@ impl CommandEncoder { ) { 
DynContext::command_encoder_copy_texture_to_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), source, destination, @@ -247,9 +224,8 @@ impl CommandEncoder { pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) { DynContext::command_encoder_clear_texture( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - texture, + texture.data.as_ref(), subresource_range, ); } @@ -268,9 +244,8 @@ impl CommandEncoder { ) { DynContext::command_encoder_clear_buffer( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - buffer, + buffer.data.as_ref(), offset, size, ); @@ -278,25 +253,17 @@ impl CommandEncoder { /// Inserts debug marker. pub fn insert_debug_marker(&mut self, label: &str) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_insert_debug_marker( - &*self.context, - id, - self.data.as_ref(), - label, - ); + DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label); } /// Start record commands and group it into debug marker group. pub fn push_debug_group(&mut self, label: &str) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_push_debug_group(&*self.context, id, self.data.as_ref(), label); + DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label); } /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - let id = self.id.as_ref().unwrap(); - DynContext::command_encoder_pop_debug_group(&*self.context, id, self.data.as_ref()); + DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref()); } /// Resolves a query set, writing the results into the supplied destination buffer. 
@@ -312,13 +279,10 @@ impl CommandEncoder { ) { DynContext::command_encoder_resolve_query_set( &*self.context, - self.id.as_ref().unwrap(), self.data.as_ref(), - &query_set.id, query_set.data.as_ref(), query_range.start, query_range.end - query_range.start, - &destination.id, destination.data.as_ref(), destination_offset, ) @@ -341,14 +305,12 @@ impl CommandEncoder { &mut self, hal_command_encoder_callback: F, ) -> Option { - use wgc::id::CommandEncoderId; - self.context .as_any() .downcast_ref::() .map(|ctx| unsafe { ctx.command_encoder_as_hal_mut::( - CommandEncoderId::from(self.id.unwrap()), + crate::context::downcast_ref(self.data.as_ref()), hal_command_encoder_callback, ) }) @@ -372,9 +334,7 @@ impl CommandEncoder { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::command_encoder_write_timestamp( &*self.context, - self.id.as_ref().unwrap(), self.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) diff --git a/wgpu/src/api/compute_pass.rs b/wgpu/src/api/compute_pass.rs index 30123b8052..306a9f0b47 100644 --- a/wgpu/src/api/compute_pass.rs +++ b/wgpu/src/api/compute_pass.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// In-progress recording of a compute pass. 
@@ -48,16 +48,15 @@ impl<'encoder> ComputePass<'encoder> { pub fn set_bind_group( &mut self, index: u32, - bind_group: &BindGroup, + bind_group: Option<&BindGroup>, offsets: &[DynamicOffset], ) { + let bg = bind_group.map(|x| x.data.as_ref()); DynContext::compute_pass_set_bind_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), index, - &bind_group.id, - bind_group.data.as_ref(), + bg, offsets, ); } @@ -66,9 +65,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) { DynContext::compute_pass_set_pipeline( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ); } @@ -77,7 +74,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn insert_debug_marker(&mut self, label: &str) { DynContext::compute_pass_insert_debug_marker( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -87,7 +83,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn push_debug_group(&mut self, label: &str) { DynContext::compute_pass_push_debug_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -95,11 +90,7 @@ impl<'encoder> ComputePass<'encoder> { /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - DynContext::compute_pass_pop_debug_group( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); } /// Dispatches compute work operations. 
@@ -108,7 +99,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { DynContext::compute_pass_dispatch_workgroups( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), x, y, @@ -126,9 +116,7 @@ impl<'encoder> ComputePass<'encoder> { ) { DynContext::compute_pass_dispatch_workgroups_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -148,7 +136,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) { DynContext::compute_pass_set_push_constants( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), offset, data, @@ -167,9 +154,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::compute_pass_write_timestamp( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) @@ -183,9 +168,7 @@ impl<'encoder> ComputePass<'encoder> { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::compute_pass_begin_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ); @@ -196,7 +179,6 @@ impl<'encoder> ComputePass<'encoder> { pub fn end_pipeline_statistics_query(&mut self) { DynContext::compute_pass_end_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), ); } @@ -204,7 +186,6 @@ impl<'encoder> ComputePass<'encoder> { #[derive(Debug)] pub(crate) struct ComputePassInner { - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) context: Arc, } @@ -212,8 +193,7 @@ pub(crate) struct ComputePassInner { impl Drop for ComputePassInner { fn drop(&mut self) { if !thread::panicking() { - self.context - 
.compute_pass_end(&mut self.id, self.data.as_mut()); + self.context.compute_pass_end(self.data.as_mut()); } } } diff --git a/wgpu/src/api/compute_pipeline.rs b/wgpu/src/api/compute_pipeline.rs index ea2de4b8b2..16885ac96b 100644 --- a/wgpu/src/api/compute_pipeline.rs +++ b/wgpu/src/api/compute_pipeline.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a compute pipeline. @@ -12,38 +11,34 @@ use crate::*; #[derive(Debug)] pub struct ComputePipeline { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ComputePipeline: Send, Sync); -impl ComputePipeline { - /// Returns a globally-unique identifier for this `ComputePipeline`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(ComputePipeline); +impl ComputePipeline { /// Get an object representing the bind group layout at a given index. + /// + /// If this pipeline was created with a [default layout][ComputePipelineDescriptor::layout], + /// then bind groups created with the returned `BindGroupLayout` can only be used with this + /// pipeline. + /// + /// This method will raise a validation error if there is no bind group layout at `index`. 
pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { let context = Arc::clone(&self.context); - let (id, data) = self.context.compute_pipeline_get_bind_group_layout( - &self.id, - self.data.as_ref(), - index, - ); - BindGroupLayout { context, id, data } + let data = self + .context + .compute_pipeline_get_bind_group_layout(self.data.as_ref(), index); + BindGroupLayout { context, data } } } impl Drop for ComputePipeline { fn drop(&mut self) { if !thread::panicking() { - self.context - .compute_pipeline_drop(&self.id, self.data.as_ref()); + self.context.compute_pipeline_drop(self.data.as_ref()); } } } @@ -59,6 +54,25 @@ pub struct ComputePipelineDescriptor<'a> { /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification. pub label: Label<'a>, /// The layout of bind groups for this pipeline. + /// + /// If this is set, then [`Device::create_compute_pipeline`] will raise a validation error if + /// the layout doesn't match what the shader module(s) expect. + /// + /// Using the same [`PipelineLayout`] for many [`RenderPipeline`] or [`ComputePipeline`] + /// pipelines guarantees that you don't have to rebind any resources when switching between + /// those pipelines. + /// + /// ## Default pipeline layout + /// + /// If `layout` is `None`, then the pipeline has a [default layout] created and used instead. + /// The default layout is deduced from the shader modules. + /// + /// You can use [`ComputePipeline::get_bind_group_layout`] to create bind groups for use with + /// the default layout. However, these bind groups cannot be used with any other pipelines. This + /// is convenient for simple pipelines, but using an explicit layout is recommended in most + /// cases. + /// + /// [default layout]: https://www.w3.org/TR/webgpu/#default-pipeline-layout pub layout: Option<&'a PipelineLayout>, /// The compiled shader module for this stage. 
pub module: &'a ShaderModule, diff --git a/wgpu/src/api/device.rs b/wgpu/src/api/device.rs index a6d447a8fb..8c565ef524 100644 --- a/wgpu/src/api/device.rs +++ b/wgpu/src/api/device.rs @@ -2,7 +2,7 @@ use std::{error, fmt, future::Future, sync::Arc, thread}; use parking_lot::Mutex; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Open connection to a graphics and/or compute device. @@ -16,7 +16,6 @@ use crate::*; #[derive(Debug)] pub struct Device { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -32,14 +31,6 @@ pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor>; static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync); impl Device { - /// Returns a globally-unique identifier for this `Device`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Check for resource cleanups and mapping callbacks. Will block if [`Maintain::Wait`] is passed. /// /// Return `true` if the queue is empty, or `false` if there are more queue @@ -50,7 +41,7 @@ impl Device { /// /// When running on WebGPU, this is a no-op. `Device`s are automatically polled. pub fn poll(&self, maintain: Maintain) -> MaintainResult { - DynContext::device_poll(&*self.context, &self.id, self.data.as_ref(), maintain) + DynContext::device_poll(&*self.context, self.data.as_ref(), maintain) } /// The features which can be used on this device. @@ -58,7 +49,7 @@ impl Device { /// No additional features can be used, even if the underlying adapter can support them. 
#[must_use] pub fn features(&self) -> Features { - DynContext::device_features(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_features(&*self.context, self.data.as_ref()) } /// The limits which can be used on this device. @@ -66,7 +57,7 @@ impl Device { /// No better limits can be used, even if the underlying adapter can support them. #[must_use] pub fn limits(&self) -> Limits { - DynContext::device_limits(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_limits(&*self.context, self.data.as_ref()) } /// Creates a shader module from either SPIR-V or WGSL source code. @@ -85,16 +76,14 @@ impl Device { /// #[must_use] pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule { - let (id, data) = DynContext::device_create_shader_module( + let data = DynContext::device_create_shader_module( &*self.context, - &self.id, self.data.as_ref(), desc, wgt::ShaderBoundChecks::new(), ); ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -114,16 +103,14 @@ impl Device { &self, desc: ShaderModuleDescriptor<'_>, ) -> ShaderModule { - let (id, data) = DynContext::device_create_shader_module( + let data = DynContext::device_create_shader_module( &*self.context, - &self.id, self.data.as_ref(), desc, unsafe { wgt::ShaderBoundChecks::unchecked() }, ); ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -141,17 +128,11 @@ impl Device { &self, desc: &ShaderModuleDescriptorSpirV<'_>, ) -> ShaderModule { - let (id, data) = unsafe { - DynContext::device_create_shader_module_spirv( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ) + let data = unsafe { + DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc) }; ShaderModule { context: Arc::clone(&self.context), - id, data, } } @@ -159,15 +140,10 @@ impl Device { /// Creates an empty [`CommandEncoder`]. 
#[must_use] pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder { - let (id, data) = DynContext::device_create_command_encoder( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc); CommandEncoder { context: Arc::clone(&self.context), - id: Some(id), data, } } @@ -178,15 +154,13 @@ impl Device { &self, desc: &RenderBundleEncoderDescriptor<'_>, ) -> RenderBundleEncoder<'_> { - let (id, data) = DynContext::device_create_render_bundle_encoder( + let data = DynContext::device_create_render_bundle_encoder( &*self.context, - &self.id, self.data.as_ref(), desc, ); RenderBundleEncoder { context: Arc::clone(&self.context), - id, data, parent: self, _p: Default::default(), @@ -196,15 +170,9 @@ impl Device { /// Creates a new [`BindGroup`]. #[must_use] pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup { - let (id, data) = DynContext::device_create_bind_group( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc); BindGroup { context: Arc::clone(&self.context), - id, data, } } @@ -215,15 +183,10 @@ impl Device { &self, desc: &BindGroupLayoutDescriptor<'_>, ) -> BindGroupLayout { - let (id, data) = DynContext::device_create_bind_group_layout( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc); BindGroupLayout { context: Arc::clone(&self.context), - id, data, } } @@ -231,15 +194,10 @@ impl Device { /// Creates a [`PipelineLayout`]. 
#[must_use] pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout { - let (id, data) = DynContext::device_create_pipeline_layout( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc); PipelineLayout { context: Arc::clone(&self.context), - id, data, } } @@ -247,15 +205,10 @@ impl Device { /// Creates a [`RenderPipeline`]. #[must_use] pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline { - let (id, data) = DynContext::device_create_render_pipeline( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc); RenderPipeline { context: Arc::clone(&self.context), - id, data, } } @@ -263,15 +216,10 @@ impl Device { /// Creates a [`ComputePipeline`]. #[must_use] pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline { - let (id, data) = DynContext::device_create_compute_pipeline( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ); + let data = + DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc); ComputePipeline { context: Arc::clone(&self.context), - id, data, } } @@ -284,12 +232,10 @@ impl Device { map_context.initial_range = 0..desc.size; } - let (id, data) = - DynContext::device_create_buffer(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc); Buffer { context: Arc::clone(&self.context), - id, data, map_context: Mutex::new(map_context), size: desc.size, @@ -302,11 +248,9 @@ impl Device { /// `desc` specifies the general format of the texture. 
#[must_use] pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture { - let (id, data) = - DynContext::device_create_texture(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc); Texture { context: Arc::clone(&self.context), - id, data, owned: true, descriptor: TextureDescriptor { @@ -340,13 +284,12 @@ impl Device { .unwrap() .create_texture_from_hal::( hal_texture, - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(self.data.as_ref()), desc, ) }; Texture { context: Arc::clone(&self.context), - id: ObjectId::from(texture.id()), data: Box::new(texture), owned: true, descriptor: TextureDescriptor { @@ -376,7 +319,7 @@ impl Device { map_context.initial_range = 0..desc.size; } - let (id, buffer) = unsafe { + let buffer = unsafe { self.context .as_any() .downcast_ref::() @@ -385,14 +328,13 @@ impl Device { .unwrap() .create_buffer_from_hal::( hal_buffer, - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(self.data.as_ref()), desc, ) }; Buffer { context: Arc::clone(&self.context), - id: ObjectId::from(id), data: Box::new(buffer), map_context: Mutex::new(map_context), size: desc.size, @@ -405,11 +347,9 @@ impl Device { /// `desc` specifies the behavior of the sampler. #[must_use] pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler { - let (id, data) = - DynContext::device_create_sampler(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc); Sampler { context: Arc::clone(&self.context), - id, data, } } @@ -417,11 +357,9 @@ impl Device { /// Creates a new [`QuerySet`]. 
#[must_use] pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet { - let (id, data) = - DynContext::device_create_query_set(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc); QuerySet { context: Arc::clone(&self.context), - id, data, } } @@ -429,29 +367,28 @@ impl Device { /// Set a callback for errors that are not handled in error scopes. pub fn on_uncaptured_error(&self, handler: Box) { self.context - .device_on_uncaptured_error(&self.id, self.data.as_ref(), handler); + .device_on_uncaptured_error(self.data.as_ref(), handler); } /// Push an error scope. pub fn push_error_scope(&self, filter: ErrorFilter) { self.context - .device_push_error_scope(&self.id, self.data.as_ref(), filter); + .device_push_error_scope(self.data.as_ref(), filter); } /// Pop an error scope. pub fn pop_error_scope(&self) -> impl Future> + WasmNotSend { - self.context - .device_pop_error_scope(&self.id, self.data.as_ref()) + self.context.device_pop_error_scope(self.data.as_ref()) } /// Starts frame capture. pub fn start_capture(&self) { - DynContext::device_start_capture(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_start_capture(&*self.context, self.data.as_ref()) } /// Stops frame capture. pub fn stop_capture(&self) { - DynContext::device_stop_capture(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_stop_capture(&*self.context, self.data.as_ref()) } /// Query internal counters from the native backend for debugging purposes. @@ -462,7 +399,7 @@ impl Device { /// If a counter is not set, its contains its default value (zero). 
#[must_use] pub fn get_internal_counters(&self) -> wgt::InternalCounters { - DynContext::device_get_internal_counters(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_get_internal_counters(&*self.context, self.data.as_ref()) } /// Generate an GPU memory allocation report if the underlying backend supports it. @@ -472,7 +409,7 @@ impl Device { /// for example as a workaround for driver issues. #[must_use] pub fn generate_allocator_report(&self) -> Option { - DynContext::generate_allocator_report(&*self.context, &self.id, self.data.as_ref()) + DynContext::generate_allocator_report(&*self.context, self.data.as_ref()) } /// Apply a callback to this `Device`'s underlying backend device. @@ -504,7 +441,7 @@ impl Device { .downcast_ref::() .map(|ctx| unsafe { ctx.device_as_hal::( - self.data.as_ref().downcast_ref().unwrap(), + crate::context::downcast_ref(self.data.as_ref()), hal_device_callback, ) }) @@ -512,7 +449,7 @@ impl Device { /// Destroy this device. pub fn destroy(&self) { - DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref()) + DynContext::device_destroy(&*self.context, self.data.as_ref()) } /// Set a DeviceLostCallback on this device. @@ -522,18 +459,11 @@ impl Device { ) { DynContext::device_set_device_lost_callback( &*self.context, - &self.id, self.data.as_ref(), Box::new(callback), ) } - /// Test-only function to make this device invalid. 
- #[doc(hidden)] - pub fn make_invalid(&self) { - DynContext::device_make_invalid(&*self.context, &self.id, self.data.as_ref()) - } - /// Create a [`PipelineCache`] with initial data /// /// This can be passed to [`Device::create_compute_pipeline`] @@ -576,17 +506,11 @@ impl Device { &self, desc: &PipelineCacheDescriptor<'_>, ) -> PipelineCache { - let (id, data) = unsafe { - DynContext::device_create_pipeline_cache( - &*self.context, - &self.id, - self.data.as_ref(), - desc, - ) + let data = unsafe { + DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc) }; PipelineCache { context: Arc::clone(&self.context), - id, data, } } @@ -595,7 +519,7 @@ impl Device { impl Drop for Device { fn drop(&mut self) { if !thread::panicking() { - self.context.device_drop(&self.id, self.data.as_ref()); + self.context.device_drop(self.data.as_ref()); } } } @@ -681,30 +605,31 @@ pub enum ErrorFilter { } static_assertions::assert_impl_all!(ErrorFilter: Send, Sync); +/// Lower level source of the error. +/// +/// `Send + Sync` varies depending on configuration. +#[cfg(send_sync)] +#[cfg_attr(docsrs, doc(cfg(all())))] +pub type ErrorSource = Box; +/// Lower level source of the error. +/// +/// `Send + Sync` varies depending on configuration. +#[cfg(not(send_sync))] +#[cfg_attr(docsrs, doc(cfg(all())))] +pub type ErrorSource = Box; + /// Error type #[derive(Debug)] pub enum Error { /// Out of memory error OutOfMemory { /// Lower level source of the error. - #[cfg(send_sync)] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, - /// Lower level source of the error. - #[cfg(not(send_sync))] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, + source: ErrorSource, }, /// Validation error, signifying a bug in code or data Validation { /// Lower level source of the error. - #[cfg(send_sync)] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, - /// Lower level source of the error. 
- #[cfg(not(send_sync))] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, + source: ErrorSource, /// Description of the validation error. description: String, }, @@ -713,13 +638,7 @@ pub enum Error { /// These could be due to internal implementation or system limits being reached. Internal { /// Lower level source of the error. - #[cfg(send_sync)] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, - /// Lower level source of the error. - #[cfg(not(send_sync))] - #[cfg_attr(docsrs, doc(cfg(all())))] - source: Box, + source: ErrorSource, /// Description of the internal GPU error. description: String, }, diff --git a/wgpu/src/api/id.rs b/wgpu/src/api/id.rs deleted file mode 100644 index d9041883b2..0000000000 --- a/wgpu/src/api/id.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64}; - -use crate::context::ObjectId; - -/// Opaque globally-unique identifier -#[repr(transparent)] -pub struct Id(NonZeroU64, PhantomData<*mut T>); - -impl Id { - /// Create a new `Id` from a ObjectID. - pub(crate) fn new(id: ObjectId) -> Self { - Id(id.global_id(), PhantomData) - } - - /// For testing use only. We provide no guarantees about the actual value of the ids. - #[doc(hidden)] - pub fn inner(&self) -> u64 { - self.0.get() - } -} - -// SAFETY: `Id` is a bare `NonZeroU64`, the type parameter is a marker purely to avoid confusing Ids -// returned for different types , so `Id` can safely implement Send and Sync. -unsafe impl Send for Id {} - -// SAFETY: See the implementation for `Send`. 
-unsafe impl Sync for Id {} - -impl Clone for Id { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for Id {} - -impl fmt::Debug for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Id").field(&self.0).finish() - } -} - -impl PartialEq for Id { - fn eq(&self, other: &Id) -> bool { - self.0 == other.0 - } -} - -impl Eq for Id {} - -impl PartialOrd for Id { - fn partial_cmp(&self, other: &Id) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Id { - fn cmp(&self, other: &Id) -> Ordering { - self.0.cmp(&other.0) - } -} - -impl std::hash::Hash for Id { - fn hash(&self, state: &mut H) { - self.0.hash(state) - } -} diff --git a/wgpu/src/api/instance.rs b/wgpu/src/api/instance.rs index 26d8b863b1..af6775b86b 100644 --- a/wgpu/src/api/instance.rs +++ b/wgpu/src/api/instance.rs @@ -95,7 +95,7 @@ impl Instance { /// [`Backends::BROWSER_WEBGPU`] takes a special role: /// If it is set and WebGPU support is detected, this instance will *only* be able to create /// WebGPU adapters. If you instead want to force use of WebGL, either - /// disable the `webgpu` compile-time feature or do add the [`Backends::BROWSER_WEBGPU`] + /// disable the `webgpu` compile-time feature or don't add the [`Backends::BROWSER_WEBGPU`] /// flag to the the `instance_desc`'s `backends` field. /// If it is set and WebGPU support is *not* detected, the instance will use wgpu-core /// to create adapters. 
Meaning that if the `webgl` feature is enabled, it is able to create @@ -118,8 +118,9 @@ impl Instance { { let is_only_available_backend = !cfg!(wgpu_core); let requested_webgpu = _instance_desc.backends.contains(Backends::BROWSER_WEBGPU); - let support_webgpu = - crate::backend::get_browser_gpu_property().map_or(false, |gpu| !gpu.is_undefined()); + let support_webgpu = crate::backend::get_browser_gpu_property() + .map(|maybe_gpu| maybe_gpu.is_some()) + .unwrap_or(false); if is_only_available_backend || (requested_webgpu && support_webgpu) { return Self { @@ -202,8 +203,6 @@ impl Instance { /// - `backends` - Backends from which to enumerate adapters. #[cfg(native)] pub fn enumerate_adapters(&self, backends: Backends) -> Vec { - use crate::context::ObjectId; - let context = Arc::clone(&self.context); self.context .as_any() @@ -211,10 +210,9 @@ impl Instance { .map(|ctx| { ctx.enumerate_adapters(backends) .into_iter() - .map(move |id| crate::Adapter { + .map(move |adapter| crate::Adapter { context: Arc::clone(&context), - id: ObjectId::from(id), - data: Box::new(()), + data: Box::new(adapter), }) .collect() }) @@ -234,11 +232,7 @@ impl Instance { ) -> impl Future> + WasmNotSend { let context = Arc::clone(&self.context); let adapter = self.context.instance_request_adapter(options); - async move { - adapter - .await - .map(|(id, data)| Adapter { context, id, data }) - } + async move { adapter.await.map(|data| Adapter { context, data }) } } /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`]. 
@@ -252,18 +246,16 @@ impl Instance { hal_adapter: hal::ExposedAdapter, ) -> Adapter { let context = Arc::clone(&self.context); - let id = unsafe { + let adapter = unsafe { context .as_any() .downcast_ref::() .unwrap() .create_adapter_from_hal(hal_adapter) - .into() }; Adapter { context, - id, - data: Box::new(()), + data: Box::new(adapter), } } @@ -355,12 +347,11 @@ impl Instance { &self, target: SurfaceTargetUnsafe, ) -> Result, CreateSurfaceError> { - let (id, data) = unsafe { self.context.instance_create_surface(target) }?; + let data = unsafe { self.context.instance_create_surface(target) }?; Ok(Surface { context: Arc::clone(&self.context), _handle_source: None, - id, surface_data: data, config: Mutex::new(None), }) diff --git a/wgpu/src/api/mod.rs b/wgpu/src/api/mod.rs index 819f6847cf..52b9ec1602 100644 --- a/wgpu/src/api/mod.rs +++ b/wgpu/src/api/mod.rs @@ -32,7 +32,6 @@ mod common_pipeline; mod compute_pass; mod compute_pipeline; mod device; -mod id; mod instance; mod pipeline_cache; mod pipeline_layout; @@ -59,7 +58,6 @@ pub use common_pipeline::*; pub use compute_pass::*; pub use compute_pipeline::*; pub use device::*; -pub use id::*; pub use instance::*; pub use pipeline_cache::*; pub use pipeline_layout::*; @@ -78,3 +76,35 @@ pub use texture_view::*; /// Object debugging label. pub type Label<'a> = Option<&'a str>; + +macro_rules! 
impl_partialeq_eq_hash { + ($ty:ty) => { + impl PartialEq for $ty { + fn eq(&self, other: &Self) -> bool { + std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref()) + } + } + impl Eq for $ty {} + + impl std::hash::Hash for $ty { + fn hash(&self, state: &mut H) { + let ptr = self.data.as_ref() as *const Data as *const (); + ptr.hash(state); + } + } + + impl PartialOrd for $ty { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + impl Ord for $ty { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let a = self.data.as_ref() as *const Data as *const (); + let b = other.data.as_ref() as *const Data as *const (); + a.cmp(&b) + } + } + }; +} +pub(crate) use impl_partialeq_eq_hash; diff --git a/wgpu/src/api/pipeline_cache.rs b/wgpu/src/api/pipeline_cache.rs index 42ab15b8ba..800e786cae 100644 --- a/wgpu/src/api/pipeline_cache.rs +++ b/wgpu/src/api/pipeline_cache.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a pipeline cache, which is used to accelerate @@ -68,7 +67,6 @@ use crate::*; #[derive(Debug)] pub struct PipelineCache { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } @@ -83,16 +81,14 @@ impl PipelineCache { /// /// This function is unique to the Rust API of `wgpu`. 
pub fn get_data(&self) -> Option> { - self.context - .pipeline_cache_get_data(&self.id, self.data.as_ref()) + self.context.pipeline_cache_get_data(self.data.as_ref()) } } impl Drop for PipelineCache { fn drop(&mut self) { if !thread::panicking() { - self.context - .pipeline_cache_drop(&self.id, self.data.as_ref()); + self.context.pipeline_cache_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/pipeline_layout.rs b/wgpu/src/api/pipeline_layout.rs index f47ea1a174..20538dd9e7 100644 --- a/wgpu/src/api/pipeline_layout.rs +++ b/wgpu/src/api/pipeline_layout.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a pipeline layout. @@ -12,27 +11,17 @@ use crate::*; #[derive(Debug)] pub struct PipelineLayout { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(PipelineLayout: Send, Sync); -impl PipelineLayout { - /// Returns a globally-unique identifier for this `PipelineLayout`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(PipelineLayout); impl Drop for PipelineLayout { fn drop(&mut self) { if !thread::panicking() { - self.context - .pipeline_layout_drop(&self.id, self.data.as_ref()); + self.context.pipeline_layout_drop(self.data.as_ref()); } } } @@ -51,8 +40,8 @@ pub struct PipelineLayoutDescriptor<'a> { /// "set = 0", second entry will provide all the bindings for "set = 1" etc. pub bind_group_layouts: &'a [&'a BindGroupLayout], /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants - /// must define the range in push constant memory that corresponds to its single `layout(push_constant)` - /// uniform block. 
+ /// must define the range in push constant memory that corresponds to its single `var` + /// buffer. /// /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] must be enabled. pub push_constant_ranges: &'a [PushConstantRange], diff --git a/wgpu/src/api/query_set.rs b/wgpu/src/api/query_set.rs index 41c262bd98..a0cac6847b 100644 --- a/wgpu/src/api/query_set.rs +++ b/wgpu/src/api/query_set.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a query set. @@ -11,27 +10,18 @@ use crate::*; #[derive(Debug)] pub struct QuerySet { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] #[cfg(send_sync)] static_assertions::assert_impl_all!(QuerySet: Send, Sync); -impl QuerySet { - /// Returns a globally-unique identifier for this `QuerySet`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(QuerySet); impl Drop for QuerySet { fn drop(&mut self) { if !thread::panicking() { - self.context.query_set_drop(&self.id, self.data.as_ref()); + self.context.query_set_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/queue.rs b/wgpu/src/api/queue.rs index c675f9f926..b57b33ece3 100644 --- a/wgpu/src/api/queue.rs +++ b/wgpu/src/api/queue.rs @@ -4,7 +4,7 @@ use std::{ thread, }; -use crate::context::{DynContext, ObjectId, QueueWriteBuffer}; +use crate::context::{DynContext, QueueWriteBuffer}; use crate::*; /// Handle to a command queue on a device. 
@@ -17,7 +17,6 @@ use crate::*; #[derive(Debug)] pub struct Queue { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] @@ -26,7 +25,7 @@ static_assertions::assert_impl_all!(Queue: Send, Sync); impl Drop for Queue { fn drop(&mut self) { if !thread::panicking() { - self.context.queue_drop(&self.id, self.data.as_ref()); + self.context.queue_drop(self.data.as_ref()); } } } @@ -38,7 +37,10 @@ impl Drop for Queue { /// This type is unique to the Rust API of `wgpu`. /// There is no analogue in the WebGPU specification. #[derive(Debug, Clone)] -pub struct SubmissionIndex(pub(crate) Arc); +pub struct SubmissionIndex { + #[cfg_attr(not(native), allow(dead_code))] + pub(crate) data: Arc, +} #[cfg(send_sync)] static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync); @@ -87,9 +89,7 @@ impl<'a> Drop for QueueWriteBufferView<'a> { fn drop(&mut self) { DynContext::queue_write_staging_buffer( &*self.queue.context, - &self.queue.id, self.queue.data.as_ref(), - &self.buffer.id, self.buffer.data.as_ref(), self.offset, &*self.inner, @@ -121,9 +121,7 @@ impl Queue { pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) { DynContext::queue_write_buffer( &*self.context, - &self.id, self.data.as_ref(), - &buffer.id, buffer.data.as_ref(), offset, data, @@ -168,19 +166,13 @@ impl Queue { profiling::scope!("Queue::write_buffer_with"); DynContext::queue_validate_write_buffer( &*self.context, - &self.id, self.data.as_ref(), - &buffer.id, buffer.data.as_ref(), offset, size, )?; - let staging_buffer = DynContext::queue_create_staging_buffer( - &*self.context, - &self.id, - self.data.as_ref(), - size, - )?; + let staging_buffer = + DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?; Some(QueueWriteBufferView { queue: self, buffer, @@ -222,7 +214,6 @@ impl Queue { ) { DynContext::queue_write_texture( &*self.context, - &self.id, self.data.as_ref(), texture, data, @@ -241,7 +232,6 @@ impl 
Queue { ) { DynContext::queue_copy_external_image_to_texture( &*self.context, - &self.id, self.data.as_ref(), source, dest, @@ -256,16 +246,12 @@ impl Queue { ) -> SubmissionIndex { let mut command_buffers = command_buffers .into_iter() - .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())); + .map(|mut comb| comb.data.take().unwrap()); - let data = DynContext::queue_submit( - &*self.context, - &self.id, - self.data.as_ref(), - &mut command_buffers, - ); + let data = + DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers); - SubmissionIndex(data) + SubmissionIndex { data } } /// Gets the amount of nanoseconds each tick of a timestamp query represents. @@ -275,7 +261,7 @@ impl Queue { /// Timestamp values are represented in nanosecond values on WebGPU, see `` /// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required. pub fn get_timestamp_period(&self) -> f32 { - DynContext::queue_get_timestamp_period(&*self.context, &self.id, self.data.as_ref()) + DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref()) } /// Registers a callback when the previous call to submit finishes running on the gpu. This callback @@ -292,7 +278,6 @@ impl Queue { pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) { DynContext::queue_on_submitted_work_done( &*self.context, - &self.id, self.data.as_ref(), Box::new(callback), ) diff --git a/wgpu/src/api/render_bundle.rs b/wgpu/src/api/render_bundle.rs index e80da93e2d..5932458aeb 100644 --- a/wgpu/src/api/render_bundle.rs +++ b/wgpu/src/api/render_bundle.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Pre-prepared reusable bundle of GPU operations. 
@@ -15,27 +14,17 @@ use crate::*; #[derive(Debug)] pub struct RenderBundle { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderBundle: Send, Sync); -impl RenderBundle { - /// Returns a globally-unique identifier for this `RenderBundle`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(RenderBundle); impl Drop for RenderBundle { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_bundle_drop(&self.id, self.data.as_ref()); + self.context.render_bundle_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/render_bundle_encoder.rs b/wgpu/src/api/render_bundle_encoder.rs index ae5829bee1..d69548cdfd 100644 --- a/wgpu/src/api/render_bundle_encoder.rs +++ b/wgpu/src/api/render_bundle_encoder.rs @@ -1,6 +1,6 @@ use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Encodes a series of GPU operations into a reusable "render bundle". @@ -17,7 +17,6 @@ use crate::*; #[derive(Debug)] pub struct RenderBundleEncoder<'a> { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) parent: &'a Device, /// This type should be !Send !Sync, because it represents an allocation on this thread's @@ -53,11 +52,9 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn impl<'a> RenderBundleEncoder<'a> { /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes. 
pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle { - let (id, data) = - DynContext::render_bundle_encoder_finish(&*self.context, self.id, self.data, desc); + let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc); RenderBundle { context: Arc::clone(&self.context), - id, data, } } @@ -69,16 +66,15 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_bind_group( &mut self, index: u32, - bind_group: &'a BindGroup, + bind_group: Option<&'a BindGroup>, offsets: &[DynamicOffset], ) { + let bg = bind_group.map(|x| x.data.as_ref()); DynContext::render_bundle_encoder_set_bind_group( &*self.parent.context, - &mut self.id, self.data.as_mut(), index, - &bind_group.id, - bind_group.data.as_ref(), + bg, offsets, ) } @@ -89,9 +85,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) { DynContext::render_bundle_encoder_set_pipeline( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ) } @@ -103,9 +97,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) { DynContext::render_bundle_encoder_set_index_buffer( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), index_format, buffer_slice.offset, @@ -126,10 +118,8 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) { DynContext::render_bundle_encoder_set_vertex_buffer( &*self.parent.context, - &mut self.id, self.data.as_mut(), slot, - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), buffer_slice.offset, buffer_slice.size, @@ -157,7 +147,6 @@ impl<'a> RenderBundleEncoder<'a> { pub fn draw(&mut self, vertices: Range, instances: Range) { DynContext::render_bundle_encoder_draw( &*self.parent.context, - &mut self.id, self.data.as_mut(), vertices, instances, @@ -188,7 +177,6 @@ 
impl<'a> RenderBundleEncoder<'a> { pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { DynContext::render_bundle_encoder_draw_indexed( &*self.parent.context, - &mut self.id, self.data.as_mut(), indices, base_vertex, @@ -204,9 +192,7 @@ impl<'a> RenderBundleEncoder<'a> { pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) { DynContext::render_bundle_encoder_draw_indirect( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -226,9 +212,7 @@ impl<'a> RenderBundleEncoder<'a> { ) { DynContext::render_bundle_encoder_draw_indexed_indirect( &*self.parent.context, - &mut self.id, self.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -268,7 +252,6 @@ impl<'a> RenderBundleEncoder<'a> { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { DynContext::render_bundle_encoder_set_push_constants( &*self.parent.context, - &mut self.id, self.data.as_mut(), stages, offset, diff --git a/wgpu/src/api/render_pass.rs b/wgpu/src/api/render_pass.rs index bdb8ebe372..7cdbc31355 100644 --- a/wgpu/src/api/render_pass.rs +++ b/wgpu/src/api/render_pass.rs @@ -1,11 +1,10 @@ use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; #[derive(Debug)] pub(crate) struct RenderPassInner { - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) context: Arc, } @@ -13,8 +12,7 @@ pub(crate) struct RenderPassInner { impl Drop for RenderPassInner { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_pass_end(&mut self.id, self.data.as_mut()); + self.context.render_pass_end(self.data.as_mut()); } } } @@ -79,16 +77,15 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_bind_group( &mut self, index: u32, - bind_group: &BindGroup, + bind_group: 
Option<&BindGroup>, offsets: &[DynamicOffset], ) { + let bg = bind_group.map(|x| x.data.as_ref()); DynContext::render_pass_set_bind_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), index, - &bind_group.id, - bind_group.data.as_ref(), + bg, offsets, ) } @@ -99,9 +96,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) { DynContext::render_pass_set_pipeline( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &pipeline.id, pipeline.data.as_ref(), ) } @@ -114,7 +109,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_blend_constant(&mut self, color: Color) { DynContext::render_pass_set_blend_constant( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), color, ) @@ -127,9 +121,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) { DynContext::render_pass_set_index_buffer( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), index_format, buffer_slice.offset, @@ -150,10 +142,8 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) { DynContext::render_pass_set_vertex_buffer( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), slot, - &buffer_slice.buffer.id, buffer_slice.buffer.data.as_ref(), buffer_slice.offset, buffer_slice.size, @@ -172,7 +162,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { DynContext::render_pass_set_scissor_rect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), x, y, @@ -190,7 +179,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) { DynContext::render_pass_set_viewport( &*self.inner.context, - &mut self.inner.id, 
self.inner.data.as_mut(), x, y, @@ -208,7 +196,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_stencil_reference(&mut self, reference: u32) { DynContext::render_pass_set_stencil_reference( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), reference, ); @@ -218,7 +205,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn insert_debug_marker(&mut self, label: &str) { DynContext::render_pass_insert_debug_marker( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -228,7 +214,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn push_debug_group(&mut self, label: &str) { DynContext::render_pass_push_debug_group( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), label, ); @@ -236,11 +221,7 @@ impl<'encoder> RenderPass<'encoder> { /// Stops command recording and creates debug group. pub fn pop_debug_group(&mut self) { - DynContext::render_pass_pop_debug_group( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); } /// Draws primitives from the active vertex buffer(s). 
@@ -267,7 +248,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw(&mut self, vertices: Range, instances: Range) { DynContext::render_pass_draw( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), vertices, instances, @@ -301,7 +281,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { DynContext::render_pass_draw_indexed( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), indices, base_vertex, @@ -325,9 +304,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) { DynContext::render_pass_draw_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -354,9 +331,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_draw_indexed_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, ); @@ -371,13 +346,10 @@ impl<'encoder> RenderPass<'encoder> { &mut self, render_bundles: I, ) { - let mut render_bundles = render_bundles - .into_iter() - .map(|rb| (&rb.id, rb.data.as_ref())); + let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref()); DynContext::render_pass_execute_bundles( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), &mut render_bundles, ) @@ -404,9 +376,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, count, @@ -432,9 +402,7 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indexed_indirect( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), 
indirect_offset, count, @@ -476,12 +444,9 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indirect_count( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, - &count_buffer.id, count_buffer.data.as_ref(), count_offset, max_count, @@ -523,12 +488,9 @@ impl<'encoder> RenderPass<'encoder> { ) { DynContext::render_pass_multi_draw_indexed_indirect_count( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &indirect_buffer.id, indirect_buffer.data.as_ref(), indirect_offset, - &count_buffer.id, count_buffer.data.as_ref(), count_offset, max_count, @@ -581,7 +543,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { DynContext::render_pass_set_push_constants( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), stages, offset, @@ -602,9 +563,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::render_pass_write_timestamp( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ) @@ -617,7 +576,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn begin_occlusion_query(&mut self, query_index: u32) { DynContext::render_pass_begin_occlusion_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), query_index, ); @@ -626,11 +584,7 @@ impl<'encoder> RenderPass<'encoder> { /// End the occlusion query on this render pass. It can be started with /// `begin_occlusion_query`. Occlusion queries may not be nested. 
pub fn end_occlusion_query(&mut self) { - DynContext::render_pass_end_occlusion_query( - &*self.inner.context, - &mut self.inner.id, - self.inner.data.as_mut(), - ); + DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut()); } } @@ -641,9 +595,7 @@ impl<'encoder> RenderPass<'encoder> { pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { DynContext::render_pass_begin_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), - &query_set.id, query_set.data.as_ref(), query_index, ); @@ -654,7 +606,6 @@ impl<'encoder> RenderPass<'encoder> { pub fn end_pipeline_statistics_query(&mut self) { DynContext::render_pass_end_pipeline_statistics_query( &*self.inner.context, - &mut self.inner.id, self.inner.data.as_mut(), ); } diff --git a/wgpu/src/api/render_pipeline.rs b/wgpu/src/api/render_pipeline.rs index 7e74127167..dd1c1cefe8 100644 --- a/wgpu/src/api/render_pipeline.rs +++ b/wgpu/src/api/render_pipeline.rs @@ -1,6 +1,5 @@ use std::{num::NonZeroU32, sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a rendering (graphics) pipeline. @@ -12,37 +11,34 @@ use crate::*; #[derive(Debug)] pub struct RenderPipeline { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderPipeline: Send, Sync); +super::impl_partialeq_eq_hash!(RenderPipeline); + impl Drop for RenderPipeline { fn drop(&mut self) { if !thread::panicking() { - self.context - .render_pipeline_drop(&self.id, self.data.as_ref()); + self.context.render_pipeline_drop(self.data.as_ref()); } } } impl RenderPipeline { - /// Returns a globally-unique identifier for this `RenderPipeline`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
- pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Get an object representing the bind group layout at a given index. + /// + /// If this pipeline was created with a [default layout][RenderPipelineDescriptor::layout], then + /// bind groups created with the returned `BindGroupLayout` can only be used with this pipeline. + /// + /// This method will raise a validation error if there is no bind group layout at `index`. pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { let context = Arc::clone(&self.context); - let (id, data) = - self.context - .render_pipeline_get_bind_group_layout(&self.id, self.data.as_ref(), index); - BindGroupLayout { context, id, data } + let data = self + .context + .render_pipeline_get_bind_group_layout(self.data.as_ref(), index); + BindGroupLayout { context, data } } } @@ -130,6 +126,24 @@ pub struct RenderPipelineDescriptor<'a> { /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification. pub label: Label<'a>, /// The layout of bind groups for this pipeline. + /// + /// If this is set, then [`Device::create_render_pipeline`] will raise a validation error if + /// the layout doesn't match what the shader module(s) expect. + /// + /// Using the same [`PipelineLayout`] for many [`RenderPipeline`] or [`ComputePipeline`] + /// pipelines guarantees that you don't have to rebind any resources when switching between + /// those pipelines. + /// + /// ## Default pipeline layout + /// + /// If `layout` is `None`, then the pipeline has a [default layout] created and used instead. + /// The default layout is deduced from the shader modules. + /// + /// You can use [`RenderPipeline::get_bind_group_layout`] to create bind groups for use with the + /// default layout. However, these bind groups cannot be used with any other pipelines. This is + /// convenient for simple pipelines, but using an explicit layout is recommended in most cases. 
+ /// + /// [default layout]: https://www.w3.org/TR/webgpu/#default-pipeline-layout pub layout: Option<&'a PipelineLayout>, /// The compiled vertex stage, its entry point, and the input buffers layout. pub vertex: VertexState<'a>, diff --git a/wgpu/src/api/sampler.rs b/wgpu/src/api/sampler.rs index 63267ded5d..d60bcccd26 100644 --- a/wgpu/src/api/sampler.rs +++ b/wgpu/src/api/sampler.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a sampler. @@ -15,26 +14,17 @@ use crate::*; #[derive(Debug)] pub struct Sampler { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Sampler: Send, Sync); -impl Sampler { - /// Returns a globally-unique identifier for this `Sampler`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } -} +super::impl_partialeq_eq_hash!(Sampler); impl Drop for Sampler { fn drop(&mut self) { if !thread::panicking() { - self.context.sampler_drop(&self.id, self.data.as_ref()); + self.context.sampler_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/shader_module.rs b/wgpu/src/api/shader_module.rs index d81562e932..20334a75ad 100644 --- a/wgpu/src/api/shader_module.rs +++ b/wgpu/src/api/shader_module.rs @@ -1,6 +1,5 @@ use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a compiled shader module. 
@@ -14,34 +13,25 @@ use crate::*; #[derive(Debug)] pub struct ShaderModule { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ShaderModule: Send, Sync); +super::impl_partialeq_eq_hash!(ShaderModule); + impl Drop for ShaderModule { fn drop(&mut self) { if !thread::panicking() { - self.context - .shader_module_drop(&self.id, self.data.as_ref()); + self.context.shader_module_drop(self.data.as_ref()); } } } impl ShaderModule { - /// Returns a globally-unique identifier for this `ShaderModule`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } - /// Get the compilation info for the shader module. pub fn get_compilation_info(&self) -> impl Future + WasmNotSend { - self.context - .shader_get_compilation_info(&self.id, self.data.as_ref()) + self.context.shader_get_compilation_info(self.data.as_ref()) } } diff --git a/wgpu/src/api/surface.rs b/wgpu/src/api/surface.rs index de140a9dcf..ecb0d96938 100644 --- a/wgpu/src/api/surface.rs +++ b/wgpu/src/api/surface.rs @@ -3,7 +3,7 @@ use std::{error, fmt, sync::Arc, thread}; use parking_lot::Mutex; use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Describes a [`Surface`]. @@ -32,9 +32,6 @@ pub struct Surface<'window> { /// would become invalid when the window is dropped. pub(crate) _handle_source: Option>, - /// Wgpu-core surface id. - pub(crate) id: ObjectId, - /// Additional surface data returned by [`DynContext::instance_create_surface`]. pub(crate) surface_data: Box, @@ -48,23 +45,13 @@ pub struct Surface<'window> { } impl Surface<'_> { - /// Returns a globally-unique identifier for this `Surface`. 
- /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id> { - Id::new(self.id) - } - /// Returns the capabilities of the surface when used with the given adapter. /// /// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter. pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities { DynContext::surface_get_capabilities( &*self.context, - &self.id, self.surface_data.as_ref(), - &adapter.id, adapter.data.as_ref(), ) } @@ -101,9 +88,7 @@ impl Surface<'_> { pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) { DynContext::surface_configure( &*self.context, - &self.id, self.surface_data.as_ref(), - &device.id, device.data.as_ref(), config, ); @@ -121,11 +106,8 @@ impl Surface<'_> { /// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated, /// recreating the swapchain will panic. 
pub fn get_current_texture(&self) -> Result { - let (texture_id, texture_data, status, detail) = DynContext::surface_get_current_texture( - &*self.context, - &self.id, - self.surface_data.as_ref(), - ); + let (texture_data, status, detail) = + DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref()); let suboptimal = match status { SurfaceStatus::Good => false, @@ -155,12 +137,10 @@ impl Surface<'_> { view_formats: &[], }; - texture_id - .zip(texture_data) - .map(|(id, data)| SurfaceTexture { + texture_data + .map(|data| SurfaceTexture { texture: Texture { context: Arc::clone(&self.context), - id, data, owned: false, descriptor, @@ -188,7 +168,7 @@ impl Surface<'_> { .downcast_ref::() .map(|ctx| unsafe { ctx.surface_as_hal::( - self.surface_data.downcast_ref().unwrap(), + crate::context::downcast_ref(self.surface_data.as_ref()), hal_surface_callback, ) }) @@ -209,7 +189,6 @@ impl<'window> fmt::Debug for Surface<'window> { "None" }, ) - .field("id", &self.id) .field("data", &self.surface_data) .field("config", &self.config) .finish() @@ -222,8 +201,7 @@ static_assertions::assert_impl_all!(Surface<'_>: Send, Sync); impl Drop for Surface<'_> { fn drop(&mut self) { if !thread::panicking() { - self.context - .surface_drop(&self.id, self.surface_data.as_ref()) + self.context.surface_drop(self.surface_data.as_ref()) } } } diff --git a/wgpu/src/api/texture.rs b/wgpu/src/api/texture.rs index 98295b9396..5d01ecdea3 100644 --- a/wgpu/src/api/texture.rs +++ b/wgpu/src/api/texture.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, thread}; -use crate::context::{DynContext, ObjectId}; +use crate::context::DynContext; use crate::*; /// Handle to a texture on the GPU. 
@@ -11,7 +11,6 @@ use crate::*; #[derive(Debug)] pub struct Texture { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, pub(crate) owned: bool, pub(crate) descriptor: TextureDescriptor<'static>, @@ -19,15 +18,9 @@ pub struct Texture { #[cfg(send_sync)] static_assertions::assert_impl_all!(Texture: Send, Sync); -impl Texture { - /// Returns a globally-unique identifier for this `Texture`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(Texture); +impl Texture { /// Returns the inner hal Texture using a callback. The hal texture will be `None` if the /// backend type argument does not match with this wgpu Texture /// @@ -39,14 +32,17 @@ impl Texture { &self, hal_texture_callback: F, ) -> R { - let texture = self.data.as_ref().downcast_ref().unwrap(); - if let Some(ctx) = self .context .as_any() .downcast_ref::() { - unsafe { ctx.texture_as_hal::(texture, hal_texture_callback) } + unsafe { + ctx.texture_as_hal::( + crate::context::downcast_ref(self.data.as_ref()), + hal_texture_callback, + ) + } } else { hal_texture_callback(None) } @@ -54,18 +50,16 @@ impl Texture { /// Creates a view of this texture. pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView { - let (id, data) = - DynContext::texture_create_view(&*self.context, &self.id, self.data.as_ref(), desc); + let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc); TextureView { context: Arc::clone(&self.context), - id, data, } } /// Destroy the associated native resources as soon as possible. 
pub fn destroy(&self) { - DynContext::texture_destroy(&*self.context, &self.id, self.data.as_ref()); + DynContext::texture_destroy(&*self.context, self.data.as_ref()); } /// Make an `ImageCopyTexture` representing the whole texture. @@ -145,7 +139,7 @@ impl Texture { impl Drop for Texture { fn drop(&mut self) { if self.owned && !thread::panicking() { - self.context.texture_drop(&self.id, self.data.as_ref()); + self.context.texture_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/api/texture_view.rs b/wgpu/src/api/texture_view.rs index b6e60a3c60..862fe21999 100644 --- a/wgpu/src/api/texture_view.rs +++ b/wgpu/src/api/texture_view.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, thread}; -use crate::context::ObjectId; use crate::*; /// Handle to a texture view. @@ -12,21 +11,14 @@ use crate::*; #[derive(Debug)] pub struct TextureView { pub(crate) context: Arc, - pub(crate) id: ObjectId, pub(crate) data: Box, } #[cfg(send_sync)] static_assertions::assert_impl_all!(TextureView: Send, Sync); -impl TextureView { - /// Returns a globally-unique identifier for this `TextureView`. - /// - /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be different for all resources created from the same `Instance`. - pub fn global_id(&self) -> Id { - Id::new(self.id) - } +super::impl_partialeq_eq_hash!(TextureView); +impl TextureView { /// Returns the inner hal TextureView using a callback. 
The hal texture will be `None` if the /// backend type argument does not match with this wgpu Texture /// @@ -38,17 +30,16 @@ impl TextureView { &self, hal_texture_view_callback: F, ) -> R { - use wgc::id::TextureViewId; - - let texture_view_id = TextureViewId::from(self.id); - if let Some(ctx) = self .context .as_any() .downcast_ref::() { unsafe { - ctx.texture_view_as_hal::(texture_view_id, hal_texture_view_callback) + ctx.texture_view_as_hal::( + crate::context::downcast_ref(self.data.as_ref()), + hal_texture_view_callback, + ) } } else { hal_texture_view_callback(None) @@ -59,7 +50,7 @@ impl TextureView { impl Drop for TextureView { fn drop(&mut self) { if !thread::panicking() { - self.context.texture_view_drop(&self.id, self.data.as_ref()); + self.context.texture_view_drop(self.data.as_ref()); } } } diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs index fb74a9c74a..a1fb459fb9 100644 --- a/wgpu/src/backend/webgpu.rs +++ b/wgpu/src/backend/webgpu.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] +mod defined_non_null_js_value; mod ext_bindings; mod webgpu_sys; @@ -10,54 +11,28 @@ use std::{ collections::HashMap, fmt, future::Future, - marker::PhantomData, - num::NonZeroU64, ops::Range, pin::Pin, rc::Rc, - sync::atomic::{AtomicU64, Ordering}, task::{self, Poll}, }; use wasm_bindgen::{prelude::*, JsCast}; use crate::{ - context::{downcast_ref, ObjectId, QueueWriteBuffer, Unused}, + context::{downcast_ref, QueueWriteBuffer}, CompilationInfo, SurfaceTargetUnsafe, UncapturedErrorHandler, }; -fn create_identified(value: T) -> (Identified, Sendable) { - static NEXT_ID: AtomicU64 = AtomicU64::new(1); - let id = NEXT_ID.fetch_add(1, Ordering::Relaxed); - ( - Identified(NonZeroU64::new(id).unwrap(), PhantomData), - Sendable(value), - ) -} +use defined_non_null_js_value::DefinedNonNullJsValue; // We need to make a wrapper for some of the handle types returned by the web backend to make them // implement `Send` and `Sync` to match native. 
// // SAFETY: All webgpu handle types in wasm32 are internally a `JsValue`, and `JsValue` is neither -// Send nor Sync. Currently, wasm32 has no threading support so implementing `Send` or `Sync` for a -// type is (for now) harmless. Eventually wasm32 will support threading, and depending on how this -// is integrated (or not integrated) with values like those in webgpu, this may become unsound. - -impl From for Identified { - fn from(object_id: ObjectId) -> Self { - Self(object_id.global_id(), PhantomData) - } -} - -impl From> for ObjectId { - fn from(identified: Identified) -> Self { - Self::new( - // TODO: the ID isn't used, so we hardcode it to 1 for now until we rework this - // API. - NonZeroU64::new(1).unwrap(), - identified.0, - ) - } -} +// Send nor Sync. Currently, wasm32 has no threading support by default, so implementing `Send` or +// `Sync` for a type is harmless. However, nightly Rust supports compiling wasm with experimental +// threading support via `--target-features`. If `wgpu` is being compiled with those features, we do +// not implement `Send` and `Sync` on the webgpu handle types. #[derive(Clone, Debug)] pub(crate) struct Sendable(T); @@ -66,14 +41,10 @@ unsafe impl Send for Sendable {} #[cfg(send_sync)] unsafe impl Sync for Sendable {} -#[derive(Clone, Debug)] -pub(crate) struct Identified(std::num::NonZeroU64, PhantomData); -#[cfg(send_sync)] -unsafe impl Send for Identified {} -#[cfg(send_sync)] -unsafe impl Sync for Identified {} - -pub(crate) struct ContextWebGpu(webgpu_sys::Gpu); +pub(crate) struct ContextWebGpu { + /// `None` if browser does not advertise support for WebGPU. + gpu: Option>, +} #[cfg(send_sync)] unsafe impl Send for ContextWebGpu {} #[cfg(send_sync)] @@ -224,6 +195,36 @@ impl MakeSendFuture { #[cfg(send_sync)] unsafe impl Send for MakeSendFuture {} +/// Wraps a future that returns `Option` and adds the ability to immediately +/// return None. 
+pub(crate) struct OptionFuture(Option); + +impl>, T> Future for OptionFuture { + type Output = Option; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + // This is safe because we have no Drop implementation to violate the Pin requirements and + // do not provide any means of moving the inner future. + unsafe { + let this = self.get_unchecked_mut(); + match &mut this.0 { + Some(future) => Pin::new_unchecked(future).poll(cx), + None => task::Poll::Ready(None), + } + } + } +} + +impl OptionFuture { + fn some(future: F) -> Self { + Self(Some(future)) + } + + fn none() -> Self { + Self(None) + } +} + fn map_texture_format(texture_format: wgt::TextureFormat) -> webgpu_sys::GpuTextureFormat { use webgpu_sys::GpuTextureFormat as tf; use wgt::TextureFormat; @@ -261,7 +262,7 @@ fn map_texture_format(texture_format: wgt::TextureFormat) -> webgpu_sys::GpuText unimplemented!("Current version of web_sys is missing {texture_format:?}") } TextureFormat::Rgb10a2Unorm => tf::Rgb10a2unorm, - TextureFormat::Rg11b10UFloat => tf::Rg11b10ufloat, + TextureFormat::Rg11b10Ufloat => tf::Rg11b10ufloat, // 64-bit formats TextureFormat::R64Uint => tf::R64uint, TextureFormat::Rg32Uint => tf::Rg32uint, @@ -889,14 +890,9 @@ fn map_js_sys_limits(limits: &wgt::Limits) -> js_sys::Object { type JsFutureResult = Result; -fn future_request_adapter( - result: JsFutureResult, -) -> Option<( - Identified, - Sendable, -)> { +fn future_request_adapter(result: JsFutureResult) -> Option> { match result.and_then(wasm_bindgen::JsCast::dyn_into) { - Ok(adapter) => Some(create_identified(adapter)), + Ok(adapter) => Some(Sendable(adapter)), Err(_) => None, } } @@ -905,19 +901,17 @@ fn future_request_device( result: JsFutureResult, ) -> Result< ( - Identified, Sendable, - Identified, Sendable, ), crate::RequestDeviceError, > { result .map(|js_value| { - let (device_id, device_data) = create_identified(webgpu_sys::GpuDevice::from(js_value)); - let (queue_id, queue_data) = 
create_identified(device_data.0.queue()); + let device_data = Sendable(webgpu_sys::GpuDevice::from(js_value)); + let queue_data = Sendable(device_data.0.queue()); - (device_id, device_data, queue_id, queue_data) + (device_data, queue_data) }) .map_err(|error_value| crate::RequestDeviceError { inner: crate::RequestDeviceErrorKind::WebGpu(error_value), @@ -1020,13 +1014,7 @@ impl ContextWebGpu { &self, canvas: Canvas, context_result: Result, wasm_bindgen::JsValue>, - ) -> Result< - ( - ::SurfaceId, - ::SurfaceData, - ), - crate::CreateSurfaceError, - > { + ) -> Result<::SurfaceData, crate::CreateSurfaceError> { let context: js_sys::Object = match context_result { Ok(Some(context)) => context, Ok(None) => { @@ -1061,7 +1049,7 @@ impl ContextWebGpu { .dyn_into() .expect("canvas context is not a GPUCanvasContext"); - Ok(create_identified((canvas, context))) + Ok(Sendable((canvas, context))) } /// Get mapped buffer range directly as a `js_sys::ArrayBuffer`. @@ -1095,95 +1083,74 @@ pub enum Canvas { Offscreen(web_sys::OffscreenCanvas), } -/// Returns the browsers gpu object or `None` if the current context is neither the main thread nor a dedicated worker. +#[derive(Debug, Clone, Copy)] +pub struct BrowserGpuPropertyInaccessible; + +/// Returns the browser's gpu object or `Err(BrowserGpuPropertyInaccessible)` if +/// the current context is neither the main thread nor a dedicated worker. /// -/// If WebGPU is not supported, the Gpu property is `undefined` (but *not* necessarily `None`). +/// If WebGPU is not supported, the Gpu property is `undefined`, and so this +/// function will return `Ok(None)`. 
/// /// See: /// * /// * -pub fn get_browser_gpu_property() -> Option { +pub fn get_browser_gpu_property( +) -> Result>, BrowserGpuPropertyInaccessible> { let global: Global = js_sys::global().unchecked_into(); - if !global.window().is_undefined() { + let maybe_undefined_gpu: webgpu_sys::Gpu = if !global.window().is_undefined() { let navigator = global.unchecked_into::().navigator(); - Some(ext_bindings::NavigatorGpu::gpu(&navigator)) + ext_bindings::NavigatorGpu::gpu(&navigator) } else if !global.worker().is_undefined() { let navigator = global .unchecked_into::() .navigator(); - Some(ext_bindings::NavigatorGpu::gpu(&navigator)) + ext_bindings::NavigatorGpu::gpu(&navigator) } else { - None - } + return Err(BrowserGpuPropertyInaccessible); + }; + Ok(DefinedNonNullJsValue::new(maybe_undefined_gpu)) } impl crate::context::Context for ContextWebGpu { - type AdapterId = Identified; type AdapterData = Sendable; - type DeviceId = Identified; type DeviceData = Sendable; - type QueueId = Identified; type QueueData = Sendable; - type ShaderModuleId = Identified; type ShaderModuleData = Sendable; - type BindGroupLayoutId = Identified; type BindGroupLayoutData = Sendable; - type BindGroupId = Identified; type BindGroupData = Sendable; - type TextureViewId = Identified; type TextureViewData = Sendable; - type SamplerId = Identified; type SamplerData = Sendable; - type BufferId = Identified; type BufferData = Sendable; - type TextureId = Identified; type TextureData = Sendable; - type QuerySetId = Identified; type QuerySetData = Sendable; - type PipelineLayoutId = Identified; type PipelineLayoutData = Sendable; - type RenderPipelineId = Identified; type RenderPipelineData = Sendable; - type ComputePipelineId = Identified; type ComputePipelineData = Sendable; - type CommandEncoderId = Identified; type CommandEncoderData = Sendable; - type ComputePassId = Identified; type ComputePassData = Sendable; - type RenderPassId = Identified; type RenderPassData = Sendable; - type 
CommandBufferId = Identified; type CommandBufferData = Sendable; - type RenderBundleEncoderId = Identified; type RenderBundleEncoderData = Sendable; - type RenderBundleId = Identified; type RenderBundleData = Sendable; - type SurfaceId = Identified<(Canvas, webgpu_sys::GpuCanvasContext)>; type SurfaceData = Sendable<(Canvas, webgpu_sys::GpuCanvasContext)>; type SurfaceOutputDetail = SurfaceOutputDetail; type SubmissionIndexData = (); - type PipelineCacheId = Unused; type PipelineCacheData = (); - type RequestAdapterFuture = MakeSendFuture< - wasm_bindgen_futures::JsFuture, - fn(JsFutureResult) -> Option<(Self::AdapterId, Self::AdapterData)>, + type RequestAdapterFuture = OptionFuture< + MakeSendFuture< + wasm_bindgen_futures::JsFuture, + fn(JsFutureResult) -> Option, + >, >; type RequestDeviceFuture = MakeSendFuture< wasm_bindgen_futures::JsFuture, fn( JsFutureResult, - ) -> Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - crate::RequestDeviceError, - >, + ) -> Result<(Self::DeviceData, Self::QueueData), crate::RequestDeviceError>, >; type PopErrorScopeFuture = MakeSendFuture Option>; @@ -1194,18 +1161,19 @@ impl crate::context::Context for ContextWebGpu { >; fn init(_instance_desc: wgt::InstanceDescriptor) -> Self { - let Some(gpu) = get_browser_gpu_property() else { + let Ok(gpu) = get_browser_gpu_property() else { panic!( "Accessing the GPU is only supported on the main thread or from a dedicated worker" ); }; - ContextWebGpu(gpu) + + ContextWebGpu { gpu } } unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError> { + ) -> Result { match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle: _, @@ -1269,17 +1237,20 @@ impl crate::context::Context for ContextWebGpu { if let Some(mapped_pref) = mapped_power_preference { mapped_options.power_preference(mapped_pref); } - let adapter_promise = 
self.0.request_adapter_with_options(&mapped_options); - - MakeSendFuture::new( - wasm_bindgen_futures::JsFuture::from(adapter_promise), - future_request_adapter, - ) + if let Some(gpu) = &self.gpu { + let adapter_promise = gpu.request_adapter_with_options(&mapped_options); + OptionFuture::some(MakeSendFuture::new( + wasm_bindgen_futures::JsFuture::from(adapter_promise), + future_request_adapter, + )) + } else { + // Gpu is undefined; WebGPU is not supported in this browser. + OptionFuture::none() + } } fn adapter_request_device( &self, - _adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -1333,44 +1304,29 @@ impl crate::context::Context for ContextWebGpu { fn adapter_is_surface_supported( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, - _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData, ) -> bool { true } - fn adapter_features( - &self, - _adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> wgt::Features { + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> wgt::Features { map_wgt_features(adapter_data.0.features()) } - fn adapter_limits( - &self, - _adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> wgt::Limits { + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> wgt::Limits { map_wgt_limits(adapter_data.0.limits()) } fn adapter_downlevel_capabilities( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::DownlevelCapabilities { // WebGPU is assumed to be fully compliant wgt::DownlevelCapabilities::default() } - fn adapter_get_info( - &self, - _adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> wgt::AdapterInfo { + fn adapter_get_info(&self, _adapter_data: &Self::AdapterData) -> wgt::AdapterInfo { // TODO: web-sys has no way of getting information on adapters wgt::AdapterInfo { name: String::new(), @@ -1385,16 +1341,14 @@ impl 
crate::context::Context for ContextWebGpu { fn adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, format: wgt::TextureFormat, ) -> wgt::TextureFormatFeatures { - format.guaranteed_format_features(self.adapter_features(adapter, adapter_data)) + format.guaranteed_format_features(self.adapter_features(adapter_data)) } fn adapter_get_presentation_timestamp( &self, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp { wgt::PresentationTimestamp::INVALID_TIMESTAMP @@ -1402,9 +1356,7 @@ impl crate::context::Context for ContextWebGpu { fn surface_get_capabilities( &self, - _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData, - _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities { let mut formats = vec![ @@ -1415,7 +1367,11 @@ impl crate::context::Context for ContextWebGpu { let mut mapped_formats = formats.iter().map(|format| map_texture_format(*format)); // Preferred canvas format will only be either "rgba8unorm" or "bgra8unorm". 
// https://www.w3.org/TR/webgpu/#dom-gpu-getpreferredcanvasformat - let preferred_format = self.0.get_preferred_canvas_format(); + let gpu = self + .gpu + .as_ref() + .expect("Caller could not have created an adapter if gpu is undefined."); + let preferred_format = gpu.get_preferred_canvas_format(); if let Some(index) = mapped_formats.position(|format| format == preferred_format) { formats.swap(0, index); } @@ -1433,9 +1389,7 @@ impl crate::context::Context for ContextWebGpu { fn surface_configure( &self, - _surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - _device: &Self::DeviceId, device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ) { @@ -1479,21 +1433,14 @@ impl crate::context::Context for ContextWebGpu { fn surface_get_current_texture( &self, - _surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, ) -> ( - Option, Option, wgt::SurfaceStatus, Self::SurfaceOutputDetail, ) { - let (surface_id, surface_data) = create_identified(surface_data.0 .1.get_current_texture()); - ( - Some(surface_id), - Some(surface_data), - wgt::SurfaceStatus::Good, - (), - ) + let surface_data = Sendable(surface_data.0 .1.get_current_texture()); + (Some(surface_data), wgt::SurfaceStatus::Good, ()) } fn surface_present(&self, _detail: &Self::SurfaceOutputDetail) { @@ -1504,31 +1451,14 @@ impl crate::context::Context for ContextWebGpu { // Can't really discard this on the Web } - fn device_features( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> wgt::Features { + fn device_features(&self, device_data: &Self::DeviceData) -> wgt::Features { map_wgt_features(device_data.0.features()) } - fn device_limits( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> wgt::Limits { + fn device_limits(&self, device_data: &Self::DeviceData) -> wgt::Limits { map_wgt_limits(device_data.0.limits()) } - fn device_downlevel_properties( - &self, - _device: &Self::DeviceId, - _device_data: &Self::DeviceData, - ) -> 
wgt::DownlevelCapabilities { - // WebGPU is assumed to be fully compliant - wgt::DownlevelCapabilities::default() - } - #[cfg_attr( not(any( feature = "spirv", @@ -1540,11 +1470,10 @@ impl crate::context::Context for ContextWebGpu { )] fn device_create_shader_module( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: crate::ShaderModuleDescriptor<'_>, _shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let shader_module_result = match desc.source { #[cfg(feature = "spirv")] crate::ShaderSource::SpirV(ref spv) => { @@ -1673,25 +1602,22 @@ impl crate::context::Context for ContextWebGpu { module: device_data.0.create_shader_module(&descriptor), compilation_info, }; - let (id, data) = create_identified(shader_module); - (id, data) + Sendable(shader_module) } unsafe fn device_create_shader_module_spirv( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, _desc: &crate::ShaderModuleDescriptorSpirV<'_>, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { unreachable!("SPIRV_SHADER_PASSTHROUGH is not enabled for this backend") } fn device_create_bind_group_layout( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { + ) -> Self::BindGroupLayoutData { let mapped_bindings = desc .entries .iter() @@ -1783,15 +1709,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_bind_group_layout(&mapped_desc)) + Sendable(device_data.0.create_bind_group_layout(&mapped_desc)) } fn device_create_bind_group( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData) { + ) -> Self::BindGroupData { let mapped_entries = desc 
.entries .iter() @@ -1843,15 +1768,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_bind_group(&mapped_desc)) + Sendable(device_data.0.create_bind_group(&mapped_desc)) } fn device_create_pipeline_layout( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData) { + ) -> Self::PipelineLayoutData { let temp_layouts = desc .bind_group_layouts .iter() @@ -1865,15 +1789,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_pipeline_layout(&mapped_desc)) + Sendable(device_data.0.create_pipeline_layout(&mapped_desc)) } fn device_create_render_pipeline( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData) { + ) -> Self::RenderPipelineData { let module: &::ShaderModuleData = downcast_ref(desc.vertex.module.data.as_ref()); let mut mapped_vertex_state = webgpu_sys::GpuVertexState::new(&module.0.module); @@ -1975,15 +1898,14 @@ impl crate::context::Context for ContextWebGpu { let mapped_primitive = map_primitive_state(&desc.primitive); mapped_desc.primitive(&mapped_primitive); - create_identified(device_data.0.create_render_pipeline(&mapped_desc)) + Sendable(device_data.0.create_render_pipeline(&mapped_desc)) } fn device_create_compute_pipeline( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData) { + ) -> Self::ComputePipelineData { let shader_module: &::ShaderModuleData = downcast_ref(desc.module.data.as_ref()); let mut mapped_compute_stage = @@ -2008,32 +1930,29 @@ impl crate::context::Context for ContextWebGpu { 
mapped_desc.label(label); } - create_identified(device_data.0.create_compute_pipeline(&mapped_desc)) + Sendable(device_data.0.create_compute_pipeline(&mapped_desc)) } unsafe fn device_create_pipeline_cache( &self, - _: &Self::DeviceId, _: &Self::DeviceData, _: &crate::PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData) { - (Unused, ()) + ) -> Self::PipelineCacheData { } - fn pipeline_cache_drop(&self, _: &Self::PipelineCacheId, _: &Self::PipelineCacheData) {} + fn pipeline_cache_drop(&self, _: &Self::PipelineCacheData) {} fn device_create_buffer( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData) { + ) -> Self::BufferData { let mut mapped_desc = webgpu_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); mapped_desc.mapped_at_creation(desc.mapped_at_creation); if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(WebBuffer::new( + Sendable(WebBuffer::new( device_data.0.create_buffer(&mapped_desc), desc, )) @@ -2041,10 +1960,9 @@ impl crate::context::Context for ContextWebGpu { fn device_create_texture( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData) { + ) -> Self::TextureData { let mut mapped_desc = webgpu_sys::GpuTextureDescriptor::new( map_texture_format(desc.format), &map_extent_3d(desc.size), @@ -2062,15 +1980,14 @@ impl crate::context::Context for ContextWebGpu { .map(|format| JsValue::from(map_texture_format(*format))) .collect::(); mapped_desc.view_formats(&mapped_view_formats); - create_identified(device_data.0.create_texture(&mapped_desc)) + Sendable(device_data.0.create_texture(&mapped_desc)) } fn device_create_sampler( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData) { + ) -> 
Self::SamplerData { let mut mapped_desc = webgpu_sys::GpuSamplerDescriptor::new(); mapped_desc.address_mode_u(map_address_mode(desc.address_mode_u)); mapped_desc.address_mode_v(map_address_mode(desc.address_mode_v)); @@ -2088,15 +2005,14 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_sampler_with_descriptor(&mapped_desc)) + Sendable(device_data.0.create_sampler_with_descriptor(&mapped_desc)) } fn device_create_query_set( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &wgt::QuerySetDescriptor>, - ) -> (Self::QuerySetId, Self::QuerySetData) { + ) -> Self::QuerySetData { let ty = match desc.ty { wgt::QueryType::Occlusion => webgpu_sys::GpuQueryType::Occlusion, wgt::QueryType::Timestamp => webgpu_sys::GpuQueryType::Timestamp, @@ -2106,20 +2022,19 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified(device_data.0.create_query_set(&mapped_desc)) + Sendable(device_data.0.create_query_set(&mapped_desc)) } fn device_create_command_encoder( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData) { + ) -> Self::CommandEncoderData { let mut mapped_desc = webgpu_sys::GpuCommandEncoderDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); } - create_identified( + Sendable( device_data .0 .create_command_encoder_with_descriptor(&mapped_desc), @@ -2128,10 +2043,9 @@ impl crate::context::Context for ContextWebGpu { fn device_create_render_bundle_encoder( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::RenderBundleEncoderDescriptor<'_>, - ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData) { + ) -> Self::RenderBundleEncoderData { let mapped_color_formats = desc .color_formats .iter() @@ 
-2151,40 +2065,23 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.stencil_read_only(ds.stencil_read_only); } mapped_desc.sample_count(desc.sample_count); - create_identified(device_data.0.create_render_bundle_encoder(&mapped_desc)) - } - - #[doc(hidden)] - fn device_make_invalid(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { - // Unimplemented + Sendable(device_data.0.create_render_bundle_encoder(&mapped_desc)) } - fn device_drop(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { + fn device_drop(&self, _device_data: &Self::DeviceData) { // Device is dropped automatically } - fn device_destroy(&self, _buffer: &Self::DeviceId, device_data: &Self::DeviceData) { + fn device_destroy(&self, device_data: &Self::DeviceData) { device_data.0.destroy(); } - fn device_mark_lost( - &self, - _device: &Self::DeviceId, - _device_data: &Self::DeviceData, - _message: &str, - ) { - // TODO: figure out the GPUDevice implementation of this, including resolving - // the device.lost promise, which will require a different invocation pattern - // with a callback. 
- } - - fn queue_drop(&self, _queue: &Self::QueueId, _queue_data: &Self::QueueData) { + fn queue_drop(&self, _queue_data: &Self::QueueData) { // Queue is dropped automatically } fn device_set_device_lost_callback( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, device_lost_callback: crate::context::DeviceLostCallback, ) { @@ -2206,7 +2103,6 @@ impl crate::context::Context for ContextWebGpu { fn device_poll( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, _maintain: crate::Maintain, ) -> crate::MaintainResult { @@ -2216,7 +2112,6 @@ impl crate::context::Context for ContextWebGpu { fn device_on_uncaptured_error( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ) { @@ -2231,12 +2126,7 @@ impl crate::context::Context for ContextWebGpu { f.forget(); } - fn device_push_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: crate::ErrorFilter, - ) { + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { device_data.0.push_error_scope(match filter { crate::ErrorFilter::OutOfMemory => webgpu_sys::GpuErrorFilter::OutOfMemory, crate::ErrorFilter::Validation => webgpu_sys::GpuErrorFilter::Validation, @@ -2244,11 +2134,7 @@ impl crate::context::Context for ContextWebGpu { }); } - fn device_pop_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture { + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture { let error_promise = device_data.0.pop_error_scope(); MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(error_promise), @@ -2258,7 +2144,6 @@ impl crate::context::Context for ContextWebGpu { fn buffer_map_async( &self, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: crate::MapMode, range: Range, @@ -2277,7 +2162,6 @@ impl crate::context::Context for ContextWebGpu { fn buffer_get_mapped_range( &self, - 
_buffer: &Self::BufferId, buffer_data: &Self::BufferData, sub_range: Range, ) -> Box { @@ -2289,14 +2173,13 @@ impl crate::context::Context for ContextWebGpu { }) } - fn buffer_unmap(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + fn buffer_unmap(&self, buffer_data: &Self::BufferData) { buffer_data.0.buffer.unmap(); buffer_data.0.mapping.borrow_mut().mapped_buffer = None; } fn shader_get_compilation_info( &self, - _shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture { let compilation_info_promise = shader_data.0.module.get_compilation_info(); @@ -2312,10 +2195,9 @@ impl crate::context::Context for ContextWebGpu { fn texture_create_view( &self, - _texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &crate::TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData) { + ) -> Self::TextureViewData { let mut mapped = webgpu_sys::GpuTextureViewDescriptor::new(); if let Some(dim) = desc.dimension { mapped.dimension(map_texture_view_dimension(dim)); @@ -2335,147 +2217,102 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped.label(label); } - create_identified(texture_data.0.create_view_with_descriptor(&mapped)) + Sendable(texture_data.0.create_view_with_descriptor(&mapped)) } - fn surface_drop(&self, _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData) { + fn surface_drop(&self, _surface_data: &Self::SurfaceData) { // Dropped automatically } - fn adapter_drop(&self, _adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData) { + fn adapter_drop(&self, _adapter_data: &Self::AdapterData) { // Dropped automatically } - fn buffer_destroy(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + fn buffer_destroy(&self, buffer_data: &Self::BufferData) { buffer_data.0.buffer.destroy(); } - fn buffer_drop(&self, _buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { + fn buffer_drop(&self, _buffer_data: 
&Self::BufferData) { // Dropped automatically } - fn texture_destroy(&self, _texture: &Self::TextureId, texture_data: &Self::TextureData) { + fn texture_destroy(&self, texture_data: &Self::TextureData) { texture_data.0.destroy(); } - fn texture_drop(&self, _texture: &Self::TextureId, _texture_data: &Self::TextureData) { + fn texture_drop(&self, _texture_data: &Self::TextureData) { // Dropped automatically } - fn texture_view_drop( - &self, - _texture_view: &Self::TextureViewId, - _texture_view_data: &Self::TextureViewData, - ) { + fn texture_view_drop(&self, _texture_view_data: &Self::TextureViewData) { // Dropped automatically } - fn sampler_drop(&self, _sampler: &Self::SamplerId, _sampler_data: &Self::SamplerData) { + fn sampler_drop(&self, _sampler_data: &Self::SamplerData) { // Dropped automatically } - fn query_set_drop(&self, _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData) { + fn query_set_drop(&self, _query_set_data: &Self::QuerySetData) { // Dropped automatically } - fn bind_group_drop( - &self, - _bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, - ) { + fn bind_group_drop(&self, _bind_group_data: &Self::BindGroupData) { // Dropped automatically } - fn bind_group_layout_drop( - &self, - _bind_group_layout: &Self::BindGroupLayoutId, - _bind_group_layout_data: &Self::BindGroupLayoutData, - ) { + fn bind_group_layout_drop(&self, _bind_group_layout_data: &Self::BindGroupLayoutData) { // Dropped automatically } - fn pipeline_layout_drop( - &self, - _pipeline_layout: &Self::PipelineLayoutId, - _pipeline_layout_data: &Self::PipelineLayoutData, - ) { + fn pipeline_layout_drop(&self, _pipeline_layout_data: &Self::PipelineLayoutData) { // Dropped automatically } - fn shader_module_drop( - &self, - _shader_module: &Self::ShaderModuleId, - _shader_module_data: &Self::ShaderModuleData, - ) { + fn shader_module_drop(&self, _shader_module_data: &Self::ShaderModuleData) { // Dropped automatically } - fn command_encoder_drop( - 
&self, - _command_encoder: &Self::CommandEncoderId, - _command_encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_drop(&self, _command_encoder_data: &Self::CommandEncoderData) { // Dropped automatically } - fn command_buffer_drop( - &self, - _command_buffer: &Self::CommandBufferId, - _command_buffer_data: &Self::CommandBufferData, - ) { + fn command_buffer_drop(&self, _command_buffer_data: &Self::CommandBufferData) { // Dropped automatically } - fn render_bundle_drop( - &self, - _render_bundle: &Self::RenderBundleId, - _render_bundle_data: &Self::RenderBundleData, - ) { + fn render_bundle_drop(&self, _render_bundle_data: &Self::RenderBundleData) { // Dropped automatically } - fn compute_pipeline_drop( - &self, - _pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, - ) { + fn compute_pipeline_drop(&self, _pipeline_data: &Self::ComputePipelineData) { // Dropped automatically } - fn render_pipeline_drop( - &self, - _pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, - ) { + fn render_pipeline_drop(&self, _pipeline_data: &Self::RenderPipelineData) { // Dropped automatically } fn compute_pipeline_get_bind_group_layout( &self, - _pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - create_identified(pipeline_data.0.get_bind_group_layout(index)) + ) -> Self::BindGroupLayoutData { + Sendable(pipeline_data.0.get_bind_group_layout(index)) } fn render_pipeline_get_bind_group_layout( &self, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - create_identified(pipeline_data.0.get_bind_group_layout(index)) + ) -> Self::BindGroupLayoutData { + Sendable(pipeline_data.0.get_bind_group_layout(index)) } fn command_encoder_copy_buffer_to_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: 
&Self::CommandEncoderData, - _source: &Self::BufferId, source_data: &Self::BufferData, source_offset: wgt::BufferAddress, - _destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, copy_size: wgt::BufferAddress, @@ -2493,7 +2330,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_buffer_to_texture( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyBuffer<'_>, destination: crate::ImageCopyTexture<'_>, @@ -2510,7 +2346,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_texture_to_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyBuffer<'_>, @@ -2527,7 +2362,6 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_copy_texture_to_texture( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyTexture<'_>, @@ -2544,10 +2378,9 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_begin_compute_pass( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::ComputePassDescriptor<'_>, - ) -> (Self::ComputePassId, Self::ComputePassData) { + ) -> Self::ComputePassData { let mut mapped_desc = webgpu_sys::GpuComputePassDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); @@ -2566,7 +2399,7 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.timestamp_writes(&writes); } - create_identified( + Sendable( encoder_data .0 .begin_compute_pass_with_descriptor(&mapped_desc), @@ -2575,10 +2408,9 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_begin_render_pass( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> 
(Self::RenderPassId, Self::RenderPassData) { + ) -> Self::RenderPassData { let mapped_color_attachments = desc .color_attachments .iter() @@ -2668,16 +2500,15 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.timestamp_writes(&writes); } - create_identified(encoder_data.0.begin_render_pass(&mapped_desc)) + Sendable(encoder_data.0.begin_render_pass(&mapped_desc)) } fn command_encoder_finish( &self, - _encoder: Self::CommandEncoderId, encoder_data: &mut Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData) { + ) -> Self::CommandBufferData { let label = encoder_data.0.label(); - create_identified(if label.is_empty() { + Sendable(if label.is_empty() { encoder_data.0.finish() } else { let mut mapped_desc = webgpu_sys::GpuCommandBufferDescriptor::new(); @@ -2688,9 +2519,8 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_clear_texture( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, - _texture: &crate::Texture, + _texture_data: &Self::TextureData, _subresource_range: &wgt::ImageSubresourceRange, ) { //TODO @@ -2698,29 +2528,25 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_clear_buffer( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &crate::Buffer, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - let buffer: &::BufferData = - downcast_ref(buffer.data.as_ref()); match size { Some(size) => encoder_data.0.clear_buffer_with_f64_and_f64( - &buffer.0.buffer, + &buffer_data.0.buffer, offset as f64, size as f64, ), None => encoder_data .0 - .clear_buffer_with_f64(&buffer.0.buffer, offset as f64), + .clear_buffer_with_f64(&buffer_data.0.buffer, offset as f64), } } fn command_encoder_insert_debug_marker( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, _label: &str, ) { @@ -2730,7 +2556,6 @@ impl crate::context::Context for ContextWebGpu { fn 
command_encoder_push_debug_group( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, _label: &str, ) { @@ -2738,20 +2563,14 @@ impl crate::context::Context for ContextWebGpu { // encoder.push_debug_group(label); } - fn command_encoder_pop_debug_group( - &self, - _encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_pop_debug_group(&self, _encoder_data: &Self::CommandEncoderData) { // Not available in gecko yet // encoder.pop_debug_group(); } fn command_encoder_write_timestamp( &self, - _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -2762,13 +2581,10 @@ impl crate::context::Context for ContextWebGpu { fn command_encoder_resolve_query_set( &self, - _encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - _query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - _destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, ) { @@ -2783,11 +2599,10 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_finish( &self, - _encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &crate::RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData) { - create_identified(match desc.label { + ) -> Self::RenderBundleData { + Sendable(match desc.label { Some(label) => { let mut mapped_desc = webgpu_sys::GpuRenderBundleDescriptor::new(); mapped_desc.label(label); @@ -2799,9 +2614,7 @@ impl crate::context::Context for ContextWebGpu { fn queue_write_buffer( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, data: &[u8], @@ -2828,9 +2641,7 @@ impl crate::context::Context for 
ContextWebGpu { fn queue_validate_write_buffer( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, @@ -2865,7 +2676,6 @@ impl crate::context::Context for ContextWebGpu { fn queue_create_staging_buffer( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, size: wgt::BufferSize, ) -> Option> { @@ -2876,9 +2686,7 @@ impl crate::context::Context for ContextWebGpu { fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, staging_buffer: &dyn QueueWriteBuffer, @@ -2888,19 +2696,11 @@ impl crate::context::Context for ContextWebGpu { .downcast_ref::() .unwrap() .slice(); - self.queue_write_buffer( - queue, - queue_data, - buffer, - buffer_data, - offset, - staging_buffer, - ) + self.queue_write_buffer(queue_data, buffer_data, offset, staging_buffer) } fn queue_write_texture( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, texture: crate::ImageCopyTexture<'_>, data: &[u8], @@ -2936,7 +2736,6 @@ impl crate::context::Context for ContextWebGpu { fn queue_copy_external_image_to_texture( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, @@ -2951,43 +2750,36 @@ impl crate::context::Context for ContextWebGpu { ); } - fn queue_submit>( + fn queue_submit>( &self, - _queue: &Self::QueueId, queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData { let temp_command_buffers = command_buffers - .map(|(_, data)| data.0) + .map(|data| data.0) .collect::(); queue_data.0.submit(&temp_command_buffers); } - fn queue_get_timestamp_period( - &self, - _queue: &Self::QueueId, - _queue_data: &Self::QueueData, - ) -> f32 { + fn queue_get_timestamp_period(&self, _queue_data: &Self::QueueData) -> f32 { // Timestamp 
values are always in nanoseconds, see https://gpuweb.github.io/gpuweb/#timestamp 1.0 } fn queue_on_submitted_work_done( &self, - _queue: &Self::QueueId, _queue_data: &Self::QueueData, _callback: crate::context::SubmittedWorkDoneCallback, ) { unimplemented!() } - fn device_start_capture(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) {} - fn device_stop_capture(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) {} + fn device_start_capture(&self, _device_data: &Self::DeviceData) {} + fn device_stop_capture(&self, _device_data: &Self::DeviceData) {} fn device_get_internal_counters( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> wgt::InternalCounters { Default::default() @@ -2995,25 +2787,18 @@ impl crate::context::Context for ContextWebGpu { fn device_generate_allocator_report( &self, - _device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> Option { None } - fn pipeline_cache_get_data( - &self, - _: &Self::PipelineCacheId, - _: &Self::PipelineCacheData, - ) -> Option> { + fn pipeline_cache_get_data(&self, _: &Self::PipelineCacheData) -> Option> { None } fn compute_pass_set_pipeline( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - _pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, ) { pass_data.0.set_pipeline(&pipeline_data.0) @@ -3021,13 +2806,16 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_set_bind_group( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - _bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + if bind_group_data.is_none() { + // TODO: Handle the None case. 
+ return; + } + let bind_group_data = bind_group_data.unwrap(); if offsets.is_empty() { pass_data.0.set_bind_group(index, Some(&bind_group_data.0)); } else { @@ -3045,7 +2833,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_set_push_constants( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _offset: u32, _data: &[u8], @@ -3055,7 +2842,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_insert_debug_marker( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _label: &str, ) { @@ -3065,7 +2851,6 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_push_debug_group( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, _group_label: &str, ) { @@ -3073,20 +2858,14 @@ impl crate::context::Context for ContextWebGpu { // self.0.push_debug_group(group_label); } - fn compute_pass_pop_debug_group( - &self, - _pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_pop_debug_group(&self, _pass_data: &mut Self::ComputePassData) { // Not available in gecko yet // self.0.pop_debug_group(); } fn compute_pass_write_timestamp( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -3095,26 +2874,19 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::ComputePassId, _pass_data: &mut Self::ComputePassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { // Not available in gecko yet } - fn compute_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::ComputePassData) { // Not available in gecko yet } fn 
compute_pass_dispatch_workgroups( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, x: u32, y: u32, @@ -3127,9 +2899,7 @@ impl crate::context::Context for ContextWebGpu { fn compute_pass_dispatch_workgroups_indirect( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3139,19 +2909,13 @@ impl crate::context::Context for ContextWebGpu { ); } - fn compute_pass_end( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { pass_data.0.end(); } fn render_bundle_encoder_set_pipeline( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ) { encoder_data.0.set_pipeline(&pipeline_data.0); @@ -3159,13 +2923,16 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_bind_group( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - _bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + if bind_group_data.is_none() { + // TODO: Handle the None case. 
+ return; + } + let bind_group_data = bind_group_data.unwrap(); if offsets.is_empty() { encoder_data .0 @@ -3185,9 +2952,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_index_buffer( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, @@ -3214,10 +2979,8 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_vertex_buffer( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, @@ -3243,7 +3006,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_set_push_constants( &self, - _encoder: &mut Self::RenderBundleEncoderId, _encoder_data: &mut Self::RenderBundleEncoderData, _stages: wgt::ShaderStages, _offset: u32, @@ -3254,7 +3016,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, @@ -3271,7 +3032,6 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indexed( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -3290,9 +3050,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3303,9 +3061,7 @@ impl crate::context::Context for ContextWebGpu { fn render_bundle_encoder_draw_indexed_indirect( &self, - _encoder: &mut 
Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3314,67 +3070,9 @@ impl crate::context::Context for ContextWebGpu { .draw_indexed_indirect_with_f64(&indirect_buffer_data.0.buffer, indirect_offset as f64); } - fn render_bundle_encoder_multi_draw_indirect( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, - ) { - panic!("MULTI_DRAW_INDIRECT feature must be enabled to call multi_draw_indirect") - } - - fn render_bundle_encoder_multi_draw_indexed_indirect( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, - ) { - panic!("MULTI_DRAW_INDIRECT feature must be enabled to call multi_draw_indexed_indirect") - } - - fn render_bundle_encoder_multi_draw_indirect_count( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, - _max_count: u32, - ) { - panic!( - "MULTI_DRAW_INDIRECT_COUNT feature must be enabled to call multi_draw_indirect_count" - ) - } - - fn render_bundle_encoder_multi_draw_indexed_indirect_count( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count_buffer: 
&Self::BufferId, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, - _max_count: u32, - ) { - panic!("MULTI_DRAW_INDIRECT_COUNT feature must be enabled to call multi_draw_indexed_indirect_count") - } - fn render_pass_set_pipeline( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ) { pass_data.0.set_pipeline(&pipeline_data.0); @@ -3382,13 +3080,16 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_bind_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - _bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + if bind_group_data.is_none() { + // TODO: Handle the None case. + return; + } + let bind_group_data = bind_group_data.unwrap(); if offsets.is_empty() { pass_data.0.set_bind_group(index, Some(&bind_group_data.0)); } else { @@ -3406,9 +3107,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_index_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, @@ -3435,10 +3134,8 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_vertex_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - _buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, @@ -3464,7 +3161,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_push_constants( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _stages: wgt::ShaderStages, _offset: u32, @@ -3475,7 +3171,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw( &self, - _pass: &mut 
Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, @@ -3492,7 +3187,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw_indexed( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, @@ -3511,9 +3205,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3524,9 +3216,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { @@ -3537,9 +3227,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indirect( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -3549,9 +3237,7 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, _count: u32, @@ -3561,12 +3247,9 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indirect_count( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ 
-3578,12 +3261,9 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_multi_draw_indexed_indirect_count( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _indirect_buffer: &Self::BufferId, _indirect_buffer_data: &Self::BufferData, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, _count_buffer_data: &Self::BufferData, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, @@ -3593,7 +3273,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_blend_constant( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, color: wgt::Color, ) { @@ -3604,7 +3283,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_scissor_rect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -3616,7 +3294,6 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_viewport( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -3632,26 +3309,19 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_set_stencil_reference( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ) { pass_data.0.set_stencil_reference(reference); } - fn render_pass_insert_debug_marker( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - _label: &str, - ) { + fn render_pass_insert_debug_marker(&self, _pass_data: &mut Self::RenderPassData, _label: &str) { // Not available in gecko yet // self.0.insert_debug_marker(label); } fn render_pass_push_debug_group( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _group_label: &str, ) { @@ -3659,20 +3329,14 @@ impl crate::context::Context for ContextWebGpu { // self.0.push_debug_group(group_label); } - fn render_pass_pop_debug_group( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + 
fn render_pass_pop_debug_group(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet // self.0.pop_debug_group(); } fn render_pass_write_timestamp( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { @@ -3681,57 +3345,41 @@ impl crate::context::Context for ContextWebGpu { fn render_pass_begin_occlusion_query( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, _query_index: u32, ) { // Not available in gecko yet } - fn render_pass_end_occlusion_query( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_occlusion_query(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet } fn render_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::RenderPassId, _pass_data: &mut Self::RenderPassData, - _query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData, _query_index: u32, ) { // Not available in gecko yet } - fn render_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::RenderPassData) { // Not available in gecko yet } fn render_pass_execute_bundles( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ) { let mapped = render_bundles - .map(|(_, bundle_data)| &bundle_data.0) + .map(|bundle_data| &bundle_data.0) .collect::(); pass_data.0.execute_bundles(&mapped); } - fn render_pass_end( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end(&self, pass_data: &mut Self::RenderPassData) { pass_data.0.end(); } } diff --git a/wgpu/src/backend/webgpu/defined_non_null_js_value.rs 
b/wgpu/src/backend/webgpu/defined_non_null_js_value.rs new file mode 100644 index 0000000000..fc5a8737ef --- /dev/null +++ b/wgpu/src/backend/webgpu/defined_non_null_js_value.rs @@ -0,0 +1,46 @@ +use std::ops::{Deref, DerefMut}; + +use wasm_bindgen::JsValue; + +/// Derefs to a [`JsValue`] that's known not to be `undefined` or `null`. +#[derive(Debug)] +pub struct DefinedNonNullJsValue(T); + +impl DefinedNonNullJsValue +where + T: AsRef, +{ + pub fn new(value: T) -> Option { + if value.as_ref().is_undefined() || value.as_ref().is_null() { + None + } else { + Some(Self(value)) + } + } +} + +impl Deref for DefinedNonNullJsValue { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for DefinedNonNullJsValue { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl AsRef for DefinedNonNullJsValue { + fn as_ref(&self) -> &T { + &self.0 + } +} + +impl AsMut for DefinedNonNullJsValue { + fn as_mut(&mut self) -> &mut T { + &mut self.0 + } +} diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs index 7c085ec476..3aac20e21f 100644 --- a/wgpu/src/backend/wgpu_core.rs +++ b/wgpu/src/backend/wgpu_core.rs @@ -1,9 +1,8 @@ use crate::{ - context::{ObjectId, Unused}, - AdapterInfo, BindGroupDescriptor, BindGroupLayoutDescriptor, BindingResource, BufferBinding, - BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, CompilationMessage, - CompilationMessageType, ComputePassDescriptor, ComputePipelineDescriptor, - DownlevelCapabilities, Features, Label, Limits, LoadOp, MapMode, Operations, + context::downcast_ref, AdapterInfo, BindGroupDescriptor, BindGroupLayoutDescriptor, + BindingResource, BufferBinding, BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, + CompilationMessage, CompilationMessageType, ComputePassDescriptor, ComputePipelineDescriptor, + DownlevelCapabilities, ErrorSource, Features, Label, Limits, LoadOp, MapMode, Operations, PipelineCacheDescriptor, 
PipelineLayoutDescriptor, RenderBundleEncoderDescriptor, RenderPipelineDescriptor, SamplerDescriptor, ShaderModuleDescriptor, ShaderModuleDescriptorSpirV, ShaderSource, StoreOp, SurfaceStatus, SurfaceTargetUnsafe, @@ -15,7 +14,7 @@ use parking_lot::Mutex; use smallvec::SmallVec; use std::{ any::Any, - borrow::Cow::{Borrowed, Owned}, + borrow::Cow::Borrowed, error::Error, fmt, future::{ready, Ready}, @@ -25,10 +24,7 @@ use std::{ sync::Arc, }; use wgc::error::ContextErrorSource; -use wgc::{ - command::bundle_ffi::*, device::DeviceLostClosure, id::CommandEncoderId, id::TextureViewId, - pipeline::CreateShaderModuleError, -}; +use wgc::{command::bundle_ffi::*, device::DeviceLostClosure, pipeline::CreateShaderModuleError}; use wgt::WasmNotSendSync; pub struct ContextWgpuCore(wgc::global::Global); @@ -65,8 +61,7 @@ impl ContextWgpuCore { #[cfg(native)] pub fn enumerate_adapters(&self, backends: wgt::Backends) -> Vec { - self.0 - .enumerate_adapters(wgc::instance::AdapterInputs::Mask(backends, |_| None)) + self.0.enumerate_adapters(backends) } pub unsafe fn create_adapter_from_hal( @@ -82,21 +77,24 @@ impl ContextWgpuCore { R, >( &self, - adapter: wgc::id::AdapterId, + adapter: &wgc::id::AdapterId, hal_adapter_callback: F, ) -> R { unsafe { self.0 - .adapter_as_hal::(adapter, hal_adapter_callback) + .adapter_as_hal::(*adapter, hal_adapter_callback) } } pub unsafe fn buffer_as_hal) -> R, R>( &self, - id: wgc::id::BufferId, + buffer: &Buffer, hal_buffer_callback: F, ) -> R { - unsafe { self.0.buffer_as_hal::(id, hal_buffer_callback) } + unsafe { + self.0 + .buffer_as_hal::(buffer.id, hal_buffer_callback) + } } pub unsafe fn create_device_from_hal( @@ -109,7 +107,7 @@ impl ContextWgpuCore { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } - let (device_id, queue_id, error) = unsafe { + let (device_id, queue_id) = unsafe { self.0.create_device_from_hal( *adapter, hal_device.into(), @@ 
-118,10 +116,7 @@ impl ContextWgpuCore { None, None, ) - }; - if let Some(err) = error { - self.handle_error_fatal(err, "Adapter::create_device_from_hal"); - } + }?; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, @@ -165,7 +160,7 @@ impl ContextWgpuCore { hal_buffer: A::Buffer, device: &Device, desc: &BufferDescriptor<'_>, - ) -> (wgc::id::BufferId, Buffer) { + ) -> Buffer { let (id, error) = unsafe { self.0.create_buffer_from_hal::( hal_buffer, @@ -182,12 +177,10 @@ impl ContextWgpuCore { "Device::create_buffer_from_hal", ); } - ( + Buffer { id, - Buffer { - error_sink: Arc::clone(&device.error_sink), - }, - ) + error_sink: Arc::clone(&device.error_sink), + } } pub unsafe fn device_as_hal) -> R, R>( @@ -237,12 +230,12 @@ impl ContextWgpuCore { R, >( &self, - texture_view_id: TextureViewId, + texture_view_data: &wgc::id::TextureViewId, hal_texture_view_callback: F, ) -> R { unsafe { self.0 - .texture_view_as_hal::(texture_view_id, hal_texture_view_callback) + .texture_view_as_hal::(*texture_view_data, hal_texture_view_callback) } } @@ -253,12 +246,12 @@ impl ContextWgpuCore { R, >( &self, - command_encoder_id: CommandEncoderId, + command_encoder: &CommandEncoder, hal_command_encoder_callback: F, ) -> R { unsafe { self.0.command_encoder_as_hal_mut::( - command_encoder_id, + command_encoder.id, hal_command_encoder_callback, ) } @@ -269,6 +262,7 @@ impl ContextWgpuCore { } #[cold] + #[track_caller] #[inline(never)] fn handle_error_inner( &self, @@ -277,32 +271,36 @@ impl ContextWgpuCore { label: Label<'_>, fn_ident: &'static str, ) { - let error = wgc::error::ContextError { + let source_error: ErrorSource = Box::new(wgc::error::ContextError { fn_ident, source, label: label.unwrap_or_default().to_string(), - }; + }); let mut sink = sink_mutex.lock(); - let mut source_opt: Option<&(dyn Error + 'static)> = Some(&error); - while let Some(source) = source_opt { - if let Some(wgc::device::DeviceError::OutOfMemory) = - 
source.downcast_ref::() - { - return sink.handle_error(crate::Error::OutOfMemory { - source: Box::new(error), - }); + let mut source_opt: Option<&(dyn Error + 'static)> = Some(&*source_error); + let error = loop { + if let Some(source) = source_opt { + if let Some(wgc::device::DeviceError::OutOfMemory) = + source.downcast_ref::() + { + break crate::Error::OutOfMemory { + source: source_error, + }; + } + source_opt = source.source(); + } else { + // Otherwise, it is a validation error + break crate::Error::Validation { + description: self.format_error(&*source_error), + source: source_error, + }; } - source_opt = source.source(); - } - - // Otherwise, it is a validation error - sink.handle_error(crate::Error::Validation { - description: self.format_error(&error), - source: Box::new(error), - }); + }; + sink.handle_error(error); } #[inline] + #[track_caller] fn handle_error( &self, sink_mutex: &Mutex, @@ -314,6 +312,7 @@ impl ContextWgpuCore { } #[inline] + #[track_caller] fn handle_error_nolabel( &self, sink_mutex: &Mutex, @@ -334,7 +333,7 @@ impl ContextWgpuCore { } #[inline(never)] - fn format_error(&self, err: &(impl Error + 'static)) -> String { + fn format_error(&self, err: &(dyn Error + 'static)) -> String { let mut output = String::new(); let mut level = 1; @@ -366,14 +365,14 @@ impl ContextWgpuCore { fn map_buffer_copy_view(view: crate::ImageCopyBuffer<'_>) -> wgc::command::ImageCopyBuffer { wgc::command::ImageCopyBuffer { - buffer: view.buffer.id.into(), + buffer: downcast_buffer(view.buffer).id, layout: view.layout, } } fn map_texture_copy_view(view: crate::ImageCopyTexture<'_>) -> wgc::command::ImageCopyTexture { wgc::command::ImageCopyTexture { - texture: view.texture.id.into(), + texture: downcast_texture(view.texture).id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -388,7 +387,7 @@ fn map_texture_tagged_copy_view( view: crate::ImageCopyTextureTagged<'_>, ) -> wgc::command::ImageCopyTextureTagged { 
wgc::command::ImageCopyTextureTagged { - texture: view.texture.id.into(), + texture: downcast_texture(view.texture).id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -443,14 +442,6 @@ pub struct Surface { configured_device: Mutex>, } -impl Surface { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::SurfaceId { - self.id - } -} - #[derive(Debug)] pub struct Device { id: wgc::id::DeviceId, @@ -458,21 +449,15 @@ pub struct Device { features: Features, } -impl Device { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::DeviceId { - self.id - } -} - #[derive(Debug)] pub struct Buffer { + id: wgc::id::BufferId, error_sink: ErrorSink, } #[derive(Debug)] pub struct ShaderModule { + id: wgc::id::ShaderModuleId, compilation_info: CompilationInfo, } @@ -482,26 +467,22 @@ pub struct Texture { error_sink: ErrorSink, } -impl Texture { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::TextureId { - self.id - } -} - #[derive(Debug)] pub struct Queue { id: wgc::id::QueueId, error_sink: ErrorSink, } -impl Queue { - // Not used on every platform - #[allow(dead_code)] - pub fn id(&self) -> wgc::id::QueueId { - self.id - } +#[derive(Debug)] +pub struct ComputePipeline { + id: wgc::id::ComputePipelineId, + error_sink: ErrorSink, +} + +#[derive(Debug)] +pub struct RenderPipeline { + id: wgc::id::RenderPipelineId, + error_sink: ErrorSink, } #[derive(Debug)] @@ -518,73 +499,43 @@ pub struct RenderPass { #[derive(Debug)] pub struct CommandEncoder { + id: wgc::id::CommandEncoderId, error_sink: ErrorSink, open: bool, } impl crate::Context for ContextWgpuCore { - type AdapterId = wgc::id::AdapterId; - type AdapterData = (); - type DeviceId = wgc::id::DeviceId; + type AdapterData = wgc::id::AdapterId; type DeviceData = Device; - type QueueId = wgc::id::QueueId; type QueueData = Queue; - type ShaderModuleId = wgc::id::ShaderModuleId; type ShaderModuleData = 
ShaderModule; - type BindGroupLayoutId = wgc::id::BindGroupLayoutId; - type BindGroupLayoutData = (); - type BindGroupId = wgc::id::BindGroupId; - type BindGroupData = (); - type TextureViewId = wgc::id::TextureViewId; - type TextureViewData = (); - type SamplerId = wgc::id::SamplerId; - type SamplerData = (); - type BufferId = wgc::id::BufferId; + type BindGroupLayoutData = wgc::id::BindGroupLayoutId; + type BindGroupData = wgc::id::BindGroupId; + type TextureViewData = wgc::id::TextureViewId; + type SamplerData = wgc::id::SamplerId; type BufferData = Buffer; - type TextureId = wgc::id::TextureId; type TextureData = Texture; - type QuerySetId = wgc::id::QuerySetId; - type QuerySetData = (); - type PipelineLayoutId = wgc::id::PipelineLayoutId; - type PipelineLayoutData = (); - type RenderPipelineId = wgc::id::RenderPipelineId; - type RenderPipelineData = (); - type ComputePipelineId = wgc::id::ComputePipelineId; - type ComputePipelineData = (); - type PipelineCacheId = wgc::id::PipelineCacheId; - type PipelineCacheData = (); - type CommandEncoderId = wgc::id::CommandEncoderId; + type QuerySetData = wgc::id::QuerySetId; + type PipelineLayoutData = wgc::id::PipelineLayoutId; + type RenderPipelineData = RenderPipeline; + type ComputePipelineData = ComputePipeline; + type PipelineCacheData = wgc::id::PipelineCacheId; type CommandEncoderData = CommandEncoder; - type ComputePassId = Unused; type ComputePassData = ComputePass; - type RenderPassId = Unused; type RenderPassData = RenderPass; - type CommandBufferId = wgc::id::CommandBufferId; - type CommandBufferData = (); - type RenderBundleEncoderId = Unused; + type CommandBufferData = wgc::id::CommandBufferId; type RenderBundleEncoderData = wgc::command::RenderBundleEncoder; - type RenderBundleId = wgc::id::RenderBundleId; - type RenderBundleData = (); + type RenderBundleData = wgc::id::RenderBundleId; - type SurfaceId = wgc::id::SurfaceId; type SurfaceData = Surface; type SurfaceOutputDetail = SurfaceOutputDetail; type 
SubmissionIndexData = wgc::SubmissionIndex; - type RequestAdapterFuture = Ready>; + type RequestAdapterFuture = Ready>; #[allow(clippy::type_complexity)] - type RequestDeviceFuture = Ready< - Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - crate::RequestDeviceError, - >, - >; + type RequestDeviceFuture = + Ready>; type PopErrorScopeFuture = Ready>; type CompilationInfoFuture = Ready; @@ -596,7 +547,7 @@ impl crate::Context for ContextWgpuCore { unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError> { + ) -> Result { let id = match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle, @@ -629,13 +580,10 @@ impl crate::Context for ContextWgpuCore { }, }?; - Ok(( + Ok(Surface { id, - Surface { - id, - configured_device: Mutex::default(), - }, - )) + configured_device: Mutex::default(), + }) } fn instance_request_adapter( @@ -646,33 +594,40 @@ impl crate::Context for ContextWgpuCore { &wgc::instance::RequestAdapterOptions { power_preference: options.power_preference, force_fallback_adapter: options.force_fallback_adapter, - compatible_surface: options.compatible_surface.map(|surface| surface.id.into()), + compatible_surface: options.compatible_surface.map(|surface| { + let surface: &::SurfaceData = + downcast_ref(surface.surface_data.as_ref()); + surface.id + }), }, - wgc::instance::AdapterInputs::Mask(wgt::Backends::all(), |_| None), + wgt::Backends::all(), + None, ); - ready(id.ok().map(|id| (id, ()))) + ready(id.ok()) } fn adapter_request_device( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, ) -> Self::RequestDeviceFuture { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } - let (device_id, queue_id, 
error) = self.0.adapter_request_device( - *adapter, + let res = self.0.adapter_request_device( + *adapter_data, &desc.map_label(|l| l.map(Borrowed)), None, None, None, ); - if let Some(err) = error { - return ready(Err(err.into())); - } + let (device_id, queue_id) = match res { + Ok(ids) => ids, + Err(err) => { + return ready(Err(err.into())); + } + }; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, @@ -683,7 +638,7 @@ impl crate::Context for ContextWgpuCore { id: queue_id, error_sink, }; - ready(Ok((device_id, device, queue_id, queue))) + ready(Ok((device, queue))) } fn instance_poll_all_devices(&self, force_wait: bool) -> bool { @@ -695,92 +650,57 @@ impl crate::Context for ContextWgpuCore { fn adapter_is_surface_supported( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, + adapter_data: &Self::AdapterData, + surface_data: &Self::SurfaceData, ) -> bool { - match self.0.adapter_is_surface_supported(*adapter, *surface) { - Ok(result) => result, - Err(err) => self.handle_error_fatal(err, "Adapter::is_surface_supported"), - } + self.0 + .adapter_is_surface_supported(*adapter_data, surface_data.id) } - fn adapter_features( - &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> Features { - match self.0.adapter_features(*adapter) { - Ok(features) => features, - Err(err) => self.handle_error_fatal(err, "Adapter::features"), - } + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features { + self.0.adapter_features(*adapter_data) } - fn adapter_limits( - &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> Limits { - match self.0.adapter_limits(*adapter) { - Ok(limits) => limits, - Err(err) => self.handle_error_fatal(err, "Adapter::limits"), - } + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits { + self.0.adapter_limits(*adapter_data) } fn 
adapter_downlevel_capabilities( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, ) -> DownlevelCapabilities { - match self.0.adapter_downlevel_capabilities(*adapter) { - Ok(downlevel) => downlevel, - Err(err) => self.handle_error_fatal(err, "Adapter::downlevel_properties"), - } + self.0.adapter_downlevel_capabilities(*adapter_data) } - fn adapter_get_info( - &self, - adapter: &wgc::id::AdapterId, - _adapter_data: &Self::AdapterData, - ) -> AdapterInfo { - match self.0.adapter_get_info(*adapter) { - Ok(info) => info, - Err(err) => self.handle_error_fatal(err, "Adapter::get_info"), - } + fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo { + self.0.adapter_get_info(*adapter_data) } fn adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, format: wgt::TextureFormat, ) -> wgt::TextureFormatFeatures { - match self.0.adapter_get_texture_format_features(*adapter, format) { - Ok(info) => info, - Err(err) => self.handle_error_fatal(err, "Adapter::get_texture_format_features"), - } + self.0 + .adapter_get_texture_format_features(*adapter_data, format) } fn adapter_get_presentation_timestamp( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp { - match self.0.adapter_get_presentation_timestamp(*adapter) { - Ok(timestamp) => timestamp, - Err(err) => self.handle_error_fatal(err, "Adapter::correlate_presentation_timestamp"), - } + self.0.adapter_get_presentation_timestamp(*adapter_data) } fn surface_get_capabilities( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + surface_data: &Self::SurfaceData, + adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities { - match self.0.surface_get_capabilities(*surface, *adapter) { 
+ match self + .0 + .surface_get_capabilities(surface_data.id, *adapter_data) + { Ok(caps) => caps, Err(wgc::instance::GetSurfaceSupportError::Unsupported) => { wgt::SurfaceCapabilities::default() @@ -791,48 +711,40 @@ impl crate::Context for ContextWgpuCore { fn surface_configure( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ) { - let error = self.0.surface_configure(*surface, *device, config); + let error = self + .0 + .surface_configure(surface_data.id, device_data.id, config); if let Some(e) = error { self.handle_error_fatal(e, "Surface::configure"); } else { - *surface_data.configured_device.lock() = Some(*device); + *surface_data.configured_device.lock() = Some(device_data.id); } } fn surface_get_current_texture( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, + surface_data: &Self::SurfaceData, ) -> ( - Option, Option, SurfaceStatus, Self::SurfaceOutputDetail, ) { - match self.0.surface_get_current_texture(*surface, None) { + match self.0.surface_get_current_texture(surface_data.id, None) { Ok(wgc::present::SurfaceOutput { status, texture_id }) => { - let (id, data) = { - ( - texture_id, - texture_id.map(|id| Texture { - id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), - }), - ) - }; + let data = texture_id.map(|id| Texture { + id, + error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + }); ( - id, data, status, SurfaceOutputDetail { - surface_id: *surface, + surface_id: surface_data.id, }, ) } @@ -854,33 +766,12 @@ impl crate::Context for ContextWgpuCore { } } - fn device_features( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - ) -> Features { - match self.0.device_features(*device) { - Ok(features) => features, - Err(err) => self.handle_error_fatal(err, "Device::features"), - } - } - - fn device_limits(&self, device: &Self::DeviceId, 
_device_data: &Self::DeviceData) -> Limits { - match self.0.device_limits(*device) { - Ok(limits) => limits, - Err(err) => self.handle_error_fatal(err, "Device::limits"), - } + fn device_features(&self, device_data: &Self::DeviceData) -> Features { + self.0.device_features(device_data.id) } - fn device_downlevel_properties( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - ) -> DownlevelCapabilities { - match self.0.device_downlevel_properties(*device) { - Ok(limits) => limits, - Err(err) => self.handle_error_fatal(err, "Device::downlevel_properties"), - } + fn device_limits(&self, device_data: &Self::DeviceData) -> Limits { + self.0.device_limits(device_data.id) } #[cfg_attr( @@ -894,11 +785,10 @@ impl crate::Context for ContextWgpuCore { )] fn device_create_shader_module( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), shader_bound_checks, @@ -929,9 +819,9 @@ impl crate::Context for ContextWgpuCore { ShaderSource::Naga(module) => wgc::pipeline::ShaderModuleSource::Naga(module), ShaderSource::Dummy(_) => panic!("found `ShaderSource::Dummy`"), }; - let (id, error) = self - .0 - .device_create_shader_module(*device, &descriptor, source, None); + let (id, error) = + self.0 + .device_create_shader_module(device_data.id, &descriptor, source, None); let compilation_info = match error { Some(cause) => { self.handle_error( @@ -945,15 +835,17 @@ impl crate::Context for ContextWgpuCore { None => CompilationInfo { messages: vec![] }, }; - (id, ShaderModule { compilation_info }) + ShaderModule { + id, + compilation_info, + } } unsafe fn device_create_shader_module_spirv( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> 
(Self::ShaderModuleId, Self::ShaderModuleData) { + ) -> Self::ShaderModuleData { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), // Doesn't matter the value since spirv shaders aren't mutated to include @@ -962,7 +854,7 @@ impl crate::Context for ContextWgpuCore { }; let (id, error) = unsafe { self.0.device_create_shader_module_spirv( - *device, + device_data.id, &descriptor, Borrowed(&desc.source), None, @@ -980,22 +872,24 @@ impl crate::Context for ContextWgpuCore { } None => CompilationInfo { messages: vec![] }, }; - (id, ShaderModule { compilation_info }) + ShaderModule { + id, + compilation_info, + } } fn device_create_bind_group_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { + ) -> Self::BindGroupLayoutData { let descriptor = wgc::binding_model::BindGroupLayoutDescriptor { label: desc.label.map(Borrowed), entries: Borrowed(desc.entries), }; let (id, error) = self .0 - .device_create_bind_group_layout(*device, &descriptor, None); + .device_create_bind_group_layout(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1004,18 +898,17 @@ impl crate::Context for ContextWgpuCore { "Device::create_bind_group_layout", ); } - (id, ()) + id } fn device_create_bind_group( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData) { + ) -> Self::BindGroupData { use wgc::binding_model as bm; - let mut arrayed_texture_views = Vec::::new(); - let mut arrayed_samplers = Vec::::new(); + let mut arrayed_texture_views = Vec::new(); + let mut arrayed_samplers = Vec::new(); if device_data .features .contains(Features::TEXTURE_BINDING_ARRAY) @@ -1023,10 +916,11 @@ impl crate::Context for ContextWgpuCore { // gather all the array view IDs first for entry in 
desc.entries.iter() { if let BindingResource::TextureViewArray(array) = entry.resource { - arrayed_texture_views.extend(array.iter().map(|view| &view.id)); + arrayed_texture_views + .extend(array.iter().map(|view| *downcast_texture_view(view))); } if let BindingResource::SamplerArray(array) = entry.resource { - arrayed_samplers.extend(array.iter().map(|sampler| &sampler.id)); + arrayed_samplers.extend(array.iter().map(|sampler| *downcast_sampler(sampler))); } } } @@ -1042,7 +936,7 @@ impl crate::Context for ContextWgpuCore { for entry in desc.entries.iter() { if let BindingResource::BufferArray(array) = entry.resource { arrayed_buffer_bindings.extend(array.iter().map(|binding| bm::BufferBinding { - buffer_id: binding.buffer.id.into(), + buffer_id: downcast_buffer(binding.buffer).id, offset: binding.offset, size: binding.size, })); @@ -1062,7 +956,7 @@ impl crate::Context for ContextWgpuCore { offset, size, }) => bm::BindingResource::Buffer(bm::BufferBinding { - buffer_id: buffer.id.into(), + buffer_id: downcast_buffer(buffer).id, offset, size, }), @@ -1073,38 +967,34 @@ impl crate::Context for ContextWgpuCore { bm::BindingResource::BufferArray(Borrowed(slice)) } BindingResource::Sampler(sampler) => { - bm::BindingResource::Sampler(sampler.id.into()) + bm::BindingResource::Sampler(*downcast_sampler(sampler)) } BindingResource::SamplerArray(array) => { - let samplers = remaining_arrayed_samplers[..array.len()] - .iter() - .map(|id| ::from(*id)) - .collect::>(); + let slice = &remaining_arrayed_samplers[..array.len()]; remaining_arrayed_samplers = &remaining_arrayed_samplers[array.len()..]; - bm::BindingResource::SamplerArray(Owned(samplers)) + bm::BindingResource::SamplerArray(Borrowed(slice)) } BindingResource::TextureView(texture_view) => { - bm::BindingResource::TextureView(texture_view.id.into()) + bm::BindingResource::TextureView(*downcast_texture_view(texture_view)) } BindingResource::TextureViewArray(array) => { - let views = 
remaining_arrayed_texture_views[..array.len()] - .iter() - .map(|id| ::from(*id)) - .collect::>(); + let slice = &remaining_arrayed_texture_views[..array.len()]; remaining_arrayed_texture_views = &remaining_arrayed_texture_views[array.len()..]; - bm::BindingResource::TextureViewArray(Owned(views)) + bm::BindingResource::TextureViewArray(Borrowed(slice)) } }, }) .collect::>(); let descriptor = bm::BindGroupDescriptor { label: desc.label.as_ref().map(|label| Borrowed(&label[..])), - layout: desc.layout.id.into(), + layout: *downcast_bind_group_layout(desc.layout), entries: Borrowed(&entries), }; - let (id, error) = self.0.device_create_bind_group(*device, &descriptor, None); + let (id, error) = self + .0 + .device_create_bind_group(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1113,14 +1003,13 @@ impl crate::Context for ContextWgpuCore { "Device::create_bind_group", ); } - (id, ()) + id } fn device_create_pipeline_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData) { + ) -> Self::PipelineLayoutData { // Limit is always less or equal to hal::MAX_BIND_GROUPS, so this is always right // Guards following ArrayVec assert!( @@ -1133,7 +1022,7 @@ impl crate::Context for ContextWgpuCore { let temp_layouts = desc .bind_group_layouts .iter() - .map(|bgl| bgl.id.into()) + .map(|bgl| *downcast_bind_group_layout(bgl)) .collect::>(); let descriptor = wgc::binding_model::PipelineLayoutDescriptor { label: desc.label.map(Borrowed), @@ -1143,7 +1032,7 @@ impl crate::Context for ContextWgpuCore { let (id, error) = self .0 - .device_create_pipeline_layout(*device, &descriptor, None); + .device_create_pipeline_layout(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1152,14 +1041,13 @@ impl crate::Context for ContextWgpuCore { 
"Device::create_pipeline_layout", ); } - (id, ()) + id } fn device_create_render_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData) { + ) -> Self::RenderPipelineData { use wgc::pipeline as pipe; let vertex_buffers: ArrayVec<_, { wgc::MAX_VERTEX_BUFFERS }> = desc @@ -1175,10 +1063,10 @@ impl crate::Context for ContextWgpuCore { let descriptor = pipe::RenderPipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(|l| l.id.into()), + layout: desc.layout.map(downcast_pipeline_layout).copied(), vertex: pipe::VertexState { stage: pipe::ProgrammableStageDescriptor { - module: desc.vertex.module.id.into(), + module: downcast_shader_module(desc.vertex.module).id, entry_point: desc.vertex.entry_point.map(Borrowed), constants: Borrowed(desc.vertex.compilation_options.constants), zero_initialize_workgroup_memory: desc @@ -1193,7 +1081,7 @@ impl crate::Context for ContextWgpuCore { multisample: desc.multisample, fragment: desc.fragment.as_ref().map(|frag| pipe::FragmentState { stage: pipe::ProgrammableStageDescriptor { - module: frag.module.id.into(), + module: downcast_shader_module(frag.module).id, entry_point: frag.entry_point.map(Borrowed), constants: Borrowed(frag.compilation_options.constants), zero_initialize_workgroup_memory: frag @@ -1203,12 +1091,12 @@ impl crate::Context for ContextWgpuCore { targets: Borrowed(frag.targets), }), multiview: desc.multiview, - cache: desc.cache.map(|c| c.id.into()), + cache: desc.cache.map(downcast_pipeline_cache).copied(), }; - let (id, error) = self - .0 - .device_create_render_pipeline(*device, &descriptor, None, None); + let (id, error) = + self.0 + .device_create_render_pipeline(device_data.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateRenderPipelineError::Internal { stage, ref error } = cause { log::error!("Shader translation error for stage {:?}: 
{}", stage, error); @@ -1221,33 +1109,35 @@ impl crate::Context for ContextWgpuCore { "Device::create_render_pipeline", ); } - (id, ()) + RenderPipeline { + id, + error_sink: Arc::clone(&device_data.error_sink), + } } fn device_create_compute_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData) { + ) -> Self::ComputePipelineData { use wgc::pipeline as pipe; let descriptor = pipe::ComputePipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(|l| l.id.into()), + layout: desc.layout.map(downcast_pipeline_layout).copied(), stage: pipe::ProgrammableStageDescriptor { - module: desc.module.id.into(), + module: downcast_shader_module(desc.module).id, entry_point: desc.entry_point.map(Borrowed), constants: Borrowed(desc.compilation_options.constants), zero_initialize_workgroup_memory: desc .compilation_options .zero_initialize_workgroup_memory, }, - cache: desc.cache.map(|c| c.id.into()), + cache: desc.cache.map(downcast_pipeline_cache).copied(), }; - let (id, error) = self - .0 - .device_create_compute_pipeline(*device, &descriptor, None, None); + let (id, error) = + self.0 + .device_create_compute_pipeline(device_data.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateComputePipelineError::Internal(ref error) = cause { log::error!( @@ -1264,15 +1154,17 @@ impl crate::Context for ContextWgpuCore { "Device::create_compute_pipeline", ); } - (id, ()) + ComputePipeline { + id, + error_sink: Arc::clone(&device_data.error_sink), + } } unsafe fn device_create_pipeline_cache( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData) { + ) -> Self::PipelineCacheData { use wgc::pipeline as pipe; let descriptor = pipe::PipelineCacheDescriptor { @@ -1282,7 +1174,7 @@ impl crate::Context for ContextWgpuCore { 
}; let (id, error) = unsafe { self.0 - .device_create_pipeline_cache(*device, &descriptor, None) + .device_create_pipeline_cache(device_data.id, &descriptor, None) }; if let Some(cause) = error { self.handle_error( @@ -1292,18 +1184,17 @@ impl crate::Context for ContextWgpuCore { "Device::device_create_pipeline_cache_init", ); } - (id, ()) + id } fn device_create_buffer( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &crate::BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData) { + ) -> Self::BufferData { let (id, error) = self.0 - .device_create_buffer(*device, &desc.map_label(|l| l.map(Borrowed)), None); + .device_create_buffer(device_data.id, &desc.map_label(|l| l.map(Borrowed)), None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1312,21 +1203,21 @@ impl crate::Context for ContextWgpuCore { "Device::create_buffer", ); } - ( + + Buffer { id, - Buffer { - error_sink: Arc::clone(&device_data.error_sink), - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + } } fn device_create_texture( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData) { + ) -> Self::TextureData { let wgt_desc = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec()); - let (id, error) = self.0.device_create_texture(*device, &wgt_desc, None); + let (id, error) = self + .0 + .device_create_texture(device_data.id, &wgt_desc, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1335,20 +1226,17 @@ impl crate::Context for ContextWgpuCore { "Device::create_texture", ); } - ( + + Texture { id, - Texture { - id, - error_sink: Arc::clone(&device_data.error_sink), - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + } } fn device_create_sampler( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData) { + ) -> 
Self::SamplerData { let descriptor = wgc::resource::SamplerDescriptor { label: desc.label.map(Borrowed), address_modes: [ @@ -1366,7 +1254,9 @@ impl crate::Context for ContextWgpuCore { border_color: desc.border_color, }; - let (id, error) = self.0.device_create_sampler(*device, &descriptor, None); + let (id, error) = self + .0 + .device_create_sampler(device_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &device_data.error_sink, @@ -1375,30 +1265,30 @@ impl crate::Context for ContextWgpuCore { "Device::create_sampler", ); } - (id, ()) + id } fn device_create_query_set( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &wgt::QuerySetDescriptor>, - ) -> (Self::QuerySetId, Self::QuerySetData) { - let (id, error) = - self.0 - .device_create_query_set(*device, &desc.map_label(|l| l.map(Borrowed)), None); + ) -> Self::QuerySetData { + let (id, error) = self.0.device_create_query_set( + device_data.id, + &desc.map_label(|l| l.map(Borrowed)), + None, + ); if let Some(cause) = error { self.handle_error_nolabel(&device_data.error_sink, cause, "Device::create_query_set"); } - (id, ()) + id } fn device_create_command_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData) { + ) -> Self::CommandEncoderData { let (id, error) = self.0.device_create_command_encoder( - *device, + device_data.id, &desc.map_label(|l| l.map(Borrowed)), None, ); @@ -1410,20 +1300,18 @@ impl crate::Context for ContextWgpuCore { "Device::create_command_encoder", ); } - ( + + CommandEncoder { id, - CommandEncoder { - error_sink: Arc::clone(&device_data.error_sink), - open: true, - }, - ) + error_sink: Arc::clone(&device_data.error_sink), + open: true, + } } fn device_create_render_bundle_encoder( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, desc: &RenderBundleEncoderDescriptor<'_>, - 
) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData) { + ) -> Self::RenderBundleEncoderData { let descriptor = wgc::command::RenderBundleEncoderDescriptor { label: desc.label.map(Borrowed), color_formats: Borrowed(desc.color_formats), @@ -1431,60 +1319,44 @@ impl crate::Context for ContextWgpuCore { sample_count: desc.sample_count, multiview: desc.multiview, }; - match wgc::command::RenderBundleEncoder::new(&descriptor, *device, None) { - Ok(encoder) => (Unused, encoder), + match wgc::command::RenderBundleEncoder::new(&descriptor, device_data.id, None) { + Ok(encoder) => encoder, Err(e) => panic!("Error in Device::create_render_bundle_encoder: {e}"), } } - #[doc(hidden)] - fn device_make_invalid(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_make_invalid(*device); - } #[cfg_attr(not(any(native, Emscripten)), allow(unused))] - fn device_drop(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { + fn device_drop(&self, device_data: &Self::DeviceData) { #[cfg(any(native, Emscripten))] { // Call device_poll, but don't check for errors. We have to use its // return value, but we just drop it. 
- let _ = self.0.device_poll(*device, wgt::Maintain::wait()); - self.0.device_drop(*device); + let _ = self.0.device_poll(device_data.id, wgt::Maintain::wait()); + self.0.device_drop(device_data.id); } } #[cfg_attr(target_arch = "wasm32", allow(unused))] - fn queue_drop(&self, queue: &Self::QueueId, _device_data: &Self::QueueData) { - self.0.queue_drop(*queue); + fn queue_drop(&self, queue_data: &Self::QueueData) { + self.0.queue_drop(queue_data.id); } fn device_set_device_lost_callback( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, device_lost_callback: crate::context::DeviceLostCallback, ) { let device_lost_closure = DeviceLostClosure::from_rust(device_lost_callback); self.0 - .device_set_device_lost_closure(*device, device_lost_closure); + .device_set_device_lost_closure(device_data.id, device_lost_closure); } - fn device_destroy(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_destroy(*device); - } - fn device_mark_lost( - &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, - message: &str, - ) { - // We do not provide a reason to device_lose, because all reasons other than - // destroyed (which this is not) are "unknown". 
- self.0.device_mark_lost(*device, message); + fn device_destroy(&self, device_data: &Self::DeviceData) { + self.0.device_destroy(device_data.id); } fn device_poll( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, maintain: crate::Maintain, ) -> wgt::MaintainResult { - let maintain_inner = maintain.map_index(|i| *i.0.as_ref().downcast_ref().unwrap()); - match self.0.device_poll(*device, maintain_inner) { + let maintain_inner = maintain.map_index(|i| *i.data.as_ref().downcast_ref().unwrap()); + match self.0.device_poll(device_data.id, maintain_inner) { Ok(done) => match done { true => wgt::MaintainResult::SubmissionQueueEmpty, false => wgt::MaintainResult::Ok, @@ -1494,30 +1366,20 @@ impl crate::Context for ContextWgpuCore { } fn device_on_uncaptured_error( &self, - _device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ) { let mut error_sink = device_data.error_sink.lock(); - error_sink.uncaptured_handler = handler; + error_sink.uncaptured_handler = Some(handler); } - fn device_push_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: crate::ErrorFilter, - ) { + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { let mut error_sink = device_data.error_sink.lock(); error_sink.scopes.push(ErrorScope { error: None, filter, }); } - fn device_pop_error_scope( - &self, - _device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture { + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture { let mut error_sink = device_data.error_sink.lock(); let scope = error_sink.scopes.pop().unwrap(); ready(scope.error) @@ -1525,7 +1387,6 @@ impl crate::Context for ContextWgpuCore { fn buffer_map_async( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: MapMode, range: Range, @@ -1545,7 +1406,7 @@ impl crate::Context for ContextWgpuCore { }; 
match self.0.buffer_map_async( - *buffer, + buffer_data.id, range.start, Some(range.end - range.start), operation, @@ -1558,14 +1419,13 @@ impl crate::Context for ContextWgpuCore { } fn buffer_get_mapped_range( &self, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, sub_range: Range, ) -> Box { let size = sub_range.end - sub_range.start; match self .0 - .buffer_get_mapped_range(*buffer, sub_range.start, Some(size)) + .buffer_get_mapped_range(buffer_data.id, sub_range.start, Some(size)) { Ok((ptr, size)) => Box::new(BufferMappedRange { ptr, @@ -1575,8 +1435,8 @@ impl crate::Context for ContextWgpuCore { } } - fn buffer_unmap(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData) { - match self.0.buffer_unmap(*buffer) { + fn buffer_unmap(&self, buffer_data: &Self::BufferData) { + match self.0.buffer_unmap(buffer_data.id) { Ok(()) => (), Err(cause) => { self.handle_error_nolabel(&buffer_data.error_sink, cause, "Buffer::buffer_unmap") @@ -1586,7 +1446,6 @@ impl crate::Context for ContextWgpuCore { fn shader_get_compilation_info( &self, - _shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture { ready(shader_data.compilation_info.clone()) @@ -1594,10 +1453,9 @@ impl crate::Context for ContextWgpuCore { fn texture_create_view( &self, - texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData) { + ) -> Self::TextureViewData { let descriptor = wgc::resource::TextureViewDescriptor { label: desc.label.map(Borrowed), format: desc.format, @@ -1610,7 +1468,9 @@ impl crate::Context for ContextWgpuCore { array_layer_count: desc.array_layer_count, }, }; - let (id, error) = self.0.texture_create_view(*texture, &descriptor, None); + let (id, error) = self + .0 + .texture_create_view(texture_data.id, &descriptor, None); if let Some(cause) = error { self.handle_error( &texture_data.error_sink, @@ 
-1619,178 +1479,137 @@ impl crate::Context for ContextWgpuCore { "Texture::create_view", ); } - (id, ()) + id } - fn surface_drop(&self, surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData) { - self.0.surface_drop(*surface) + fn surface_drop(&self, surface_data: &Self::SurfaceData) { + self.0.surface_drop(surface_data.id) } - fn adapter_drop(&self, adapter: &Self::AdapterId, _adapter_data: &Self::AdapterData) { - self.0.adapter_drop(*adapter) + fn adapter_drop(&self, adapter_data: &Self::AdapterData) { + self.0.adapter_drop(*adapter_data) } - fn buffer_destroy(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { + fn buffer_destroy(&self, buffer_data: &Self::BufferData) { // Per spec, no error to report. Even calling destroy multiple times is valid. - let _ = self.0.buffer_destroy(*buffer); + let _ = self.0.buffer_destroy(buffer_data.id); } - fn buffer_drop(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { - self.0.buffer_drop(*buffer) + fn buffer_drop(&self, buffer_data: &Self::BufferData) { + self.0.buffer_drop(buffer_data.id) } - fn texture_destroy(&self, texture: &Self::TextureId, _texture_data: &Self::TextureData) { + fn texture_destroy(&self, texture_data: &Self::TextureData) { // Per spec, no error to report. Even calling destroy multiple times is valid. 
- let _ = self.0.texture_destroy(*texture); + let _ = self.0.texture_destroy(texture_data.id); } - fn texture_drop(&self, texture: &Self::TextureId, _texture_data: &Self::TextureData) { - self.0.texture_drop(*texture) + fn texture_drop(&self, texture_data: &Self::TextureData) { + self.0.texture_drop(texture_data.id) } - fn texture_view_drop( - &self, - texture_view: &Self::TextureViewId, - __texture_view_data: &Self::TextureViewData, - ) { - let _ = self.0.texture_view_drop(*texture_view); + fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData) { + let _ = self.0.texture_view_drop(*texture_view_data); } - fn sampler_drop(&self, sampler: &Self::SamplerId, _sampler_data: &Self::SamplerData) { - self.0.sampler_drop(*sampler) + fn sampler_drop(&self, sampler_data: &Self::SamplerData) { + self.0.sampler_drop(*sampler_data) } - fn query_set_drop(&self, query_set: &Self::QuerySetId, _query_set_data: &Self::QuerySetData) { - self.0.query_set_drop(*query_set) + fn query_set_drop(&self, query_set_data: &Self::QuerySetData) { + self.0.query_set_drop(*query_set_data) } - fn bind_group_drop( - &self, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, - ) { - self.0.bind_group_drop(*bind_group) + fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData) { + self.0.bind_group_drop(*bind_group_data) } - fn bind_group_layout_drop( - &self, - bind_group_layout: &Self::BindGroupLayoutId, - _bind_group_layout_data: &Self::BindGroupLayoutData, - ) { - self.0.bind_group_layout_drop(*bind_group_layout) + fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData) { + self.0.bind_group_layout_drop(*bind_group_layout_data) } - fn pipeline_layout_drop( - &self, - pipeline_layout: &Self::PipelineLayoutId, - _pipeline_layout_data: &Self::PipelineLayoutData, - ) { - self.0.pipeline_layout_drop(*pipeline_layout) + fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData) { + 
self.0.pipeline_layout_drop(*pipeline_layout_data) } - fn shader_module_drop( - &self, - shader_module: &Self::ShaderModuleId, - _shader_module_data: &Self::ShaderModuleData, - ) { - self.0.shader_module_drop(*shader_module) + fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData) { + self.0.shader_module_drop(shader_module_data.id) } - fn command_encoder_drop( - &self, - command_encoder: &Self::CommandEncoderId, - command_encoder_data: &Self::CommandEncoderData, - ) { + fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData) { if command_encoder_data.open { - self.0.command_encoder_drop(*command_encoder) + self.0.command_encoder_drop(command_encoder_data.id) } } - fn command_buffer_drop( - &self, - command_buffer: &Self::CommandBufferId, - _command_buffer_data: &Self::CommandBufferData, - ) { - self.0.command_buffer_drop(*command_buffer) + fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData) { + self.0.command_buffer_drop(*command_buffer_data) } - fn render_bundle_drop( - &self, - render_bundle: &Self::RenderBundleId, - _render_bundle_data: &Self::RenderBundleData, - ) { - self.0.render_bundle_drop(*render_bundle) + fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData) { + self.0.render_bundle_drop(*render_bundle_data) } - fn compute_pipeline_drop( - &self, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, - ) { - self.0.compute_pipeline_drop(*pipeline) + fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData) { + self.0.compute_pipeline_drop(pipeline_data.id) } - fn render_pipeline_drop( - &self, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, - ) { - self.0.render_pipeline_drop(*pipeline) + fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData) { + self.0.render_pipeline_drop(pipeline_data.id) } - fn pipeline_cache_drop( - &self, - cache: &Self::PipelineCacheId, - 
_cache_data: &Self::PipelineCacheData, - ) { - self.0.pipeline_cache_drop(*cache) + fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData) { + self.0.pipeline_cache_drop(*cache_data) } fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - let (id, error) = self - .0 - .compute_pipeline_get_bind_group_layout(*pipeline, index, None); + ) -> Self::BindGroupLayoutData { + let (id, error) = + self.0 + .compute_pipeline_get_bind_group_layout(pipeline_data.id, index, None); if let Some(err) = error { - panic!("Error reflecting bind group {index}: {err}"); + self.handle_error_nolabel( + &pipeline_data.error_sink, + err, + "ComputePipeline::get_bind_group_layout", + ) } - (id, ()) + id } fn render_pipeline_get_bind_group_layout( &self, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - let (id, error) = self - .0 - .render_pipeline_get_bind_group_layout(*pipeline, index, None); + ) -> Self::BindGroupLayoutData { + let (id, error) = + self.0 + .render_pipeline_get_bind_group_layout(pipeline_data.id, index, None); if let Some(err) = error { - panic!("Error reflecting bind group {index}: {err}"); + self.handle_error_nolabel( + &pipeline_data.error_sink, + err, + "RenderPipeline::get_bind_group_layout", + ) } - (id, ()) + id } fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - source: &Self::BufferId, - _source_data: &Self::BufferData, + source_data: &Self::BufferData, source_offset: wgt::BufferAddress, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + destination_data: &Self::BufferData, destination_offset: 
wgt::BufferAddress, copy_size: wgt::BufferAddress, ) { if let Err(cause) = self.0.command_encoder_copy_buffer_to_buffer( - *encoder, - *source, + encoder_data.id, + source_data.id, source_offset, - *destination, + destination_data.id, destination_offset, copy_size, ) { @@ -1804,14 +1623,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_buffer_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyBuffer<'_>, destination: crate::ImageCopyTexture<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_buffer_to_texture( - *encoder, + encoder_data.id, &map_buffer_copy_view(source), &map_texture_copy_view(destination), &copy_size, ) { @@ -1826,14 +1644,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_texture_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyBuffer<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_texture_to_buffer( - *encoder, + encoder_data.id, &map_texture_copy_view(source), &map_buffer_copy_view(destination), &copy_size, ) { @@ -1848,14 +1665,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_copy_texture_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture<'_>, destination: crate::ImageCopyTexture<'_>, copy_size: wgt::Extent3d, ) { if let Err(cause) = self.0.command_encoder_copy_texture_to_texture( - *encoder, + encoder_data.id, &map_texture_copy_view(source), &map_texture_copy_view(destination), &copy_size, ) { @@ -1870,21 +1686,20 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_begin_compute_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &ComputePassDescriptor<'_>, - ) -> 
Self::ComputePassData { let timestamp_writes = desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: tw.query_set.id.into(), + query_set: *downcast_query_set(tw.query_set), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); let (pass, err) = self.0.command_encoder_create_compute_pass( - *encoder, + encoder_data.id, &wgc::command::ComputePassDescriptor { label: desc.label.map(Borrowed), timestamp_writes: timestamp_writes.as_ref(), @@ -1900,29 +1715,25 @@ impl crate::Context for ContextWgpuCore { ); } - ( - Unused, - Self::ComputePassData { - pass, - error_sink: encoder_data.error_sink.clone(), - }, - ) + Self::ComputePassData { + pass, + error_sink: encoder_data.error_sink.clone(), + } } fn command_encoder_begin_render_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> (Self::RenderPassId, Self::RenderPassData) { + ) -> Self::RenderPassData { let colors = desc .color_attachments .iter() .map(|ca| { ca.as_ref() .map(|at| wgc::command::RenderPassColorAttachment { - view: at.view.id.into(), - resolve_target: at.resolve_target.map(|rt| rt.id.into()), + view: *downcast_texture_view(at.view), + resolve_target: at.resolve_target.map(downcast_texture_view).copied(), channel: map_pass_channel(Some(&at.ops)), }) }) @@ -1930,7 +1741,7 @@ impl crate::Context for ContextWgpuCore { let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| { wgc::command::RenderPassDepthStencilAttachment { - view: dsa.view.id.into(), + view: *downcast_texture_view(dsa.view), depth: map_pass_channel(dsa.depth_ops.as_ref()), stencil: map_pass_channel(dsa.stencil_ops.as_ref()), } @@ -1940,21 +1751,19 @@ impl crate::Context for ContextWgpuCore { desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: tw.query_set.id.into(), + query_set: 
*downcast_query_set(tw.query_set), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); let (pass, err) = self.0.command_encoder_create_render_pass( - *encoder, + encoder_data.id, &wgc::command::RenderPassDescriptor { label: desc.label.map(Borrowed), timestamp_writes: timestamp_writes.as_ref(), color_attachments: std::borrow::Cow::Borrowed(&colors), depth_stencil_attachment: depth_stencil.as_ref(), - occlusion_query_set: desc - .occlusion_query_set - .map(|query_set| query_set.id.into()), + occlusion_query_set: desc.occlusion_query_set.map(downcast_query_set).copied(), }, ); @@ -1967,40 +1776,36 @@ impl crate::Context for ContextWgpuCore { ); } - ( - Unused, - Self::RenderPassData { - pass, - error_sink: encoder_data.error_sink.clone(), - }, - ) + Self::RenderPassData { + pass, + error_sink: encoder_data.error_sink.clone(), + } } fn command_encoder_finish( &self, - encoder: Self::CommandEncoderId, encoder_data: &mut Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData) { + ) -> Self::CommandBufferData { let descriptor = wgt::CommandBufferDescriptor::default(); encoder_data.open = false; // prevent the drop - let (id, error) = self.0.command_encoder_finish(encoder, &descriptor); + let (id, error) = self.0.command_encoder_finish(encoder_data.id, &descriptor); if let Some(cause) = error { self.handle_error_nolabel(&encoder_data.error_sink, cause, "a CommandEncoder"); } - (id, ()) + id } fn command_encoder_clear_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - texture: &crate::Texture, + texture_data: &Self::TextureData, subresource_range: &wgt::ImageSubresourceRange, ) { - if let Err(cause) = - self.0 - .command_encoder_clear_texture(*encoder, texture.id.into(), subresource_range) - { + if let Err(cause) = self.0.command_encoder_clear_texture( + encoder_data.id, + texture_data.id, + subresource_range, + ) { 
self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2011,15 +1816,14 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_clear_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &crate::Buffer, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { if let Err(cause) = self.0 - .command_encoder_clear_buffer(*encoder, buffer.id.into(), offset, size) + .command_encoder_clear_buffer(encoder_data.id, buffer_data.id, offset, size) { self.handle_error_nolabel( &encoder_data.error_sink, @@ -2031,11 +1835,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_insert_debug_marker( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ) { - if let Err(cause) = self.0.command_encoder_insert_debug_marker(*encoder, label) { + if let Err(cause) = self + .0 + .command_encoder_insert_debug_marker(encoder_data.id, label) + { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2046,11 +1852,13 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_push_debug_group( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ) { - if let Err(cause) = self.0.command_encoder_push_debug_group(*encoder, label) { + if let Err(cause) = self + .0 + .command_encoder_push_debug_group(encoder_data.id, label) + { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2059,12 +1867,8 @@ impl crate::Context for ContextWgpuCore { } } - fn command_encoder_pop_debug_group( - &self, - encoder: &Self::CommandEncoderId, - encoder_data: &Self::CommandEncoderData, - ) { - if let Err(cause) = self.0.command_encoder_pop_debug_group(*encoder) { + fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData) { + if let Err(cause) = self.0.command_encoder_pop_debug_group(encoder_data.id) { self.handle_error_nolabel( &encoder_data.error_sink, cause, @@ -2075,15 +1879,13 @@ 
impl crate::Context for ContextWgpuCore { fn command_encoder_write_timestamp( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .command_encoder_write_timestamp(*encoder, *query_set, query_index) + .command_encoder_write_timestamp(encoder_data.id, *query_set_data, query_index) { self.handle_error_nolabel( &encoder_data.error_sink, @@ -2095,22 +1897,19 @@ impl crate::Context for ContextWgpuCore { fn command_encoder_resolve_query_set( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.command_encoder_resolve_query_set( - *encoder, - *query_set, + encoder_data.id, + *query_set_data, first_query, query_count, - *destination, + destination_data.id, destination_offset, ) { self.handle_error_nolabel( @@ -2123,10 +1922,9 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_finish( &self, - _encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &crate::RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData) { + ) -> Self::RenderBundleData { let (id, error) = self.0.render_bundle_encoder_finish( encoder_data, &desc.map_label(|l| l.map(Borrowed)), @@ -2135,19 +1933,20 @@ impl crate::Context for ContextWgpuCore { if let Some(err) = error { self.handle_error_fatal(err, "RenderBundleEncoder::finish"); } - (id, ()) + id } fn queue_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: 
&Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, data: &[u8], ) { - match self.0.queue_write_buffer(*queue, *buffer, offset, data) { + match self + .0 + .queue_write_buffer(queue_data.id, buffer_data.id, offset, data) + { Ok(()) => (), Err(err) => { self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer") @@ -2157,16 +1956,14 @@ impl crate::Context for ContextWgpuCore { fn queue_validate_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()> { match self .0 - .queue_validate_write_buffer(*queue, *buffer, offset, size) + .queue_validate_write_buffer(queue_data.id, buffer_data.id, offset, size) { Ok(()) => Some(()), Err(err) => { @@ -2178,11 +1975,13 @@ impl crate::Context for ContextWgpuCore { fn queue_create_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, size: wgt::BufferSize, ) -> Option> { - match self.0.queue_create_staging_buffer(*queue, size, None) { + match self + .0 + .queue_create_staging_buffer(queue_data.id, size, None) + { Ok((buffer_id, ptr)) => Some(Box::new(QueueWriteBuffer { buffer_id, mapping: BufferMappedRange { @@ -2199,10 +1998,8 @@ impl crate::Context for ContextWgpuCore { fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, staging_buffer: &dyn crate::context::QueueWriteBuffer, ) { @@ -2210,10 +2007,12 @@ impl crate::Context for ContextWgpuCore { .as_any() .downcast_ref::() .unwrap(); - match self - .0 - .queue_write_staging_buffer(*queue, *buffer, offset, staging_buffer.buffer_id) - { + match self.0.queue_write_staging_buffer( + queue_data.id, + buffer_data.id, + offset, + staging_buffer.buffer_id, + ) { Ok(()) => (), 
Err(err) => { self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer_with"); @@ -2223,7 +2022,6 @@ impl crate::Context for ContextWgpuCore { fn queue_write_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, texture: crate::ImageCopyTexture<'_>, data: &[u8], @@ -2231,7 +2029,7 @@ impl crate::Context for ContextWgpuCore { size: wgt::Extent3d, ) { match self.0.queue_write_texture( - *queue, + queue_data.id, &map_texture_copy_view(texture), data, &data_layout, @@ -2247,14 +2045,13 @@ impl crate::Context for ContextWgpuCore { #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ) { match self.0.queue_copy_external_image_to_texture( - *queue, + queue_data.id, source, map_texture_tagged_copy_view(dest), size, @@ -2268,17 +2065,14 @@ impl crate::Context for ContextWgpuCore { } } - fn queue_submit>( + fn queue_submit>( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData { - let temp_command_buffers = command_buffers - .map(|(i, _)| i) - .collect::>(); + let temp_command_buffers = command_buffers.collect::>(); - let index = match self.0.queue_submit(*queue, &temp_command_buffers) { + let index = match self.0.queue_submit(queue_data.id, &temp_command_buffers) { Ok(index) => index, Err(err) => self.handle_error_fatal(err, "Queue::submit"), }; @@ -2290,77 +2084,57 @@ impl crate::Context for ContextWgpuCore { index } - fn queue_get_timestamp_period( - &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, - ) -> f32 { - let res = self.0.queue_get_timestamp_period(*queue); - match res { - Ok(v) => v, - Err(cause) => { - self.handle_error_fatal(cause, "Queue::get_timestamp_period"); - } - } + fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> 
f32 { + self.0.queue_get_timestamp_period(queue_data.id) } fn queue_on_submitted_work_done( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + queue_data: &Self::QueueData, callback: crate::context::SubmittedWorkDoneCallback, ) { let closure = wgc::device::queue::SubmittedWorkDoneClosure::from_rust(callback); - - let res = self.0.queue_on_submitted_work_done(*queue, closure); - if let Err(cause) = res { - self.handle_error_fatal(cause, "Queue::on_submitted_work_done"); - } + self.0.queue_on_submitted_work_done(queue_data.id, closure); } - fn device_start_capture(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_start_capture(*device); + fn device_start_capture(&self, device_data: &Self::DeviceData) { + self.0.device_start_capture(device_data.id); } - fn device_stop_capture(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - self.0.device_stop_capture(*device); + fn device_stop_capture(&self, device_data: &Self::DeviceData) { + self.0.device_stop_capture(device_data.id); } fn device_get_internal_counters( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, ) -> wgt::InternalCounters { - self.0.device_get_internal_counters(*device) + self.0.device_get_internal_counters(device_data.id) } fn device_generate_allocator_report( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + device_data: &Self::DeviceData, ) -> Option { - self.0.device_generate_allocator_report(*device) + self.0.device_generate_allocator_report(device_data.id) } fn pipeline_cache_get_data( &self, - cache: &Self::PipelineCacheId, // TODO: Used for error handling? 
- _cache_data: &Self::PipelineCacheData, + cache_data: &Self::PipelineCacheData, ) -> Option> { - self.0.pipeline_cache_get_data(*cache) + self.0.pipeline_cache_get_data(*cache_data) } fn compute_pass_set_pipeline( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + pipeline_data: &Self::ComputePipelineData, ) { if let Err(cause) = self .0 - .compute_pass_set_pipeline(&mut pass_data.pass, *pipeline) + .compute_pass_set_pipeline(&mut pass_data.pass, pipeline_data.id) { self.handle_error( &pass_data.error_sink, @@ -2373,16 +2147,15 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_set_bind_group( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + let bg = bind_group_data.cloned(); if let Err(cause) = self.0 - .compute_pass_set_bind_group(&mut pass_data.pass, index, *bind_group, offsets) + .compute_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets) { self.handle_error( &pass_data.error_sink, @@ -2395,7 +2168,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_set_push_constants( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, offset: u32, data: &[u8], @@ -2413,12 +2185,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_insert_debug_marker( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - label: &str, - ) { + fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str) { if let Err(cause) = self .0 .compute_pass_insert_debug_marker(&mut pass_data.pass, label, 0) @@ -2434,7 +2201,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_push_debug_group( &self, - _pass: &mut Self::ComputePassId, 
pass_data: &mut Self::ComputePassData, group_label: &str, ) { @@ -2451,11 +2217,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_pop_debug_group( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self.0.compute_pass_pop_debug_group(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -2468,15 +2230,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_write_timestamp( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .compute_pass_write_timestamp(&mut pass_data.pass, *query_set, query_index) + .compute_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) { self.handle_error( &pass_data.error_sink, @@ -2489,15 +2249,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0.compute_pass_begin_pipeline_statistics_query( &mut pass_data.pass, - *query_set, + *query_set_data, query_index, ) { self.handle_error( @@ -2509,11 +2267,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self .0 .compute_pass_end_pipeline_statistics_query(&mut pass_data.pass) @@ -2529,7 +2283,6 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_dispatch_workgroups( 
&self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, x: u32, y: u32, @@ -2550,15 +2303,13 @@ impl crate::Context for ContextWgpuCore { fn compute_pass_dispatch_workgroups_indirect( &self, - _pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.compute_pass_dispatch_workgroups_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, ) { self.handle_error( @@ -2570,11 +2321,7 @@ impl crate::Context for ContextWgpuCore { } } - fn compute_pass_end( - &self, - _pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ) { + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { if let Err(cause) = self.0.compute_pass_end(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -2587,28 +2334,25 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_set_pipeline( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, ) { - wgpu_render_bundle_set_pipeline(encoder_data, *pipeline) + wgpu_render_bundle_set_pipeline(encoder_data, pipeline_data.id) } fn render_bundle_encoder_set_bind_group( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - bind_group: &Self::BindGroupId, - __bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + let bg = bind_group_data.cloned(); unsafe { wgpu_render_bundle_set_bind_group( encoder_data, index, - *bind_group, + bg, offsets.as_ptr(), offsets.len(), ) @@ -2617,33 +2361,28 @@ impl crate::Context for ContextWgpuCore 
{ fn render_bundle_encoder_set_index_buffer( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - buffer: &Self::BufferId, - __buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { - encoder_data.set_index_buffer(*buffer, index_format, offset, size) + encoder_data.set_index_buffer(buffer_data.id, index_format, offset, size) } fn render_bundle_encoder_set_vertex_buffer( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - buffer: &Self::BufferId, - __buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - wgpu_render_bundle_set_vertex_buffer(encoder_data, slot, *buffer, offset, size) + wgpu_render_bundle_set_vertex_buffer(encoder_data, slot, buffer_data.id, offset, size) } fn render_bundle_encoder_set_push_constants( &self, - __encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, stages: wgt::ShaderStages, offset: u32, @@ -2662,7 +2401,6 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, @@ -2678,7 +2416,6 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw_indexed( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -2696,90 +2433,34 @@ impl crate::Context for ContextWgpuCore { fn render_bundle_encoder_draw_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - 
wgpu_render_bundle_draw_indirect(encoder_data, *indirect_buffer, indirect_offset) + wgpu_render_bundle_draw_indirect(encoder_data, indirect_buffer_data.id, indirect_offset) } fn render_bundle_encoder_draw_indexed_indirect( &self, - _encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - wgpu_render_bundle_draw_indexed_indirect(encoder_data, *indirect_buffer, indirect_offset) - } - - fn render_bundle_encoder_multi_draw_indirect( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, - ) { - unimplemented!() - } - - fn render_bundle_encoder_multi_draw_indexed_indirect( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, - ) { - unimplemented!() - } - - fn render_bundle_encoder_multi_draw_indirect_count( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, - _max_count: u32, - ) { - unimplemented!() - } - - fn render_bundle_encoder_multi_draw_indexed_indirect_count( - &self, - _encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - _indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - 
_count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, - _max_count: u32, - ) { - unimplemented!() + wgpu_render_bundle_draw_indexed_indirect( + encoder_data, + indirect_buffer_data.id, + indirect_offset, + ) } fn render_pass_set_pipeline( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + pipeline_data: &Self::RenderPipelineData, ) { if let Err(cause) = self .0 - .render_pass_set_pipeline(&mut pass_data.pass, *pipeline) + .render_pass_set_pipeline(&mut pass_data.pass, pipeline_data.id) { self.handle_error( &pass_data.error_sink, @@ -2792,16 +2473,15 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_bind_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[wgt::DynamicOffset], ) { + let bg = bind_group_data.cloned(); if let Err(cause) = self.0 - .render_pass_set_bind_group(&mut pass_data.pass, index, *bind_group, offsets) + .render_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets) { self.handle_error( &pass_data.error_sink, @@ -2814,17 +2494,15 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_index_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { if let Err(cause) = self.0.render_pass_set_index_buffer( &mut pass_data.pass, - *buffer, + buffer_data.id, index_format, offset, size, @@ -2840,18 +2518,19 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_vertex_buffer( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - 
buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { - if let Err(cause) = - self.0 - .render_pass_set_vertex_buffer(&mut pass_data.pass, slot, *buffer, offset, size) - { + if let Err(cause) = self.0.render_pass_set_vertex_buffer( + &mut pass_data.pass, + slot, + buffer_data.id, + offset, + size, + ) { self.handle_error( &pass_data.error_sink, cause, @@ -2863,7 +2542,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_push_constants( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, stages: wgt::ShaderStages, offset: u32, @@ -2884,7 +2562,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, @@ -2907,7 +2584,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indexed( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, @@ -2932,16 +2608,15 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - if let Err(cause) = - self.0 - .render_pass_draw_indirect(&mut pass_data.pass, *indirect_buffer, indirect_offset) - { + if let Err(cause) = self.0.render_pass_draw_indirect( + &mut pass_data.pass, + indirect_buffer_data.id, + indirect_offset, + ) { self.handle_error( &pass_data.error_sink, cause, @@ -2953,15 +2628,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: 
&Self::BufferData, indirect_offset: wgt::BufferAddress, ) { if let Err(cause) = self.0.render_pass_draw_indexed_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, ) { self.handle_error( @@ -2975,16 +2648,14 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, count, ) { @@ -2999,16 +2670,14 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indexed_indirect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, count, ) { @@ -3023,21 +2692,18 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indirect_count( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, - count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, + count_buffer_data: &Self::BufferData, count_buffer_offset: wgt::BufferAddress, max_count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indirect_count( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, - *count_buffer, + count_buffer_data.id, 
count_buffer_offset, max_count, ) { @@ -3052,21 +2718,18 @@ impl crate::Context for ContextWgpuCore { fn render_pass_multi_draw_indexed_indirect_count( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, - count_buffer: &Self::BufferId, - _count_buffer_data: &Self::BufferData, + count_buffer_data: &Self::BufferData, count_buffer_offset: wgt::BufferAddress, max_count: u32, ) { if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect_count( &mut pass_data.pass, - *indirect_buffer, + indirect_buffer_data.id, indirect_offset, - *count_buffer, + count_buffer_data.id, count_buffer_offset, max_count, ) { @@ -3081,7 +2744,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_blend_constant( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, color: wgt::Color, ) { @@ -3100,7 +2762,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_scissor_rect( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -3122,7 +2783,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_viewport( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -3151,7 +2811,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_set_stencil_reference( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ) { @@ -3168,12 +2827,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_insert_debug_marker( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - label: &str, - ) { + fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str) { if let Err(cause) = self .0 .render_pass_insert_debug_marker(&mut pass_data.pass, label, 0) @@ -3189,7 
+2843,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_push_debug_group( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, group_label: &str, ) { @@ -3206,11 +2859,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_pop_debug_group( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_pop_debug_group(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3223,15 +2872,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_write_timestamp( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0 - .render_pass_write_timestamp(&mut pass_data.pass, *query_set, query_index) + .render_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) { self.handle_error( &pass_data.error_sink, @@ -3244,7 +2891,6 @@ impl crate::Context for ContextWgpuCore { fn render_pass_begin_occlusion_query( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, query_index: u32, ) { @@ -3261,11 +2907,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end_occlusion_query( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_end_occlusion_query(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3278,15 +2920,13 @@ impl crate::Context for ContextWgpuCore { fn render_pass_begin_pipeline_statistics_query( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + 
query_set_data: &Self::QuerySetData, query_index: u32, ) { if let Err(cause) = self.0.render_pass_begin_pipeline_statistics_query( &mut pass_data.pass, - *query_set, + *query_set_data, query_index, ) { self.handle_error( @@ -3298,11 +2938,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end_pipeline_statistics_query( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self .0 .render_pass_end_pipeline_statistics_query(&mut pass_data.pass) @@ -3318,11 +2954,10 @@ impl crate::Context for ContextWgpuCore { fn render_pass_execute_bundles( &self, - _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ) { - let temp_render_bundles = render_bundles.map(|(i, _)| i).collect::>(); + let temp_render_bundles = render_bundles.copied().collect::>(); if let Err(cause) = self .0 .render_pass_execute_bundles(&mut pass_data.pass, &temp_render_bundles) @@ -3336,11 +2971,7 @@ impl crate::Context for ContextWgpuCore { } } - fn render_pass_end( - &self, - _pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ) { + fn render_pass_end(&self, pass_data: &mut Self::RenderPassData) { if let Err(cause) = self.0.render_pass_end(&mut pass_data.pass) { self.handle_error( &pass_data.error_sink, @@ -3352,27 +2983,6 @@ impl crate::Context for ContextWgpuCore { } } -impl From for wgc::id::Id -where - T: wgc::id::Marker, -{ - fn from(id: ObjectId) -> Self { - let id = wgc::id::RawId::from_non_zero(id.id()); - // SAFETY: The id was created via the impl below - unsafe { Self::from_raw(id) } - } -} - -impl From> for ObjectId -where - T: wgc::id::Marker, -{ - fn from(id: wgc::id::Id) -> Self { - let id = id.into_raw().into_non_zero(); - Self::from_global_id(id) - } -} - #[derive(Debug)] pub struct SurfaceOutputDetail { 
surface_id: wgc::id::SurfaceId, @@ -3387,17 +2997,18 @@ struct ErrorScope { struct ErrorSinkRaw { scopes: Vec, - uncaptured_handler: Box, + uncaptured_handler: Option>, } impl ErrorSinkRaw { fn new() -> ErrorSinkRaw { ErrorSinkRaw { scopes: Vec::new(), - uncaptured_handler: Box::from(default_error_handler), + uncaptured_handler: None, } } + #[track_caller] fn handle_error(&mut self, err: crate::Error) { let filter = match err { crate::Error::OutOfMemory { .. } => crate::ErrorFilter::OutOfMemory, @@ -3416,7 +3027,12 @@ impl ErrorSinkRaw { } } None => { - (self.uncaptured_handler)(err); + if let Some(custom_handler) = self.uncaptured_handler.as_ref() { + (custom_handler)(err); + } else { + // direct call preserves #[track_caller] where dyn can't + default_error_handler(err); + } } } } @@ -3428,6 +3044,7 @@ impl fmt::Debug for ErrorSinkRaw { } } +#[track_caller] fn default_error_handler(err: crate::Error) { log::error!("Handling wgpu errors as fatal by default"); panic!("wgpu error: {err}\n"); @@ -3513,3 +3130,43 @@ impl Drop for BufferMappedRange { // implements `Drop`, to match the web backend } } + +fn downcast_buffer(buffer: &crate::Buffer) -> &::BufferData { + downcast_ref(buffer.data.as_ref()) +} +fn downcast_texture(texture: &crate::Texture) -> &::TextureData { + downcast_ref(texture.data.as_ref()) +} +fn downcast_texture_view( + texture_view: &crate::TextureView, +) -> &::TextureViewData { + downcast_ref(texture_view.data.as_ref()) +} +fn downcast_sampler(sampler: &crate::Sampler) -> &::SamplerData { + downcast_ref(sampler.data.as_ref()) +} +fn downcast_query_set( + query_set: &crate::QuerySet, +) -> &::QuerySetData { + downcast_ref(query_set.data.as_ref()) +} +fn downcast_bind_group_layout( + bind_group_layout: &crate::BindGroupLayout, +) -> &::BindGroupLayoutData { + downcast_ref(bind_group_layout.data.as_ref()) +} +fn downcast_pipeline_layout( + pipeline_layout: &crate::PipelineLayout, +) -> &::PipelineLayoutData { + 
downcast_ref(pipeline_layout.data.as_ref()) +} +fn downcast_shader_module( + shader_module: &crate::ShaderModule, +) -> &::ShaderModuleData { + downcast_ref(shader_module.data.as_ref()) +} +fn downcast_pipeline_cache( + pipeline_cache: &crate::PipelineCache, +) -> &::PipelineCacheData { + downcast_ref(pipeline_cache.data.as_ref()) +} diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index d28e4bc692..a27459ab45 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -1,116 +1,77 @@ -use std::{any::Any, fmt::Debug, future::Future, num::NonZeroU64, ops::Range, pin::Pin, sync::Arc}; +use std::{any::Any, fmt::Debug, future::Future, ops::Range, pin::Pin, sync::Arc}; use wgt::{ - strict_assert, strict_assert_eq, AdapterInfo, BufferAddress, BufferSize, Color, - DeviceLostReason, DownlevelCapabilities, DynamicOffset, Extent3d, Features, ImageDataLayout, + strict_assert, AdapterInfo, BufferAddress, BufferSize, Color, DeviceLostReason, + DownlevelCapabilities, DynamicOffset, Extent3d, Features, ImageDataLayout, ImageSubresourceRange, IndexFormat, Limits, ShaderStages, SurfaceStatus, TextureFormat, TextureFormatFeatures, WasmNotSend, WasmNotSendSync, }; use crate::{ - AnyWasmNotSendSync, BindGroupDescriptor, BindGroupLayoutDescriptor, Buffer, BufferAsyncError, + AnyWasmNotSendSync, BindGroupDescriptor, BindGroupLayoutDescriptor, BufferAsyncError, BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, ComputePassDescriptor, ComputePipelineDescriptor, DeviceDescriptor, Error, ErrorFilter, ImageCopyBuffer, ImageCopyTexture, Maintain, MaintainResult, MapMode, PipelineCacheDescriptor, PipelineLayoutDescriptor, QuerySetDescriptor, RenderBundleDescriptor, RenderBundleEncoderDescriptor, RenderPassDescriptor, RenderPipelineDescriptor, RequestAdapterOptions, RequestDeviceError, SamplerDescriptor, ShaderModuleDescriptor, - ShaderModuleDescriptorSpirV, SurfaceTargetUnsafe, Texture, TextureDescriptor, - TextureViewDescriptor, UncapturedErrorHandler, + 
ShaderModuleDescriptorSpirV, SurfaceTargetUnsafe, TextureDescriptor, TextureViewDescriptor, + UncapturedErrorHandler, }; - -/// Meta trait for an id tracked by a context. -/// -/// There is no need to manually implement this trait since there is a blanket implementation for this trait. -pub trait ContextId: Into + From + Debug + 'static {} -impl + From + Debug + 'static> ContextId for T {} - /// Meta trait for an data associated with an id tracked by a context. /// /// There is no need to manually implement this trait since there is a blanket implementation for this trait. +#[cfg_attr(target_os = "emscripten", allow(dead_code))] pub trait ContextData: Debug + WasmNotSendSync + 'static {} impl ContextData for T {} pub trait Context: Debug + WasmNotSendSync + Sized { - type AdapterId: ContextId + WasmNotSendSync; type AdapterData: ContextData; - type DeviceId: ContextId + WasmNotSendSync; type DeviceData: ContextData; - type QueueId: ContextId + WasmNotSendSync; type QueueData: ContextData; - type ShaderModuleId: ContextId + WasmNotSendSync; type ShaderModuleData: ContextData; - type BindGroupLayoutId: ContextId + WasmNotSendSync; type BindGroupLayoutData: ContextData; - type BindGroupId: ContextId + WasmNotSendSync; type BindGroupData: ContextData; - type TextureViewId: ContextId + WasmNotSendSync; type TextureViewData: ContextData; - type SamplerId: ContextId + WasmNotSendSync; type SamplerData: ContextData; - type BufferId: ContextId + WasmNotSendSync; type BufferData: ContextData; - type TextureId: ContextId + WasmNotSendSync; type TextureData: ContextData; - type QuerySetId: ContextId + WasmNotSendSync; type QuerySetData: ContextData; - type PipelineLayoutId: ContextId + WasmNotSendSync; type PipelineLayoutData: ContextData; - type RenderPipelineId: ContextId + WasmNotSendSync; type RenderPipelineData: ContextData; - type ComputePipelineId: ContextId + WasmNotSendSync; type ComputePipelineData: ContextData; - type PipelineCacheId: ContextId + WasmNotSendSync; 
type PipelineCacheData: ContextData; - type CommandEncoderId: ContextId + WasmNotSendSync; type CommandEncoderData: ContextData; - type ComputePassId: ContextId; type ComputePassData: ContextData; - type RenderPassId: ContextId; type RenderPassData: ContextData; - type CommandBufferId: ContextId + WasmNotSendSync; type CommandBufferData: ContextData; - type RenderBundleEncoderId: ContextId; type RenderBundleEncoderData: ContextData; - type RenderBundleId: ContextId + WasmNotSendSync; type RenderBundleData: ContextData; - type SurfaceId: ContextId + WasmNotSendSync; type SurfaceData: ContextData; type SurfaceOutputDetail: WasmNotSendSync + 'static; type SubmissionIndexData: ContextData + Copy; - type RequestAdapterFuture: Future> + type RequestAdapterFuture: Future> + WasmNotSend + 'static; + type RequestDeviceFuture: Future> + WasmNotSend + 'static; - type RequestDeviceFuture: Future< - Output = Result< - ( - Self::DeviceId, - Self::DeviceData, - Self::QueueId, - Self::QueueData, - ), - RequestDeviceError, - >, - > + WasmNotSend - + 'static; type PopErrorScopeFuture: Future> + WasmNotSend + 'static; type CompilationInfoFuture: Future + WasmNotSend + 'static; + #[cfg(not(target_os = "emscripten"))] fn init(instance_desc: wgt::InstanceDescriptor) -> Self; unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(Self::SurfaceId, Self::SurfaceData), crate::CreateSurfaceError>; + ) -> Result; fn instance_request_adapter( &self, options: &RequestAdapterOptions<'_, '_>, ) -> Self::RequestAdapterFuture; fn adapter_request_device( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -118,62 +79,42 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn instance_poll_all_devices(&self, force_wait: bool) -> bool; fn adapter_is_surface_supported( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, - surface: &Self::SurfaceId, surface_data: 
&Self::SurfaceData, ) -> bool; - fn adapter_features( - &self, - adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> Features; - fn adapter_limits(&self, adapter: &Self::AdapterId, adapter_data: &Self::AdapterData) - -> Limits; + fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features; + fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits; fn adapter_downlevel_capabilities( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> DownlevelCapabilities; - fn adapter_get_info( - &self, - adapter: &Self::AdapterId, - adapter_data: &Self::AdapterData, - ) -> AdapterInfo; + fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo; fn adapter_get_texture_format_features( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, format: TextureFormat, ) -> TextureFormatFeatures; fn adapter_get_presentation_timestamp( &self, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> wgt::PresentationTimestamp; fn surface_get_capabilities( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - adapter: &Self::AdapterId, adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities; fn surface_configure( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, - device: &Self::DeviceId, device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ); #[allow(clippy::type_complexity)] fn surface_get_current_texture( &self, - surface: &Self::SurfaceId, surface_data: &Self::SurfaceData, ) -> ( - Option, Option, SurfaceStatus, Self::SurfaceOutputDetail, @@ -181,142 +122,98 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn surface_present(&self, detail: &Self::SurfaceOutputDetail); fn surface_texture_discard(&self, detail: &Self::SurfaceOutputDetail); - fn device_features(&self, device: &Self::DeviceId, device_data: &Self::DeviceData) -> Features; - fn device_limits(&self, device: &Self::DeviceId, device_data: 
&Self::DeviceData) -> Limits; - fn device_downlevel_properties( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> DownlevelCapabilities; + fn device_features(&self, device_data: &Self::DeviceData) -> Features; + fn device_limits(&self, device_data: &Self::DeviceData) -> Limits; fn device_create_shader_module( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData); + ) -> Self::ShaderModuleData; unsafe fn device_create_shader_module_spirv( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (Self::ShaderModuleId, Self::ShaderModuleData); + ) -> Self::ShaderModuleData; fn device_create_bind_group_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; fn device_create_bind_group( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BindGroupDescriptor<'_>, - ) -> (Self::BindGroupId, Self::BindGroupData); + ) -> Self::BindGroupData; fn device_create_pipeline_layout( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineLayoutDescriptor<'_>, - ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData); + ) -> Self::PipelineLayoutData; fn device_create_render_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderPipelineDescriptor<'_>, - ) -> (Self::RenderPipelineId, Self::RenderPipelineData); + ) -> Self::RenderPipelineData; fn device_create_compute_pipeline( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &ComputePipelineDescriptor<'_>, - ) -> (Self::ComputePipelineId, Self::ComputePipelineData); + ) -> Self::ComputePipelineData; unsafe fn device_create_pipeline_cache( &self, - device: 
&Self::DeviceId, device_data: &Self::DeviceData, desc: &PipelineCacheDescriptor<'_>, - ) -> (Self::PipelineCacheId, Self::PipelineCacheData); + ) -> Self::PipelineCacheData; fn device_create_buffer( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &BufferDescriptor<'_>, - ) -> (Self::BufferId, Self::BufferData); + ) -> Self::BufferData; fn device_create_texture( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &TextureDescriptor<'_>, - ) -> (Self::TextureId, Self::TextureData); + ) -> Self::TextureData; fn device_create_sampler( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &SamplerDescriptor<'_>, - ) -> (Self::SamplerId, Self::SamplerData); + ) -> Self::SamplerData; fn device_create_query_set( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &QuerySetDescriptor<'_>, - ) -> (Self::QuerySetId, Self::QuerySetData); + ) -> Self::QuerySetData; fn device_create_command_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &CommandEncoderDescriptor<'_>, - ) -> (Self::CommandEncoderId, Self::CommandEncoderData); + ) -> Self::CommandEncoderData; fn device_create_render_bundle_encoder( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData); - #[doc(hidden)] - fn device_make_invalid(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_drop(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + ) -> Self::RenderBundleEncoderData; + fn device_drop(&self, device_data: &Self::DeviceData); fn device_set_device_lost_callback( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, device_lost_callback: DeviceLostCallback, ); - fn device_destroy(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_mark_lost( - &self, - device: &Self::DeviceId, - device_data: 
&Self::DeviceData, - message: &str, - ); - fn queue_drop(&self, queue: &Self::QueueId, queue_data: &Self::QueueData); - fn device_poll( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - maintain: Maintain, - ) -> MaintainResult; + fn device_destroy(&self, device_data: &Self::DeviceData); + fn queue_drop(&self, queue_data: &Self::QueueData); + fn device_poll(&self, device_data: &Self::DeviceData, maintain: Maintain) -> MaintainResult; fn device_on_uncaptured_error( &self, - device: &Self::DeviceId, device_data: &Self::DeviceData, handler: Box, ); - fn device_push_error_scope( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - filter: ErrorFilter, - ); - fn device_pop_error_scope( - &self, - device: &Self::DeviceId, - device_data: &Self::DeviceData, - ) -> Self::PopErrorScopeFuture; + fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: ErrorFilter); + fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture; fn buffer_map_async( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, mode: MapMode, range: Range, @@ -324,116 +221,63 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn buffer_get_mapped_range( &self, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, sub_range: Range, ) -> Box; - fn buffer_unmap(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); + fn buffer_unmap(&self, buffer_data: &Self::BufferData); fn shader_get_compilation_info( &self, - shader: &Self::ShaderModuleId, shader_data: &Self::ShaderModuleData, ) -> Self::CompilationInfoFuture; fn texture_create_view( &self, - texture: &Self::TextureId, texture_data: &Self::TextureData, desc: &TextureViewDescriptor<'_>, - ) -> (Self::TextureViewId, Self::TextureViewData); - - fn surface_drop(&self, surface: &Self::SurfaceId, surface_data: &Self::SurfaceData); - fn adapter_drop(&self, adapter: &Self::AdapterId, adapter_data: &Self::AdapterData); - fn 
buffer_destroy(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); - fn buffer_drop(&self, buffer: &Self::BufferId, buffer_data: &Self::BufferData); - fn texture_destroy(&self, texture: &Self::TextureId, texture_data: &Self::TextureData); - fn texture_drop(&self, texture: &Self::TextureId, texture_data: &Self::TextureData); - fn texture_view_drop( - &self, - texture_view: &Self::TextureViewId, - texture_view_data: &Self::TextureViewData, - ); - fn sampler_drop(&self, sampler: &Self::SamplerId, sampler_data: &Self::SamplerData); - fn query_set_drop(&self, query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData); - fn bind_group_drop( - &self, - bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, - ); - fn bind_group_layout_drop( - &self, - bind_group_layout: &Self::BindGroupLayoutId, - bind_group_layout_data: &Self::BindGroupLayoutData, - ); - fn pipeline_layout_drop( - &self, - pipeline_layout: &Self::PipelineLayoutId, - pipeline_layout_data: &Self::PipelineLayoutData, - ); - fn shader_module_drop( - &self, - shader_module: &Self::ShaderModuleId, - shader_module_data: &Self::ShaderModuleData, - ); - fn command_encoder_drop( - &self, - command_encoder: &Self::CommandEncoderId, - command_encoder_data: &Self::CommandEncoderData, - ); - fn command_buffer_drop( - &self, - command_buffer: &Self::CommandBufferId, - command_buffer_data: &Self::CommandBufferData, - ); - fn render_bundle_drop( - &self, - render_bundle: &Self::RenderBundleId, - render_bundle_data: &Self::RenderBundleData, - ); - fn compute_pipeline_drop( - &self, - pipeline: &Self::ComputePipelineId, - pipeline_data: &Self::ComputePipelineData, - ); - fn render_pipeline_drop( - &self, - pipeline: &Self::RenderPipelineId, - pipeline_data: &Self::RenderPipelineData, - ); - fn pipeline_cache_drop( - &self, - cache: &Self::PipelineCacheId, - cache_data: &Self::PipelineCacheData, - ); + ) -> Self::TextureViewData; + + fn surface_drop(&self, surface_data: 
&Self::SurfaceData); + fn adapter_drop(&self, adapter_data: &Self::AdapterData); + fn buffer_destroy(&self, buffer_data: &Self::BufferData); + fn buffer_drop(&self, buffer_data: &Self::BufferData); + fn texture_destroy(&self, texture_data: &Self::TextureData); + fn texture_drop(&self, texture_data: &Self::TextureData); + fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData); + fn sampler_drop(&self, sampler_data: &Self::SamplerData); + fn query_set_drop(&self, query_set_data: &Self::QuerySetData); + fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData); + fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData); + fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData); + fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData); + fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData); + fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData); + fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData); + fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData); + fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData); + fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData); fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; fn render_pipeline_get_bind_group_layout( &self, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, index: u32, - ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData); + ) -> Self::BindGroupLayoutData; #[allow(clippy::too_many_arguments)] fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - source: &Self::BufferId, source_data: &Self::BufferData, 
source_offset: BufferAddress, - destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: BufferAddress, copy_size: BufferAddress, ); fn command_encoder_copy_buffer_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, @@ -441,7 +285,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn command_encoder_copy_texture_to_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, @@ -449,7 +292,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn command_encoder_copy_texture_to_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, @@ -458,120 +300,95 @@ pub trait Context: Debug + WasmNotSendSync + Sized { fn command_encoder_begin_compute_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &ComputePassDescriptor<'_>, - ) -> (Self::ComputePassId, Self::ComputePassData); + ) -> Self::ComputePassData; fn command_encoder_begin_render_pass( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, desc: &RenderPassDescriptor<'_>, - ) -> (Self::RenderPassId, Self::RenderPassData); + ) -> Self::RenderPassData; fn command_encoder_finish( &self, - encoder: Self::CommandEncoderId, encoder_data: &mut Self::CommandEncoderData, - ) -> (Self::CommandBufferId, Self::CommandBufferData); + ) -> Self::CommandBufferData; fn command_encoder_clear_texture( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - texture: &Texture, // TODO: Decompose? 
+ texture_data: &Self::TextureData, subresource_range: &ImageSubresourceRange, ); fn command_encoder_clear_buffer( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - buffer: &Buffer, + buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn command_encoder_insert_debug_marker( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ); fn command_encoder_push_debug_group( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, label: &str, ); - fn command_encoder_pop_debug_group( - &self, - encoder: &Self::CommandEncoderId, - encoder_data: &Self::CommandEncoderData, - ); + fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData); fn command_encoder_write_timestamp( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); #[allow(clippy::too_many_arguments)] fn command_encoder_resolve_query_set( &self, - encoder: &Self::CommandEncoderId, encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - destination: &Self::BufferId, destination_data: &Self::BufferData, destination_offset: BufferAddress, ); fn render_bundle_encoder_finish( &self, - encoder: Self::RenderBundleEncoderId, encoder_data: Self::RenderBundleEncoderData, desc: &RenderBundleDescriptor<'_>, - ) -> (Self::RenderBundleId, Self::RenderBundleData); + ) -> Self::RenderBundleData; fn queue_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, data: &[u8], ); fn queue_validate_write_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, 
) -> Option<()>; fn queue_create_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, size: BufferSize, ) -> Option>; fn queue_write_staging_buffer( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ); fn queue_write_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, texture: ImageCopyTexture<'_>, data: &[u8], @@ -581,115 +398,78 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[cfg(any(webgl, webgpu))] fn queue_copy_external_image_to_texture( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ); - fn queue_submit>( + fn queue_submit>( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, command_buffers: I, ) -> Self::SubmissionIndexData; - fn queue_get_timestamp_period( - &self, - queue: &Self::QueueId, - queue_data: &Self::QueueData, - ) -> f32; + fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32; fn queue_on_submitted_work_done( &self, - queue: &Self::QueueId, queue_data: &Self::QueueData, callback: SubmittedWorkDoneCallback, ); - fn device_start_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); - fn device_stop_capture(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn device_start_capture(&self, device_data: &Self::DeviceData); + fn device_stop_capture(&self, device_data: &Self::DeviceData); fn device_get_internal_counters( &self, - device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> wgt::InternalCounters; fn device_generate_allocator_report( &self, - device: &Self::DeviceId, _device_data: &Self::DeviceData, ) -> Option; - fn pipeline_cache_get_data( - &self, - cache: &Self::PipelineCacheId, - cache_data: &Self::PipelineCacheData, - ) -> Option>; + fn pipeline_cache_get_data(&self, 
cache_data: &Self::PipelineCacheData) -> Option>; fn compute_pass_set_pipeline( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - pipeline: &Self::ComputePipelineId, pipeline_data: &Self::ComputePipelineData, ); fn compute_pass_set_bind_group( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, index: u32, - bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[DynamicOffset], ); fn compute_pass_set_push_constants( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, offset: u32, data: &[u8], ); - fn compute_pass_insert_debug_marker( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - label: &str, - ); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str); fn compute_pass_push_debug_group( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, group_label: &str, ); - fn compute_pass_pop_debug_group( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData); fn compute_pass_write_timestamp( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); - fn compute_pass_end_pipeline_statistics_query( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData); fn compute_pass_dispatch_workgroups( &self, - pass: &mut Self::ComputePassId, pass_data: &mut 
Self::ComputePassData, x: u32, y: u32, @@ -697,40 +477,28 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut Self::ComputePassId, pass_data: &mut Self::ComputePassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); - fn compute_pass_end( - &self, - pass: &mut Self::ComputePassId, - pass_data: &mut Self::ComputePassData, - ); + fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData); fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ); fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: IndexFormat, offset: BufferAddress, @@ -739,17 +507,14 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, stages: ShaderStages, offset: u32, @@ -757,14 +522,12 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn 
render_bundle_encoder_draw( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, ); fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, @@ -772,87 +535,33 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - fn render_bundle_encoder_multi_draw_indirect( - &self, - encoder: &mut Self::RenderBundleEncoderId, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count: u32, - ); - fn render_bundle_encoder_multi_draw_indexed_indirect( - &self, - encoder: &mut Self::RenderBundleEncoderId, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_multi_draw_indirect_count( - &self, - encoder: &mut Self::RenderBundleEncoderId, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, - count_buffer_data: &Self::BufferData, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - 
#[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_multi_draw_indexed_indirect_count( - &self, - encoder: &mut Self::RenderBundleEncoderId, encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, - count_buffer_data: &Self::BufferData, - count_buffer_offset: BufferAddress, - max_count: u32, ); fn render_pass_set_pipeline( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - pipeline: &Self::RenderPipelineId, pipeline_data: &Self::RenderPipelineData, ); fn render_pass_set_bind_group( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, index: u32, - bind_group: &Self::BindGroupId, - bind_group_data: &Self::BindGroupData, + bind_group_data: Option<&Self::BindGroupData>, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_pass_set_index_buffer( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, index_format: IndexFormat, offset: BufferAddress, @@ -861,17 +570,14 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_set_vertex_buffer( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, slot: u32, - buffer: &Self::BufferId, buffer_data: &Self::BufferData, offset: BufferAddress, size: Option, ); fn render_pass_set_push_constants( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, stages: ShaderStages, offset: u32, @@ -879,14 +585,12 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_draw( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, ); fn render_pass_draw_indexed( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: 
i32, @@ -894,34 +598,26 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_draw_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_pass_draw_indexed_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, ); fn render_pass_multi_draw_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, ); fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, count: u32, @@ -929,12 +625,9 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indirect_count( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: u32, @@ -942,25 +635,16 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, indirect_buffer_data: &Self::BufferData, indirect_offset: BufferAddress, - count_buffer: &Self::BufferId, count_buffer_data: &Self::BufferData, count_buffer_offset: BufferAddress, max_count: u32, ); - fn render_pass_set_blend_constant( - &self, - 
pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - color: Color, - ); + fn render_pass_set_blend_constant(&self, pass_data: &mut Self::RenderPassData, color: Color); fn render_pass_set_scissor_rect( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: u32, y: u32, @@ -970,7 +654,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized { #[allow(clippy::too_many_arguments)] fn render_pass_set_viewport( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, x: f32, y: f32, @@ -981,112 +664,39 @@ pub trait Context: Debug + WasmNotSendSync + Sized { ); fn render_pass_set_stencil_reference( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, reference: u32, ); - fn render_pass_insert_debug_marker( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - label: &str, - ); - fn render_pass_push_debug_group( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - group_label: &str, - ); - fn render_pass_pop_debug_group( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str); + fn render_pass_push_debug_group(&self, pass_data: &mut Self::RenderPassData, group_label: &str); + fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData); fn render_pass_write_timestamp( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); fn render_pass_begin_occlusion_query( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, query_index: u32, ); - fn render_pass_end_occlusion_query( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData); fn 
render_pass_begin_pipeline_statistics_query( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - query_set: &Self::QuerySetId, query_set_data: &Self::QuerySetData, query_index: u32, ); - fn render_pass_end_pipeline_statistics_query( - &self, - pass: &mut Self::RenderPassId, - pass_data: &mut Self::RenderPassData, - ); + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData); fn render_pass_execute_bundles( &self, - pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ); - fn render_pass_end(&self, pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData); + fn render_pass_end(&self, pass_data: &mut Self::RenderPassData); } -/// Object id. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct ObjectId { - /// ID that is unique at any given time - id: Option, - /// ID that is unique at all times - global_id: Option, -} - -impl ObjectId { - pub(crate) const UNUSED: Self = ObjectId { - id: None, - global_id: None, - }; - - #[allow(dead_code)] - pub fn new(id: NonZeroU64, global_id: NonZeroU64) -> Self { - Self { - id: Some(id), - global_id: Some(global_id), - } - } - - #[allow(dead_code)] - pub fn from_global_id(global_id: NonZeroU64) -> Self { - Self { - id: Some(global_id), - global_id: Some(global_id), - } - } - - #[allow(dead_code)] - pub fn id(&self) -> NonZeroU64 { - self.id.unwrap() - } - - pub fn global_id(&self) -> NonZeroU64 { - self.global_id.unwrap() - } -} - -#[cfg(send_sync)] -static_assertions::assert_impl_all!(ObjectId: Send, Sync); - pub(crate) fn downcast_ref(data: &crate::Data) -> &T { strict_assert!(data.is::()); // Copied from std. @@ -1099,29 +709,8 @@ fn downcast_mut(data: &mut crate::Data) -> unsafe { &mut *(data as *mut dyn Any as *mut T) } } -/// Representation of an object id that is not used. 
-/// -/// This may be used as the id type when only a the data associated type is used for a specific type of object. -#[derive(Debug, Clone, Copy)] -pub struct Unused; - -impl From for Unused { - fn from(id: ObjectId) -> Self { - strict_assert_eq!(id, ObjectId::UNUSED); - Self - } -} - -impl From for ObjectId { - fn from(_: Unused) -> Self { - ObjectId::UNUSED - } -} - pub(crate) struct DeviceRequest { - pub device_id: ObjectId, pub device_data: Box, - pub queue_id: ObjectId, pub queue_data: Box, } @@ -1138,11 +727,9 @@ pub(crate) type AdapterRequestDeviceFuture = Box>>; #[cfg(send_sync)] -pub type InstanceRequestAdapterFuture = - Box)>> + Send>; +pub type InstanceRequestAdapterFuture = Box>> + Send>; #[cfg(not(send_sync))] -pub type InstanceRequestAdapterFuture = - Box)>>>; +pub type InstanceRequestAdapterFuture = Box>>>; #[cfg(send_sync)] pub type DevicePopErrorFuture = Box> + Send>; @@ -1165,12 +752,13 @@ pub type DeviceLostCallback = Box; /// An object safe variant of [`Context`] implemented by all types that implement [`Context`]. 
pub(crate) trait DynContext: Debug + WasmNotSendSync { + #[cfg(not(target_os = "emscripten"))] fn as_any(&self) -> &dyn Any; unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(ObjectId, Box), crate::CreateSurfaceError>; + ) -> Result, crate::CreateSurfaceError>; #[allow(clippy::type_complexity)] fn instance_request_adapter( &self, @@ -1178,7 +766,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ) -> Pin; fn adapter_request_device( &self, - adapter: &ObjectId, adapter_data: &crate::Data, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, @@ -1187,52 +774,38 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn instance_poll_all_devices(&self, force_wait: bool) -> bool; fn adapter_is_surface_supported( &self, - adapter: &ObjectId, adapter_data: &crate::Data, - surface: &ObjectId, surface_data: &crate::Data, ) -> bool; - fn adapter_features(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Features; - fn adapter_limits(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Limits; - fn adapter_downlevel_capabilities( - &self, - adapter: &ObjectId, - adapter_data: &crate::Data, - ) -> DownlevelCapabilities; - fn adapter_get_info(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> AdapterInfo; + fn adapter_features(&self, adapter_data: &crate::Data) -> Features; + fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits; + fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities; + fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo; fn adapter_get_texture_format_features( &self, - adapter: &ObjectId, adapter_data: &crate::Data, format: TextureFormat, ) -> TextureFormatFeatures; fn adapter_get_presentation_timestamp( &self, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::PresentationTimestamp; fn surface_get_capabilities( &self, - surface: &ObjectId, surface_data: &crate::Data, - adapter: &ObjectId, adapter_data: 
&crate::Data, ) -> wgt::SurfaceCapabilities; fn surface_configure( &self, - surface: &ObjectId, surface_data: &crate::Data, - device: &ObjectId, device_data: &crate::Data, config: &crate::SurfaceConfiguration, ); fn surface_get_current_texture( &self, - surface: &ObjectId, surface_data: &crate::Data, ) -> ( - Option, Option>, SurfaceStatus, Box, @@ -1240,136 +813,97 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn surface_present(&self, detail: &dyn AnyWasmNotSendSync); fn surface_texture_discard(&self, detail: &dyn AnyWasmNotSendSync); - fn device_features(&self, device: &ObjectId, device_data: &crate::Data) -> Features; - fn device_limits(&self, device: &ObjectId, device_data: &crate::Data) -> Limits; - fn device_downlevel_properties( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> DownlevelCapabilities; + fn device_features(&self, device_data: &crate::Data) -> Features; + fn device_limits(&self, device_data: &crate::Data) -> Limits; fn device_create_shader_module( &self, - device: &ObjectId, device_data: &crate::Data, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (ObjectId, Box); + ) -> Box; unsafe fn device_create_shader_module_spirv( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_bind_group_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_bind_group( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_pipeline_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineLayoutDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_render_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderPipelineDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; 
fn device_create_compute_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ComputePipelineDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; unsafe fn device_create_pipeline_cache( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineCacheDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_buffer( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BufferDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_texture( &self, - device: &ObjectId, device_data: &crate::Data, desc: &TextureDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_sampler( &self, - device: &ObjectId, device_data: &crate::Data, desc: &SamplerDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_query_set( &self, - device: &ObjectId, device_data: &crate::Data, desc: &QuerySetDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_command_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &CommandEncoderDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn device_create_render_bundle_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (ObjectId, Box); - #[doc(hidden)] - fn device_make_invalid(&self, device: &ObjectId, device_data: &crate::Data); - fn device_drop(&self, device: &ObjectId, device_data: &crate::Data); + ) -> Box; + fn device_drop(&self, device_data: &crate::Data); fn device_set_device_lost_callback( &self, - device: &ObjectId, device_data: &crate::Data, device_lost_callback: DeviceLostCallback, ); - fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data); - fn device_mark_lost(&self, device: &ObjectId, device_data: &crate::Data, message: &str); - fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data); - fn device_poll( - &self, - device: &ObjectId, - device_data: &crate::Data, - maintain: Maintain, - ) -> MaintainResult; + fn device_destroy(&self, 
device_data: &crate::Data); + fn queue_drop(&self, queue_data: &crate::Data); + fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult; fn device_on_uncaptured_error( &self, - device: &ObjectId, device_data: &crate::Data, handler: Box, ); - fn device_push_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - filter: ErrorFilter, - ); - fn device_pop_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Pin; + fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter); + fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin; fn buffer_map_async( &self, - buffer: &ObjectId, buffer_data: &crate::Data, mode: MapMode, range: Range, @@ -1377,76 +911,63 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn buffer_get_mapped_range( &self, - buffer: &ObjectId, buffer_data: &crate::Data, sub_range: Range, ) -> Box; - fn buffer_unmap(&self, buffer: &ObjectId, buffer_data: &crate::Data); + fn buffer_unmap(&self, buffer_data: &crate::Data); fn shader_get_compilation_info( &self, - shader: &ObjectId, shader_data: &crate::Data, ) -> Pin; fn texture_create_view( &self, - texture: &ObjectId, texture_data: &crate::Data, desc: &TextureViewDescriptor<'_>, - ) -> (ObjectId, Box); - - fn surface_drop(&self, surface: &ObjectId, surface_data: &crate::Data); - fn adapter_drop(&self, adapter: &ObjectId, adapter_data: &crate::Data); - fn buffer_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn buffer_drop(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn texture_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data); - fn texture_drop(&self, texture: &ObjectId, texture_data: &crate::Data); - fn texture_view_drop(&self, texture_view: &ObjectId, texture_view_data: &crate::Data); - fn sampler_drop(&self, sampler: &ObjectId, sampler_data: &crate::Data); - fn query_set_drop(&self, query_set: &ObjectId, query_set_data: &crate::Data); - fn 
bind_group_drop(&self, bind_group: &ObjectId, bind_group_data: &crate::Data); - fn bind_group_layout_drop( - &self, - bind_group_layout: &ObjectId, - bind_group_layout_data: &crate::Data, - ); - fn pipeline_layout_drop(&self, pipeline_layout: &ObjectId, pipeline_layout_data: &crate::Data); - fn shader_module_drop(&self, shader_module: &ObjectId, shader_module_data: &crate::Data); - fn command_encoder_drop(&self, command_encoder: &ObjectId, command_encoder_data: &crate::Data); - fn command_buffer_drop(&self, command_buffer: &ObjectId, command_buffer_data: &crate::Data); - fn render_bundle_drop(&self, render_bundle: &ObjectId, render_bundle_data: &crate::Data); - fn compute_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data); - fn render_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data); - fn pipeline_cache_drop(&self, cache: &ObjectId, _cache_data: &crate::Data); + ) -> Box; + + fn surface_drop(&self, surface_data: &crate::Data); + fn adapter_drop(&self, adapter_data: &crate::Data); + fn buffer_destroy(&self, buffer_data: &crate::Data); + fn buffer_drop(&self, buffer_data: &crate::Data); + fn texture_destroy(&self, buffer_data: &crate::Data); + fn texture_drop(&self, texture_data: &crate::Data); + fn texture_view_drop(&self, texture_view_data: &crate::Data); + fn sampler_drop(&self, sampler_data: &crate::Data); + fn query_set_drop(&self, query_set_data: &crate::Data); + fn bind_group_drop(&self, bind_group_data: &crate::Data); + fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data); + fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data); + fn shader_module_drop(&self, shader_module_data: &crate::Data); + fn command_encoder_drop(&self, command_encoder_data: &crate::Data); + fn command_buffer_drop(&self, command_buffer_data: &crate::Data); + fn render_bundle_drop(&self, render_bundle_data: &crate::Data); + fn compute_pipeline_drop(&self, pipeline_data: &crate::Data); + fn 
render_pipeline_drop(&self, pipeline_data: &crate::Data); + fn pipeline_cache_drop(&self, _cache_data: &crate::Data); fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box); + ) -> Box; fn render_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box); + ) -> Box; #[allow(clippy::too_many_arguments)] fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - source: &ObjectId, source_data: &crate::Data, source_offset: BufferAddress, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, copy_size: BufferAddress, ); fn command_encoder_copy_buffer_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, @@ -1454,7 +975,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn command_encoder_copy_texture_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, @@ -1462,7 +982,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn command_encoder_copy_texture_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, @@ -1471,116 +990,84 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { fn command_encoder_begin_compute_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &ComputePassDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn command_encoder_begin_render_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &RenderPassDescriptor<'_>, - ) -> (ObjectId, Box); - fn command_encoder_finish( - &self, - encoder: ObjectId, - encoder_data: &mut crate::Data, - ) -> (ObjectId, Box); + ) -> Box; + fn command_encoder_finish(&self, encoder_data: &mut crate::Data) 
-> Box; fn command_encoder_clear_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - texture: &Texture, + texture_data: &crate::Data, subresource_range: &ImageSubresourceRange, ); fn command_encoder_clear_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - buffer: &Buffer, + buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); - fn command_encoder_insert_debug_marker( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ); - fn command_encoder_push_debug_group( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ); - fn command_encoder_pop_debug_group(&self, encoder: &ObjectId, encoder_data: &crate::Data); + fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str); + fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str); + fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data); fn command_encoder_write_timestamp( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); #[allow(clippy::too_many_arguments)] fn command_encoder_resolve_query_set( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, first_query: u32, query_count: u32, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, ); fn render_bundle_encoder_finish( &self, - encoder: ObjectId, encoder_data: Box, desc: &RenderBundleDescriptor<'_>, - ) -> (ObjectId, Box); + ) -> Box; fn queue_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, data: &[u8], ); fn queue_validate_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()>; fn queue_create_staging_buffer( &self, 
- queue: &ObjectId, queue_data: &crate::Data, size: BufferSize, ) -> Option>; fn queue_write_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ); fn queue_write_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, texture: ImageCopyTexture<'_>, data: &[u8], @@ -1590,7 +1077,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, @@ -1598,136 +1084,80 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn queue_submit( &self, - queue: &ObjectId, queue_data: &crate::Data, - command_buffers: &mut dyn Iterator)>, + command_buffers: &mut dyn Iterator>, ) -> Arc; - fn queue_get_timestamp_period(&self, queue: &ObjectId, queue_data: &crate::Data) -> f32; + fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32; fn queue_on_submitted_work_done( &self, - queue: &ObjectId, queue_data: &crate::Data, callback: SubmittedWorkDoneCallback, ); - fn device_start_capture(&self, device: &ObjectId, data: &crate::Data); - fn device_stop_capture(&self, device: &ObjectId, data: &crate::Data); + fn device_start_capture(&self, data: &crate::Data); + fn device_stop_capture(&self, data: &crate::Data); - fn device_get_internal_counters( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> wgt::InternalCounters; + fn device_get_internal_counters(&self, device_data: &crate::Data) -> wgt::InternalCounters; - fn generate_allocator_report( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Option; + fn generate_allocator_report(&self, device_data: &crate::Data) -> Option; - fn pipeline_cache_get_data( - &self, - cache: &ObjectId, - cache_data: &crate::Data, - ) -> Option>; + fn pipeline_cache_get_data(&self, 
cache_data: &crate::Data) -> Option>; - fn compute_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ); + fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); fn compute_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ); fn compute_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, offset: u32, data: &[u8], ); - fn compute_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ); - fn compute_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ); - fn compute_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); + fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); + fn compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data); fn compute_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn compute_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ); - fn compute_pass_dispatch_workgroups( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - x: u32, - y: u32, - z: u32, - ); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); + fn compute_pass_dispatch_workgroups(&self, pass_data: &mut crate::Data, x: 
u32, y: u32, z: u32); fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); - fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn compute_pass_end(&self, pass_data: &mut crate::Data); fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - pipeline: &ObjectId, pipeline_data: &crate::Data, ); fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, @@ -1736,17 +1166,14 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, stages: ShaderStages, offset: u32, @@ -1754,14 +1181,12 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_bundle_encoder_draw( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, vertices: Range, instances: Range, ); fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, indices: Range, base_vertex: i32, @@ -1769,87 +1194,29 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut ObjectId, 
encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); - fn render_bundle_encoder_multi_draw_indirect( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ); - fn render_bundle_encoder_multi_draw_indexed_indirect( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_multi_draw_indirect_count( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer: &ObjectId, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_multi_draw_indexed_indirect_count( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer: &ObjectId, - command_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - fn render_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ); + fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); fn render_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - 
bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ); #[allow(clippy::too_many_arguments)] fn render_pass_set_index_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, @@ -1858,17 +1225,14 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_set_vertex_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ); fn render_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, stages: ShaderStages, offset: u32, @@ -1876,14 +1240,12 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_pass_draw( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, vertices: Range, instances: Range, ); fn render_pass_draw_indexed( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, indices: Range, base_vertex: i32, @@ -1891,34 +1253,26 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { ); fn render_pass_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_pass_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ); fn render_pass_multi_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ); fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, @@ -1926,12 +1280,9 
@@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, @@ -1939,25 +1290,16 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, command_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ); - fn render_pass_set_blend_constant( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - color: Color, - ); + fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color); fn render_pass_set_scissor_rect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, @@ -1967,7 +1309,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { #[allow(clippy::too_many_arguments)] fn render_pass_set_viewport( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: f32, y: f32, @@ -1976,60 +1317,31 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync { min_depth: f32, max_depth: f32, ); - fn render_pass_set_stencil_reference( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - reference: u32, - ); - fn render_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ); - fn render_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ); - fn render_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn 
render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32); + fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); + fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); + fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data); fn render_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn render_pass_begin_occlusion_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - query_index: u32, - ); - fn render_pass_end_occlusion_query(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32); + fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data); fn render_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ); - fn render_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ); + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); fn render_pass_execute_bundles( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ); - fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data); + fn render_pass_end(&self, pass_data: &mut crate::Data); } // Blanket impl of DynContext for all types which implement Context. 
@@ -2037,6 +1349,7 @@ impl DynContext for T where T: Context + 'static, { + #[cfg(not(target_os = "emscripten"))] fn as_any(&self) -> &dyn Any { self } @@ -2044,9 +1357,9 @@ where unsafe fn instance_create_surface( &self, target: SurfaceTargetUnsafe, - ) -> Result<(ObjectId, Box), crate::CreateSurfaceError> { - let (surface, data) = unsafe { Context::instance_create_surface(self, target) }?; - Ok((surface.into(), Box::new(data) as _)) + ) -> Result, crate::CreateSurfaceError> { + let data = unsafe { Context::instance_create_surface(self, target) }?; + Ok(Box::new(data) as _) } fn instance_request_adapter( @@ -2054,29 +1367,22 @@ where options: &RequestAdapterOptions<'_, '_>, ) -> Pin { let future: T::RequestAdapterFuture = Context::instance_request_adapter(self, options); - Box::pin(async move { - let result: Option<(T::AdapterId, T::AdapterData)> = future.await; - result.map(|(adapter, data)| (adapter.into(), Box::new(data) as _)) - }) + Box::pin(async move { future.await.map(|data| Box::new(data) as _) }) } fn adapter_request_device( &self, - adapter: &ObjectId, adapter_data: &crate::Data, desc: &DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, ) -> Pin { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - let future = Context::adapter_request_device(self, &adapter, adapter_data, desc, trace_dir); + let future = Context::adapter_request_device(self, adapter_data, desc, trace_dir); Box::pin(async move { - let (device_id, device_data, queue_id, queue_data) = future.await?; + let (device_data, queue_data) = future.await?; Ok(DeviceRequest { - device_id: device_id.into(), device_data: Box::new(device_data) as _, - queue_id: queue_id.into(), queue_data: Box::new(queue_data) as _, }) }) @@ -2088,116 +1394,84 @@ where fn adapter_is_surface_supported( &self, - adapter: &ObjectId, adapter_data: &crate::Data, - surface: &ObjectId, surface_data: &crate::Data, ) -> bool { - let adapter = ::from(*adapter); let adapter_data = 
downcast_ref(adapter_data); - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - Context::adapter_is_surface_supported(self, &adapter, adapter_data, &surface, surface_data) + Context::adapter_is_surface_supported(self, adapter_data, surface_data) } - fn adapter_features(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Features { - let adapter = ::from(*adapter); + fn adapter_features(&self, adapter_data: &crate::Data) -> Features { let adapter_data = downcast_ref(adapter_data); - Context::adapter_features(self, &adapter, adapter_data) + Context::adapter_features(self, adapter_data) } - fn adapter_limits(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> Limits { - let adapter = ::from(*adapter); + fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits { let adapter_data = downcast_ref(adapter_data); - Context::adapter_limits(self, &adapter, adapter_data) + Context::adapter_limits(self, adapter_data) } - fn adapter_downlevel_capabilities( - &self, - adapter: &ObjectId, - adapter_data: &crate::Data, - ) -> DownlevelCapabilities { - let adapter = ::from(*adapter); + fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities { let adapter_data = downcast_ref(adapter_data); - Context::adapter_downlevel_capabilities(self, &adapter, adapter_data) + Context::adapter_downlevel_capabilities(self, adapter_data) } - fn adapter_get_info(&self, adapter: &ObjectId, adapter_data: &crate::Data) -> AdapterInfo { - let adapter = ::from(*adapter); + fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo { let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_info(self, &adapter, adapter_data) + Context::adapter_get_info(self, adapter_data) } fn adapter_get_texture_format_features( &self, - adapter: &ObjectId, adapter_data: &crate::Data, format: TextureFormat, ) -> TextureFormatFeatures { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); 
- Context::adapter_get_texture_format_features(self, &adapter, adapter_data, format) + Context::adapter_get_texture_format_features(self, adapter_data, format) } fn adapter_get_presentation_timestamp( &self, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::PresentationTimestamp { - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_presentation_timestamp(self, &adapter, adapter_data) + Context::adapter_get_presentation_timestamp(self, adapter_data) } fn surface_get_capabilities( &self, - surface: &ObjectId, surface_data: &crate::Data, - adapter: &ObjectId, adapter_data: &crate::Data, ) -> wgt::SurfaceCapabilities { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let adapter = ::from(*adapter); let adapter_data = downcast_ref(adapter_data); - Context::surface_get_capabilities(self, &surface, surface_data, &adapter, adapter_data) + Context::surface_get_capabilities(self, surface_data, adapter_data) } fn surface_configure( &self, - surface: &ObjectId, surface_data: &crate::Data, - device: &ObjectId, device_data: &crate::Data, config: &crate::SurfaceConfiguration, ) { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::surface_configure(self, &surface, surface_data, &device, device_data, config) + Context::surface_configure(self, surface_data, device_data, config) } fn surface_get_current_texture( &self, - surface: &ObjectId, surface_data: &crate::Data, ) -> ( - Option, Option>, SurfaceStatus, Box, ) { - let surface = ::from(*surface); let surface_data = downcast_ref(surface_data); - let (texture, texture_data, status, detail) = - Context::surface_get_current_texture(self, &surface, surface_data); + let (texture_data, status, detail) = + Context::surface_get_current_texture(self, surface_data); let detail = Box::new(detail) as Box; - ( - 
texture.map(Into::into), - texture_data.map(|b| Box::new(b) as _), - status, - detail, - ) + (texture_data.map(|b| Box::new(b) as _), status, detail) } fn surface_present(&self, detail: &dyn AnyWasmNotSendSync) { @@ -2208,520 +1482,382 @@ where Context::surface_texture_discard(self, detail.downcast_ref().unwrap()) } - fn device_features(&self, device: &ObjectId, device_data: &crate::Data) -> Features { - let device = ::from(*device); - let device_data = downcast_ref(device_data); - Context::device_features(self, &device, device_data) - } - - fn device_limits(&self, device: &ObjectId, device_data: &crate::Data) -> Limits { - let device = ::from(*device); + fn device_features(&self, device_data: &crate::Data) -> Features { let device_data = downcast_ref(device_data); - Context::device_limits(self, &device, device_data) + Context::device_features(self, device_data) } - fn device_downlevel_properties( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> DownlevelCapabilities { - let device = ::from(*device); + fn device_limits(&self, device_data: &crate::Data) -> Limits { let device_data = downcast_ref(device_data); - Context::device_downlevel_properties(self, &device, device_data) + Context::device_limits(self, device_data) } fn device_create_shader_module( &self, - device: &ObjectId, device_data: &crate::Data, desc: ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (shader_module, data) = Context::device_create_shader_module( - self, - &device, - device_data, - desc, - shader_bound_checks, - ); - (shader_module.into(), Box::new(data) as _) + let data = + Context::device_create_shader_module(self, device_data, desc, shader_bound_checks); + Box::new(data) as _ } unsafe fn device_create_shader_module_spirv( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> 
(ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (shader_module, data) = - unsafe { Context::device_create_shader_module_spirv(self, &device, device_data, desc) }; - (shader_module.into(), Box::new(data) as _) + let data = unsafe { Context::device_create_shader_module_spirv(self, device_data, desc) }; + Box::new(data) as _ } fn device_create_bind_group_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupLayoutDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (bind_group_layout, data) = - Context::device_create_bind_group_layout(self, &device, device_data, desc); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::device_create_bind_group_layout(self, device_data, desc); + Box::new(data) as _ } fn device_create_bind_group( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BindGroupDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (bind_group, data) = - Context::device_create_bind_group(self, &device, device_data, desc); - (bind_group.into(), Box::new(data) as _) + let data = Context::device_create_bind_group(self, device_data, desc); + Box::new(data) as _ } fn device_create_pipeline_layout( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineLayoutDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (pipeline_layout, data) = - Context::device_create_pipeline_layout(self, &device, device_data, desc); - (pipeline_layout.into(), Box::new(data) as _) + let data = Context::device_create_pipeline_layout(self, device_data, desc); + Box::new(data) as _ } fn device_create_render_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderPipelineDescriptor<'_>, - ) 
-> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (render_pipeline, data) = - Context::device_create_render_pipeline(self, &device, device_data, desc); - (render_pipeline.into(), Box::new(data) as _) + let data = Context::device_create_render_pipeline(self, device_data, desc); + Box::new(data) as _ } fn device_create_compute_pipeline( &self, - device: &ObjectId, device_data: &crate::Data, desc: &ComputePipelineDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (compute_pipeline, data) = - Context::device_create_compute_pipeline(self, &device, device_data, desc); - (compute_pipeline.into(), Box::new(data) as _) + let data = Context::device_create_compute_pipeline(self, device_data, desc); + Box::new(data) as _ } unsafe fn device_create_pipeline_cache( &self, - device: &ObjectId, device_data: &crate::Data, desc: &PipelineCacheDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (pipeline_cache, data) = - unsafe { Context::device_create_pipeline_cache(self, &device, device_data, desc) }; - (pipeline_cache.into(), Box::new(data) as _) + let data = unsafe { Context::device_create_pipeline_cache(self, device_data, desc) }; + Box::new(data) as _ } fn device_create_buffer( &self, - device: &ObjectId, device_data: &crate::Data, desc: &BufferDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (buffer, data) = Context::device_create_buffer(self, &device, device_data, desc); - (buffer.into(), Box::new(data) as _) + let data = Context::device_create_buffer(self, device_data, desc); + Box::new(data) as _ } fn device_create_texture( &self, - device: &ObjectId, device_data: &crate::Data, desc: &TextureDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = 
::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (texture, data) = Context::device_create_texture(self, &device, device_data, desc); - (texture.into(), Box::new(data) as _) + let data = Context::device_create_texture(self, device_data, desc); + Box::new(data) as _ } fn device_create_sampler( &self, - device: &ObjectId, device_data: &crate::Data, desc: &SamplerDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (sampler, data) = Context::device_create_sampler(self, &device, device_data, desc); - (sampler.into(), Box::new(data) as _) + let data = Context::device_create_sampler(self, device_data, desc); + Box::new(data) as _ } fn device_create_query_set( &self, - device: &ObjectId, device_data: &crate::Data, desc: &QuerySetDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (query_set, data) = Context::device_create_query_set(self, &device, device_data, desc); - (query_set.into(), Box::new(data) as _) + let data = Context::device_create_query_set(self, device_data, desc); + Box::new(data) as _ } fn device_create_command_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &CommandEncoderDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (command_encoder, data) = - Context::device_create_command_encoder(self, &device, device_data, desc); - (command_encoder.into(), Box::new(data) as _) + let data = Context::device_create_command_encoder(self, device_data, desc); + Box::new(data) as _ } fn device_create_render_bundle_encoder( &self, - device: &ObjectId, device_data: &crate::Data, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> (ObjectId, Box) { - let device = ::from(*device); + ) -> Box { let device_data = downcast_ref(device_data); - let (render_bundle_encoder, data) = 
- Context::device_create_render_bundle_encoder(self, &device, device_data, desc); - (render_bundle_encoder.into(), Box::new(data) as _) + let data = Context::device_create_render_bundle_encoder(self, device_data, desc); + Box::new(data) as _ } - #[doc(hidden)] - fn device_make_invalid(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_drop(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_make_invalid(self, &device, device_data) - } - - fn device_drop(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); - let device_data = downcast_ref(device_data); - Context::device_drop(self, &device, device_data) + Context::device_drop(self, device_data) } fn device_set_device_lost_callback( &self, - device: &ObjectId, device_data: &crate::Data, device_lost_callback: DeviceLostCallback, ) { - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::device_set_device_lost_callback(self, &device, device_data, device_lost_callback) + Context::device_set_device_lost_callback(self, device_data, device_lost_callback) } - fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_destroy(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_destroy(self, &device, device_data) + Context::device_destroy(self, device_data) } - fn device_mark_lost(&self, device: &ObjectId, device_data: &crate::Data, message: &str) { - let device = ::from(*device); - let device_data = downcast_ref(device_data); - Context::device_mark_lost(self, &device, device_data, message) - } - - fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data) { - let queue = ::from(*queue); + fn queue_drop(&self, queue_data: &crate::Data) { let queue_data = downcast_ref(queue_data); - Context::queue_drop(self, &queue, queue_data) + Context::queue_drop(self, 
queue_data) } - fn device_poll( - &self, - device: &ObjectId, - device_data: &crate::Data, - maintain: Maintain, - ) -> MaintainResult { - let device = ::from(*device); + fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult { let device_data = downcast_ref(device_data); - Context::device_poll(self, &device, device_data, maintain) + Context::device_poll(self, device_data, maintain) } fn device_on_uncaptured_error( &self, - device: &ObjectId, device_data: &crate::Data, handler: Box, ) { - let device = ::from(*device); let device_data = downcast_ref(device_data); - Context::device_on_uncaptured_error(self, &device, device_data, handler) + Context::device_on_uncaptured_error(self, device_data, handler) } - fn device_push_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - filter: ErrorFilter, - ) { - let device = ::from(*device); + fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter) { let device_data = downcast_ref(device_data); - Context::device_push_error_scope(self, &device, device_data, filter) + Context::device_push_error_scope(self, device_data, filter) } - fn device_pop_error_scope( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Pin { - let device = ::from(*device); + fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin { let device_data = downcast_ref(device_data); - Box::pin(Context::device_pop_error_scope(self, &device, device_data)) + Box::pin(Context::device_pop_error_scope(self, device_data)) } fn buffer_map_async( &self, - buffer: &ObjectId, buffer_data: &crate::Data, mode: MapMode, range: Range, callback: BufferMapCallback, ) { - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::buffer_map_async(self, &buffer, buffer_data, mode, range, callback) + Context::buffer_map_async(self, buffer_data, mode, range, callback) } fn buffer_get_mapped_range( &self, - buffer: &ObjectId, buffer_data: &crate::Data, 
sub_range: Range, ) -> Box { - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::buffer_get_mapped_range(self, &buffer, buffer_data, sub_range) + Context::buffer_get_mapped_range(self, buffer_data, sub_range) } - fn buffer_unmap(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_unmap(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_unmap(self, &buffer, buffer_data) + Context::buffer_unmap(self, buffer_data) } fn shader_get_compilation_info( &self, - shader: &ObjectId, shader_data: &crate::Data, ) -> Pin { - let shader = ::from(*shader); let shader_data = downcast_ref(shader_data); - let future = Context::shader_get_compilation_info(self, &shader, shader_data); + let future = Context::shader_get_compilation_info(self, shader_data); Box::pin(future) } fn texture_create_view( &self, - texture: &ObjectId, texture_data: &crate::Data, desc: &TextureViewDescriptor<'_>, - ) -> (ObjectId, Box) { - let texture = ::from(*texture); + ) -> Box { let texture_data = downcast_ref(texture_data); - let (texture_view, data) = Context::texture_create_view(self, &texture, texture_data, desc); - (texture_view.into(), Box::new(data) as _) + let data = Context::texture_create_view(self, texture_data, desc); + Box::new(data) as _ } - fn surface_drop(&self, surface: &ObjectId, surface_data: &crate::Data) { - let surface = ::from(*surface); + fn surface_drop(&self, surface_data: &crate::Data) { let surface_data = downcast_ref(surface_data); - Context::surface_drop(self, &surface, surface_data) + Context::surface_drop(self, surface_data) } - fn adapter_drop(&self, adapter: &ObjectId, adapter_data: &crate::Data) { - let adapter = ::from(*adapter); + fn adapter_drop(&self, adapter_data: &crate::Data) { let adapter_data = downcast_ref(adapter_data); - Context::adapter_drop(self, &adapter, adapter_data) + Context::adapter_drop(self, adapter_data) } - fn 
buffer_destroy(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_destroy(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_destroy(self, &buffer, buffer_data) + Context::buffer_destroy(self, buffer_data) } - fn buffer_drop(&self, buffer: &ObjectId, buffer_data: &crate::Data) { - let buffer = ::from(*buffer); + fn buffer_drop(&self, buffer_data: &crate::Data) { let buffer_data = downcast_ref(buffer_data); - Context::buffer_drop(self, &buffer, buffer_data) + Context::buffer_drop(self, buffer_data) } - fn texture_destroy(&self, texture: &ObjectId, texture_data: &crate::Data) { - let texture = ::from(*texture); + fn texture_destroy(&self, texture_data: &crate::Data) { let texture_data = downcast_ref(texture_data); - Context::texture_destroy(self, &texture, texture_data) + Context::texture_destroy(self, texture_data) } - fn texture_drop(&self, texture: &ObjectId, texture_data: &crate::Data) { - let texture = ::from(*texture); + fn texture_drop(&self, texture_data: &crate::Data) { let texture_data = downcast_ref(texture_data); - Context::texture_drop(self, &texture, texture_data) + Context::texture_drop(self, texture_data) } - fn texture_view_drop(&self, texture_view: &ObjectId, texture_view_data: &crate::Data) { - let texture_view = ::from(*texture_view); + fn texture_view_drop(&self, texture_view_data: &crate::Data) { let texture_view_data = downcast_ref(texture_view_data); - Context::texture_view_drop(self, &texture_view, texture_view_data) + Context::texture_view_drop(self, texture_view_data) } - fn sampler_drop(&self, sampler: &ObjectId, sampler_data: &crate::Data) { - let sampler = ::from(*sampler); + fn sampler_drop(&self, sampler_data: &crate::Data) { let sampler_data = downcast_ref(sampler_data); - Context::sampler_drop(self, &sampler, sampler_data) + Context::sampler_drop(self, sampler_data) } - fn query_set_drop(&self, query_set: &ObjectId, query_set_data: 
&crate::Data) { - let query_set = ::from(*query_set); + fn query_set_drop(&self, query_set_data: &crate::Data) { let query_set_data = downcast_ref(query_set_data); - Context::query_set_drop(self, &query_set, query_set_data) + Context::query_set_drop(self, query_set_data) } - fn bind_group_drop(&self, bind_group: &ObjectId, bind_group_data: &crate::Data) { - let bind_group = ::from(*bind_group); + fn bind_group_drop(&self, bind_group_data: &crate::Data) { let bind_group_data = downcast_ref(bind_group_data); - Context::bind_group_drop(self, &bind_group, bind_group_data) + Context::bind_group_drop(self, bind_group_data) } - fn bind_group_layout_drop( - &self, - bind_group_layout: &ObjectId, - bind_group_layout_data: &crate::Data, - ) { - let bind_group_layout = ::from(*bind_group_layout); + fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data) { let bind_group_layout_data = downcast_ref(bind_group_layout_data); - Context::bind_group_layout_drop(self, &bind_group_layout, bind_group_layout_data) + Context::bind_group_layout_drop(self, bind_group_layout_data) } - fn pipeline_layout_drop(&self, pipeline_layout: &ObjectId, pipeline_layout_data: &crate::Data) { - let pipeline_layout = ::from(*pipeline_layout); + fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data) { let pipeline_layout_data = downcast_ref(pipeline_layout_data); - Context::pipeline_layout_drop(self, &pipeline_layout, pipeline_layout_data) + Context::pipeline_layout_drop(self, pipeline_layout_data) } - fn shader_module_drop(&self, shader_module: &ObjectId, shader_module_data: &crate::Data) { - let shader_module = ::from(*shader_module); + fn shader_module_drop(&self, shader_module_data: &crate::Data) { let shader_module_data = downcast_ref(shader_module_data); - Context::shader_module_drop(self, &shader_module, shader_module_data) + Context::shader_module_drop(self, shader_module_data) } - fn command_encoder_drop(&self, command_encoder: &ObjectId, command_encoder_data: 
&crate::Data) { - let command_encoder = ::from(*command_encoder); + fn command_encoder_drop(&self, command_encoder_data: &crate::Data) { let command_encoder_data = downcast_ref(command_encoder_data); - Context::command_encoder_drop(self, &command_encoder, command_encoder_data) + Context::command_encoder_drop(self, command_encoder_data) } - fn command_buffer_drop(&self, command_buffer: &ObjectId, command_buffer_data: &crate::Data) { - let command_buffer = ::from(*command_buffer); + fn command_buffer_drop(&self, command_buffer_data: &crate::Data) { let command_buffer_data = downcast_ref(command_buffer_data); - Context::command_buffer_drop(self, &command_buffer, command_buffer_data) + Context::command_buffer_drop(self, command_buffer_data) } - fn render_bundle_drop(&self, render_bundle: &ObjectId, render_bundle_data: &crate::Data) { - let render_bundle = ::from(*render_bundle); + fn render_bundle_drop(&self, render_bundle_data: &crate::Data) { let render_bundle_data = downcast_ref(render_bundle_data); - Context::render_bundle_drop(self, &render_bundle, render_bundle_data) + Context::render_bundle_drop(self, render_bundle_data) } - fn compute_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data) { - let pipeline = ::from(*pipeline); + fn compute_pipeline_drop(&self, pipeline_data: &crate::Data) { let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pipeline_drop(self, &pipeline, pipeline_data) + Context::compute_pipeline_drop(self, pipeline_data) } - fn render_pipeline_drop(&self, pipeline: &ObjectId, pipeline_data: &crate::Data) { - let pipeline = ::from(*pipeline); + fn render_pipeline_drop(&self, pipeline_data: &crate::Data) { let pipeline_data = downcast_ref(pipeline_data); - Context::render_pipeline_drop(self, &pipeline, pipeline_data) + Context::render_pipeline_drop(self, pipeline_data) } - fn pipeline_cache_drop(&self, cache: &ObjectId, cache_data: &crate::Data) { - let cache = ::from(*cache); + fn pipeline_cache_drop(&self, 
cache_data: &crate::Data) { let cache_data = downcast_ref(cache_data); - Context::pipeline_cache_drop(self, &cache, cache_data) + Context::pipeline_cache_drop(self, cache_data) } fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box) { - let pipeline = ::from(*pipeline); + ) -> Box { let pipeline_data = downcast_ref(pipeline_data); - let (bind_group_layout, data) = - Context::compute_pipeline_get_bind_group_layout(self, &pipeline, pipeline_data, index); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::compute_pipeline_get_bind_group_layout(self, pipeline_data, index); + Box::new(data) as _ } fn render_pipeline_get_bind_group_layout( &self, - pipeline: &ObjectId, pipeline_data: &crate::Data, index: u32, - ) -> (ObjectId, Box) { - let pipeline = ::from(*pipeline); + ) -> Box { let pipeline_data = downcast_ref(pipeline_data); - let (bind_group_layout, data) = - Context::render_pipeline_get_bind_group_layout(self, &pipeline, pipeline_data, index); - (bind_group_layout.into(), Box::new(data) as _) + let data = Context::render_pipeline_get_bind_group_layout(self, pipeline_data, index); + Box::new(data) as _ } fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - source: &ObjectId, source_data: &crate::Data, source_offset: BufferAddress, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, copy_size: BufferAddress, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let source = ::from(*source); let source_data = downcast_ref(source_data); - let destination = ::from(*destination); let destination_data = downcast_ref(destination_data); Context::command_encoder_copy_buffer_to_buffer( self, - &encoder, encoder_data, - &source, source_data, source_offset, - &destination, destination_data, destination_offset, copy_size, @@ -2730,17 +1866,14 @@ where fn 
command_encoder_copy_buffer_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyBuffer<'_>, destination: ImageCopyTexture<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_buffer_to_texture( self, - &encoder, encoder_data, source, destination, @@ -2750,17 +1883,14 @@ where fn command_encoder_copy_texture_to_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyBuffer<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_texture_to_buffer( self, - &encoder, encoder_data, source, destination, @@ -2770,17 +1900,14 @@ where fn command_encoder_copy_texture_to_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, source: ImageCopyTexture<'_>, destination: ImageCopyTexture<'_>, copy_size: Extent3d, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); Context::command_encoder_copy_texture_to_texture( self, - &encoder, encoder_data, source, destination, @@ -2790,148 +1917,96 @@ where fn command_encoder_begin_compute_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &ComputePassDescriptor<'_>, - ) -> (ObjectId, Box) { - let encoder = ::from(*encoder); + ) -> Box { let encoder_data = downcast_ref(encoder_data); - let (compute_pass, data) = - Context::command_encoder_begin_compute_pass(self, &encoder, encoder_data, desc); - (compute_pass.into(), Box::new(data) as _) + let data = Context::command_encoder_begin_compute_pass(self, encoder_data, desc); + Box::new(data) as _ } fn command_encoder_begin_render_pass( &self, - encoder: &ObjectId, encoder_data: &crate::Data, desc: &RenderPassDescriptor<'_>, - ) -> (ObjectId, Box) { - let encoder = ::from(*encoder); + ) -> Box { let encoder_data = downcast_ref(encoder_data); - let (render_pass, data) = - 
Context::command_encoder_begin_render_pass(self, &encoder, encoder_data, desc); - (render_pass.into(), Box::new(data) as _) + let data = Context::command_encoder_begin_render_pass(self, encoder_data, desc); + Box::new(data) as _ } - fn command_encoder_finish( - &self, - encoder: ObjectId, - encoder_data: &mut crate::Data, - ) -> (ObjectId, Box) { - let (command_buffer, data) = - Context::command_encoder_finish(self, encoder.into(), downcast_mut(encoder_data)); - (command_buffer.into(), Box::new(data) as _) + fn command_encoder_finish(&self, encoder_data: &mut crate::Data) -> Box { + let data = Context::command_encoder_finish(self, downcast_mut(encoder_data)); + Box::new(data) as _ } fn command_encoder_clear_texture( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - texture: &Texture, + texture_data: &crate::Data, subresource_range: &ImageSubresourceRange, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_clear_texture( - self, - &encoder, - encoder_data, - texture, - subresource_range, - ) + let texture_data = downcast_ref(texture_data); + Context::command_encoder_clear_texture(self, encoder_data, texture_data, subresource_range) } fn command_encoder_clear_buffer( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - buffer: &Buffer, + buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_clear_buffer(self, &encoder, encoder_data, buffer, offset, size) + let buffer_data = downcast_ref(buffer_data); + Context::command_encoder_clear_buffer(self, encoder_data, buffer_data, offset, size) } - fn command_encoder_insert_debug_marker( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ) { - let encoder = ::from(*encoder); + fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str) { let encoder_data = 
downcast_ref(encoder_data); - Context::command_encoder_insert_debug_marker(self, &encoder, encoder_data, label) + Context::command_encoder_insert_debug_marker(self, encoder_data, label) } - fn command_encoder_push_debug_group( - &self, - encoder: &ObjectId, - encoder_data: &crate::Data, - label: &str, - ) { - let encoder = ::from(*encoder); + fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str) { let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_push_debug_group(self, &encoder, encoder_data, label) + Context::command_encoder_push_debug_group(self, encoder_data, label) } - fn command_encoder_pop_debug_group(&self, encoder: &ObjectId, encoder_data: &crate::Data) { - let encoder = ::from(*encoder); + fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data) { let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_pop_debug_group(self, &encoder, encoder_data) + Context::command_encoder_pop_debug_group(self, encoder_data) } fn command_encoder_write_timestamp( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::command_encoder_write_timestamp( - self, - &encoder, - encoder_data, - &query_set, - query_set_data, - query_index, - ) + Context::command_encoder_write_timestamp(self, encoder_data, query_set_data, query_index) } fn command_encoder_resolve_query_set( &self, - encoder: &ObjectId, encoder_data: &crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, first_query: u32, query_count: u32, - destination: &ObjectId, destination_data: &crate::Data, destination_offset: BufferAddress, ) { - let encoder = ::from(*encoder); let encoder_data = downcast_ref(encoder_data); - let query_set = ::from(*query_set); let 
query_set_data = downcast_ref(query_set_data); - let destination = ::from(*destination); let destination_data = downcast_ref(destination_data); Context::command_encoder_resolve_query_set( self, - &encoder, encoder_data, - &query_set, query_set_data, first_query, query_count, - &destination, destination_data, destination_offset, ) @@ -2939,430 +2014,275 @@ where fn render_bundle_encoder_finish( &self, - encoder: ObjectId, encoder_data: Box, desc: &RenderBundleDescriptor<'_>, - ) -> (ObjectId, Box) { + ) -> Box { let encoder_data = *encoder_data.downcast().unwrap(); - let (render_bundle, data) = - Context::render_bundle_encoder_finish(self, encoder.into(), encoder_data, desc); - (render_bundle.into(), Box::new(data) as _) + let data = Context::render_bundle_encoder_finish(self, encoder_data, desc); + Box::new(data) as _ } fn queue_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, data: &[u8], ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_write_buffer(self, &queue, queue_data, &buffer, buffer_data, offset, data) + Context::queue_write_buffer(self, queue_data, buffer_data, offset, data) } fn queue_validate_write_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()> { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_validate_write_buffer( - self, - &queue, - queue_data, - &buffer, - buffer_data, - offset, - size, - ) + Context::queue_validate_write_buffer(self, queue_data, buffer_data, offset, size) } fn queue_create_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, size: BufferSize, ) -> Option> { - let 
queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_create_staging_buffer(self, &queue, queue_data, size) + Context::queue_create_staging_buffer(self, queue_data, size) } fn queue_write_staging_buffer( &self, - queue: &ObjectId, queue_data: &crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, staging_buffer: &dyn QueueWriteBuffer, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::queue_write_staging_buffer( - self, - &queue, - queue_data, - &buffer, - buffer_data, - offset, - staging_buffer, - ) + Context::queue_write_staging_buffer(self, queue_data, buffer_data, offset, staging_buffer) } fn queue_write_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, texture: ImageCopyTexture<'_>, data: &[u8], data_layout: ImageDataLayout, size: Extent3d, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_write_texture(self, &queue, queue_data, texture, data, data_layout, size) + Context::queue_write_texture(self, queue_data, texture, data, data_layout, size) } #[cfg(any(webgpu, webgl))] fn queue_copy_external_image_to_texture( &self, - queue: &ObjectId, queue_data: &crate::Data, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged<'_>, size: wgt::Extent3d, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_copy_external_image_to_texture(self, &queue, queue_data, source, dest, size) + Context::queue_copy_external_image_to_texture(self, queue_data, source, dest, size) } fn queue_submit( &self, - queue: &ObjectId, queue_data: &crate::Data, - command_buffers: &mut dyn Iterator)>, + command_buffers: &mut dyn Iterator>, ) -> Arc { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let command_buffers = command_buffers.map(|(id, data)| { - let command_buffer_data: 
::CommandBufferData = *data.downcast().unwrap(); - (::from(id), command_buffer_data) - }); - let data = Context::queue_submit(self, &queue, queue_data, command_buffers); + let command_buffers = command_buffers.map(|data| *data.downcast().unwrap()); + let data = Context::queue_submit(self, queue_data, command_buffers); Arc::new(data) as _ } - fn queue_get_timestamp_period(&self, queue: &ObjectId, queue_data: &crate::Data) -> f32 { - let queue = ::from(*queue); + fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32 { let queue_data = downcast_ref(queue_data); - Context::queue_get_timestamp_period(self, &queue, queue_data) + Context::queue_get_timestamp_period(self, queue_data) } fn queue_on_submitted_work_done( &self, - queue: &ObjectId, queue_data: &crate::Data, callback: SubmittedWorkDoneCallback, ) { - let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - Context::queue_on_submitted_work_done(self, &queue, queue_data, callback) + Context::queue_on_submitted_work_done(self, queue_data, callback) } - fn device_start_capture(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_start_capture(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_start_capture(self, &device, device_data) + Context::device_start_capture(self, device_data) } - fn device_stop_capture(&self, device: &ObjectId, device_data: &crate::Data) { - let device = ::from(*device); + fn device_stop_capture(&self, device_data: &crate::Data) { let device_data = downcast_ref(device_data); - Context::device_stop_capture(self, &device, device_data) + Context::device_stop_capture(self, device_data) } - fn device_get_internal_counters( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> wgt::InternalCounters { - let device = ::from(*device); + fn device_get_internal_counters(&self, device_data: &crate::Data) -> wgt::InternalCounters { let device_data = 
downcast_ref(device_data); - Context::device_get_internal_counters(self, &device, device_data) + Context::device_get_internal_counters(self, device_data) } - fn generate_allocator_report( - &self, - device: &ObjectId, - device_data: &crate::Data, - ) -> Option { - let device = ::from(*device); + fn generate_allocator_report(&self, device_data: &crate::Data) -> Option { let device_data = downcast_ref(device_data); - Context::device_generate_allocator_report(self, &device, device_data) + Context::device_generate_allocator_report(self, device_data) } - fn pipeline_cache_get_data( - &self, - cache: &ObjectId, - cache_data: &crate::Data, - ) -> Option> { - let cache = ::from(*cache); + fn pipeline_cache_get_data(&self, cache_data: &crate::Data) -> Option> { let cache_data = downcast_ref::(cache_data); - Context::pipeline_cache_get_data(self, &cache, cache_data) + Context::pipeline_cache_get_data(self, cache_data) } - fn compute_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ) { - let mut pass = ::from(*pass); + fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { let pass_data = downcast_mut::(pass_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pass_set_pipeline(self, &mut pass, pass_data, &pipeline, pipeline_data) + Context::compute_pass_set_pipeline(self, pass_data, pipeline_data) } fn compute_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let bind_group = ::from(*bind_group); - let bind_group_data = downcast_ref(bind_group_data); - Context::compute_pass_set_bind_group( - self, - &mut pass, - pass_data, - index, - &bind_group, 
- bind_group_data, - offsets, - ) + let bg = bind_group_data.map(downcast_ref); + Context::compute_pass_set_bind_group(self, pass_data, index, bg, offsets) } fn compute_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, offset: u32, data: &[u8], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::compute_pass_set_push_constants(self, &mut pass, pass_data, offset, data) + Context::compute_pass_set_push_constants(self, pass_data, offset, data) } - fn compute_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ) { - let mut pass = ::from(*pass); + fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_insert_debug_marker(self, &mut pass, pass_data, label) + Context::compute_pass_insert_debug_marker(self, pass_data, label) } - fn compute_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ) { - let mut pass = ::from(*pass); + fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_push_debug_group(self, &mut pass, pass_data, group_label) + Context::compute_pass_push_debug_group(self, pass_data, group_label) } - fn compute_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_pop_debug_group(self, &mut pass, pass_data) + Context::compute_pass_pop_debug_group(self, pass_data) } fn compute_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = 
downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::compute_pass_write_timestamp( - self, - &mut pass, - pass_data, - &query_set, - query_set_data, - query_index, - ) + Context::compute_pass_write_timestamp(self, pass_data, query_set_data, query_index) } fn compute_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); Context::compute_pass_begin_pipeline_statistics_query( self, - &mut pass, pass_data, - &query_set, query_set_data, query_index, ) } - fn compute_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ) { - let mut pass = ::from(*pass); + fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::compute_pass_end_pipeline_statistics_query(self, &mut pass, pass_data) + Context::compute_pass_end_pipeline_statistics_query(self, pass_data) } fn compute_pass_dispatch_workgroups( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, z: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::compute_pass_dispatch_workgroups(self, &mut pass, pass_data, x, y, z) + Context::compute_pass_dispatch_workgroups(self, pass_data, x, y, z) } fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); 
Context::compute_pass_dispatch_workgroups_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) } - fn compute_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn compute_pass_end(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut(pass_data); - Context::compute_pass_end(self, &mut pass, pass_data) + Context::compute_pass_end(self, pass_data) } fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - pipeline: &ObjectId, pipeline_data: &crate::Data, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::render_bundle_encoder_set_pipeline( - self, - &mut encoder, - encoder_data, - &pipeline, - pipeline_data, - ) + Context::render_bundle_encoder_set_pipeline(self, encoder_data, pipeline_data) } fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let bind_group = ::from(*bind_group); - let bind_group_data = downcast_ref(bind_group_data); - Context::render_bundle_encoder_set_bind_group( - self, - &mut encoder, - encoder_data, - index, - &bind_group, - bind_group_data, - offsets, - ) + let bg = bind_group_data.map(downcast_ref); + Context::render_bundle_encoder_set_bind_group(self, encoder_data, index, bg, offsets) } fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, size: Option, ) { - let mut encoder = ::from(*encoder); let encoder_data = 
downcast_mut::(encoder_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); Context::render_bundle_encoder_set_index_buffer( self, - &mut encoder, encoder_data, - &buffer, buffer_data, index_format, offset, @@ -3372,24 +2292,18 @@ where fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); Context::render_bundle_encoder_set_vertex_buffer( self, - &mut encoder, encoder_data, slot, - &buffer, buffer_data, offset, size, @@ -3398,49 +2312,35 @@ where fn render_bundle_encoder_set_push_constants( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, stages: ShaderStages, offset: u32, data: &[u8], ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_set_push_constants( - self, - &mut encoder, - encoder_data, - stages, - offset, - data, - ) + Context::render_bundle_encoder_set_push_constants(self, encoder_data, stages, offset, data) } fn render_bundle_encoder_draw( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, vertices: Range, instances: Range, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_draw(self, &mut encoder, encoder_data, vertices, instances) + Context::render_bundle_encoder_draw(self, encoder_data, vertices, instances) } fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, indices: Range, base_vertex: i32, instances: Range, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); Context::render_bundle_encoder_draw_indexed( self, - &mut encoder, encoder_data, 
indices, base_vertex, @@ -3450,21 +2350,15 @@ where fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_draw_indirect( self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) @@ -3472,195 +2366,51 @@ where fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut ObjectId, encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut encoder = ::from(*encoder); let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_bundle_encoder_draw_indexed_indirect( self, - &mut encoder, - encoder_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - ) - } - - fn render_bundle_encoder_multi_draw_indirect( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ) { - let mut encoder = ::from(*encoder); - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_bundle_encoder_multi_draw_indirect( - self, - &mut encoder, - encoder_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - count, - ) - } - - fn render_bundle_encoder_multi_draw_indexed_indirect( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - 
indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ) { - let mut encoder = ::from(*encoder); - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_bundle_encoder_multi_draw_indexed_indirect( - self, - &mut encoder, - encoder_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - count, - ) - } - - fn render_bundle_encoder_multi_draw_indirect_count( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer: &ObjectId, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ) { - let mut encoder = ::from(*encoder); - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); - let count_buffer_data = downcast_ref(count_buffer_data); - Context::render_bundle_encoder_multi_draw_indirect_count( - self, - &mut encoder, encoder_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, - count_buffer_data, - count_buffer_offset, - max_count, ) } - fn render_bundle_encoder_multi_draw_indexed_indirect_count( - &self, - encoder: &mut ObjectId, - encoder_data: &mut crate::Data, - indirect_buffer: &ObjectId, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer: &ObjectId, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ) { - let mut encoder = ::from(*encoder); - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer = ::from(*indirect_buffer); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); - let count_buffer_data = 
downcast_ref(count_buffer_data); - Context::render_bundle_encoder_multi_draw_indexed_indirect_count( - self, - &mut encoder, - encoder_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - &count_buffer, - count_buffer_data, - count_buffer_offset, - max_count, - ) - } - - fn render_pass_set_pipeline( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - pipeline: &ObjectId, - pipeline_data: &crate::Data, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { let pass_data = downcast_mut::(pass_data); - let pipeline = ::from(*pipeline); let pipeline_data = downcast_ref(pipeline_data); - Context::render_pass_set_pipeline(self, &mut pass, pass_data, &pipeline, pipeline_data) + Context::render_pass_set_pipeline(self, pass_data, pipeline_data) } fn render_pass_set_bind_group( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, index: u32, - bind_group: &ObjectId, - bind_group_data: &crate::Data, + bind_group_data: Option<&crate::Data>, offsets: &[DynamicOffset], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let bind_group = ::from(*bind_group); - let bind_group_data = downcast_ref(bind_group_data); - Context::render_pass_set_bind_group( - self, - &mut pass, - pass_data, - index, - &bind_group, - bind_group_data, - offsets, - ) + let bg = bind_group_data.map(downcast_ref); + Context::render_pass_set_bind_group(self, pass_data, index, bg, offsets) } fn render_pass_set_index_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - buffer: &ObjectId, buffer_data: &crate::Data, index_format: IndexFormat, offset: BufferAddress, size: Option, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); Context::render_pass_set_index_buffer( self, - &mut pass, pass_data, - &buffer, buffer_data, index_format, offset, @@ 
-3670,114 +2420,71 @@ where fn render_pass_set_vertex_buffer( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, slot: u32, - buffer: &ObjectId, buffer_data: &crate::Data, offset: BufferAddress, size: Option, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let buffer = ::from(*buffer); let buffer_data = downcast_ref(buffer_data); - Context::render_pass_set_vertex_buffer( - self, - &mut pass, - pass_data, - slot, - &buffer, - buffer_data, - offset, - size, - ) + Context::render_pass_set_vertex_buffer(self, pass_data, slot, buffer_data, offset, size) } fn render_pass_set_push_constants( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, stages: ShaderStages, offset: u32, data: &[u8], ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_push_constants(self, &mut pass, pass_data, stages, offset, data) + Context::render_pass_set_push_constants(self, pass_data, stages, offset, data) } fn render_pass_draw( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, vertices: Range, instances: Range, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw(self, &mut pass, pass_data, vertices, instances) + Context::render_pass_draw(self, pass_data, vertices, instances) } fn render_pass_draw_indexed( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, indices: Range, base_vertex: i32, instances: Range, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw_indexed( - self, - &mut pass, - pass_data, - indices, - base_vertex, - instances, - ) + Context::render_pass_draw_indexed(self, pass_data, indices, base_vertex, instances) } fn render_pass_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = 
downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_draw_indirect( - self, - &mut pass, - pass_data, - &indirect_buffer, - indirect_buffer_data, - indirect_offset, - ) + Context::render_pass_draw_indirect(self, pass_data, indirect_buffer_data, indirect_offset) } fn render_pass_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_draw_indexed_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, ) @@ -3785,22 +2492,16 @@ where fn render_pass_multi_draw_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_multi_draw_indirect( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, count, @@ -3809,22 +2510,16 @@ where fn render_pass_multi_draw_indexed_indirect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); Context::render_pass_multi_draw_indexed_indirect( self, - &mut pass, pass_data, - &indirect_buffer, 
indirect_buffer_data, indirect_offset, count, @@ -3833,30 +2528,21 @@ where fn render_pass_multi_draw_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_pass_multi_draw_indirect_count( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, @@ -3865,64 +2551,46 @@ where fn render_pass_multi_draw_indexed_indirect_count( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - indirect_buffer: &ObjectId, indirect_buffer_data: &crate::Data, indirect_offset: BufferAddress, - count_buffer: &ObjectId, count_buffer_data: &crate::Data, count_buffer_offset: BufferAddress, max_count: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let indirect_buffer = ::from(*indirect_buffer); let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer = ::from(*count_buffer); let count_buffer_data = downcast_ref(count_buffer_data); Context::render_pass_multi_draw_indexed_indirect_count( self, - &mut pass, pass_data, - &indirect_buffer, indirect_buffer_data, indirect_offset, - &count_buffer, count_buffer_data, count_buffer_offset, max_count, ) } - fn render_pass_set_blend_constant( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - color: Color, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color) { let pass_data 
= downcast_mut::(pass_data); - Context::render_pass_set_blend_constant(self, &mut pass, pass_data, color) + Context::render_pass_set_blend_constant(self, pass_data, color) } fn render_pass_set_scissor_rect( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: u32, y: u32, width: u32, height: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_scissor_rect(self, &mut pass, pass_data, x, y, width, height) + Context::render_pass_set_scissor_rect(self, pass_data, x, y, width, height) } fn render_pass_set_viewport( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, x: f32, y: f32, @@ -3931,142 +2599,87 @@ where min_depth: f32, max_depth: f32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); Context::render_pass_set_viewport( - self, &mut pass, pass_data, x, y, width, height, min_depth, max_depth, + self, pass_data, x, y, width, height, min_depth, max_depth, ) } - fn render_pass_set_stencil_reference( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - reference: u32, - ) { - let mut pass = ::from(*pass); + fn render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_stencil_reference(self, &mut pass, pass_data, reference) + Context::render_pass_set_stencil_reference(self, pass_data, reference) } - fn render_pass_insert_debug_marker( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - label: &str, - ) { - let mut pass = ::from(*pass); + fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_insert_debug_marker(self, &mut pass, pass_data, label) + Context::render_pass_insert_debug_marker(self, pass_data, label) } - fn render_pass_push_debug_group( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - group_label: &str, - ) { - let mut 
pass = ::from(*pass); + fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_push_debug_group(self, &mut pass, pass_data, group_label) + Context::render_pass_push_debug_group(self, pass_data, group_label) } - fn render_pass_pop_debug_group(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_pop_debug_group(self, &mut pass, pass_data) + Context::render_pass_pop_debug_group(self, pass_data) } fn render_pass_write_timestamp( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); - Context::render_pass_write_timestamp( - self, - &mut pass, - pass_data, - &query_set, - query_set_data, - query_index, - ) + Context::render_pass_write_timestamp(self, pass_data, query_set_data, query_index) } - fn render_pass_begin_occlusion_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - query_index: u32, - ) { - let mut pass = ::from(*pass); + fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_begin_occlusion_query(self, &mut pass, pass_data, query_index) + Context::render_pass_begin_occlusion_query(self, pass_data, query_index) } - fn render_pass_end_occlusion_query(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_end_occlusion_query(self, &mut pass, pass_data) + 
Context::render_pass_end_occlusion_query(self, pass_data) } fn render_pass_begin_pipeline_statistics_query( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - query_set: &ObjectId, query_set_data: &crate::Data, query_index: u32, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let query_set = ::from(*query_set); let query_set_data = downcast_ref(query_set_data); Context::render_pass_begin_pipeline_statistics_query( self, - &mut pass, pass_data, - &query_set, query_set_data, query_index, ) } - fn render_pass_end_pipeline_statistics_query( - &self, - pass: &mut ObjectId, - pass_data: &mut crate::Data, - ) { - let mut pass = ::from(*pass); + fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut::(pass_data); - Context::render_pass_end_pipeline_statistics_query(self, &mut pass, pass_data) + Context::render_pass_end_pipeline_statistics_query(self, pass_data) } fn render_pass_execute_bundles( &self, - pass: &mut ObjectId, pass_data: &mut crate::Data, - render_bundles: &mut dyn Iterator, + render_bundles: &mut dyn Iterator, ) { - let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let mut render_bundles = render_bundles.map(|(id, data)| { - let render_bundle_data: &::RenderBundleData = downcast_ref(data); - (::from(*id), render_bundle_data) - }); - Context::render_pass_execute_bundles(self, &mut pass, pass_data, &mut render_bundles) + let mut render_bundles = render_bundles.map(downcast_ref); + Context::render_pass_execute_bundles(self, pass_data, &mut render_bundles) } - fn render_pass_end(&self, pass: &mut ObjectId, pass_data: &mut crate::Data) { - let mut pass = ::from(*pass); + fn render_pass_end(&self, pass_data: &mut crate::Data) { let pass_data = downcast_mut(pass_data); - Context::render_pass_end(self, &mut pass, pass_data) + Context::render_pass_end(self, pass_data) } } @@ -4075,6 +2688,7 @@ pub trait QueueWriteBuffer: WasmNotSendSync + Debug 
{ fn slice_mut(&mut self) -> &mut [u8]; + #[cfg(not(target_os = "emscripten"))] fn as_any(&self) -> &dyn Any; } diff --git a/wgpu/src/macros.rs b/wgpu/src/macros.rs index 594388528f..db9548d90c 100644 --- a/wgpu/src/macros.rs +++ b/wgpu/src/macros.rs @@ -32,12 +32,14 @@ macro_rules! vertex_attr_array { #[test] fn test_vertex_attr_array() { + use std::mem::size_of; + let attrs = vertex_attr_array![0 => Float32x2, 3 => Uint16x4]; // VertexAttribute does not support PartialEq, so we cannot test directly assert_eq!(attrs.len(), 2); assert_eq!(attrs[0].offset, 0); assert_eq!(attrs[0].shader_location, 0); - assert_eq!(attrs[1].offset, std::mem::size_of::<(f32, f32)>() as u64); + assert_eq!(attrs[1].offset, size_of::<(f32, f32)>() as u64); assert_eq!(attrs[1].shader_location, 3); } diff --git a/wgpu/src/util/encoder.rs b/wgpu/src/util/encoder.rs index bef8fe9509..c6d42e3eba 100644 --- a/wgpu/src/util/encoder.rs +++ b/wgpu/src/util/encoder.rs @@ -10,7 +10,12 @@ pub trait RenderEncoder<'a> { /// in the active pipeline when any `draw()` function is called must match the layout of this bind group. /// /// If the bind group have dynamic offsets, provide them in order of their declaration. - fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]); + fn set_bind_group( + &mut self, + index: u32, + bind_group: Option<&'a BindGroup>, + offsets: &[DynamicOffset], + ); /// Sets the active render pipeline. 
/// @@ -101,7 +106,12 @@ pub trait RenderEncoder<'a> { impl<'a> RenderEncoder<'a> for RenderPass<'a> { #[inline(always)] - fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) { + fn set_bind_group( + &mut self, + index: u32, + bind_group: Option<&'a BindGroup>, + offsets: &[DynamicOffset], + ) { Self::set_bind_group(self, index, bind_group, offsets); } @@ -152,7 +162,12 @@ impl<'a> RenderEncoder<'a> for RenderPass<'a> { impl<'a> RenderEncoder<'a> for RenderBundleEncoder<'a> { #[inline(always)] - fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) { + fn set_bind_group( + &mut self, + index: u32, + bind_group: Option<&'a BindGroup>, + offsets: &[DynamicOffset], + ) { Self::set_bind_group(self, index, bind_group, offsets); } diff --git a/wgpu/src/util/mod.rs b/wgpu/src/util/mod.rs index ff4fb7ecf8..11148179b4 100644 --- a/wgpu/src/util/mod.rs +++ b/wgpu/src/util/mod.rs @@ -125,7 +125,6 @@ impl DownloadBuffer { let mapped_range = crate::context::DynContext::buffer_get_mapped_range( &*download.context, - &download.id, download.data.as_ref(), 0..size, );