|
//! Tests that compute passes take ownership of the resources associated with them.
//! I.e. once a resource is passed in to a compute pass, it can be dropped.
//!
//! TODO: The test doesn't check timestamp writes & pipeline statistics queries yet.
//! (Not important as long as they are lifetime-constrained to the command encoder,
//! but once we lift this constraint, we should add tests for this as well!)
//! TODO: Also should test resource ownership for:
//! * `write_timestamp`
//! * `begin_pipeline_statistics_query`
| 10 | +
|
| 11 | +use std::num::NonZeroU64; |
| 12 | + |
| 13 | +use wgpu::util::DeviceExt as _; |
| 14 | +use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters, TestingContext}; |
| 15 | + |
// WGSL compute shader: doubles the single vec4f element at index 0 of the
// bound storage buffer. The test seeds the buffer with [1, 2, 3, 4] and
// expects [2, 4, 6, 8] after one dispatch.
const SHADER_SRC: &str = "
@group(0) @binding(0)
var<storage, read_write> buffer: array<vec4f>;

@compute @workgroup_size(1, 1, 1) fn main() {
    buffer[0] *= 2.0;
}
";
| 24 | + |
// Registers the ownership test with the wgpu test harness, using the default
// feature/limit requirements.
#[gpu_test]
static COMPUTE_PASS_RESOURCE_OWNERSHIP: GpuTestConfiguration = GpuTestConfiguration::new()
    .parameters(TestParameters::default().test_features_limits())
    .run_async(compute_pass_resource_ownership);
| 29 | + |
| 30 | +async fn compute_pass_resource_ownership(ctx: TestingContext) { |
| 31 | + let ResourceSetup { |
| 32 | + gpu_buffer, |
| 33 | + cpu_buffer, |
| 34 | + buffer_size, |
| 35 | + indirect_buffer, |
| 36 | + bind_group, |
| 37 | + pipeline, |
| 38 | + } = resource_setup(&ctx); |
| 39 | + |
| 40 | + let mut encoder = ctx |
| 41 | + .device |
| 42 | + .create_command_encoder(&wgpu::CommandEncoderDescriptor { |
| 43 | + label: Some("encoder"), |
| 44 | + }); |
| 45 | + |
| 46 | + { |
| 47 | + let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { |
| 48 | + label: Some("compute_pass"), |
| 49 | + timestamp_writes: None, // TODO: See description above, we should test this as well once we lift the lifetime bound. |
| 50 | + }); |
| 51 | + cpass.set_pipeline(&pipeline); |
| 52 | + cpass.set_bind_group(0, &bind_group, &[]); |
| 53 | + cpass.dispatch_workgroups_indirect(&indirect_buffer, 0); |
| 54 | + |
| 55 | + // Now drop all resources we set. Then do a device poll to make sure the resources are really not dropped too early, no matter what. |
| 56 | + drop(pipeline); |
| 57 | + drop(bind_group); |
| 58 | + drop(indirect_buffer); |
| 59 | + ctx.async_poll(wgpu::Maintain::wait()) |
| 60 | + .await |
| 61 | + .panic_on_timeout(); |
| 62 | + } |
| 63 | + |
| 64 | + // Ensure that the compute pass still executed normally. |
| 65 | + encoder.copy_buffer_to_buffer(&gpu_buffer, 0, &cpu_buffer, 0, buffer_size); |
| 66 | + ctx.queue.submit([encoder.finish()]); |
| 67 | + cpu_buffer.slice(..).map_async(wgpu::MapMode::Read, |_| ()); |
| 68 | + ctx.async_poll(wgpu::Maintain::wait()) |
| 69 | + .await |
| 70 | + .panic_on_timeout(); |
| 71 | + |
| 72 | + let data = cpu_buffer.slice(..).get_mapped_range(); |
| 73 | + |
| 74 | + let floats: &[f32] = bytemuck::cast_slice(&data); |
| 75 | + assert_eq!(floats, [2.0, 4.0, 6.0, 8.0]); |
| 76 | +} |
| 77 | + |
| 78 | +// Setup ------------------------------------------------------------ |
| 79 | + |
// Everything the ownership test needs; returned as one bundle by `resource_setup`.
struct ResourceSetup {
    // Storage buffer the shader writes to.
    gpu_buffer: wgpu::Buffer,
    // Mappable readback buffer for verifying results on the CPU.
    cpu_buffer: wgpu::Buffer,
    // Size in bytes of both buffers above.
    buffer_size: u64,

    // Holds the `DispatchIndirectArgs` for the indirect dispatch.
    indirect_buffer: wgpu::Buffer,
    bind_group: wgpu::BindGroup,
    pipeline: wgpu::ComputePipeline,
}
| 89 | + |
| 90 | +fn resource_setup(ctx: &TestingContext) -> ResourceSetup { |
| 91 | + let sm = ctx |
| 92 | + .device |
| 93 | + .create_shader_module(wgpu::ShaderModuleDescriptor { |
| 94 | + label: Some("shader"), |
| 95 | + source: wgpu::ShaderSource::Wgsl(SHADER_SRC.into()), |
| 96 | + }); |
| 97 | + |
| 98 | + let buffer_size = 4 * std::mem::size_of::<f32>() as u64; |
| 99 | + |
| 100 | + let bgl = ctx |
| 101 | + .device |
| 102 | + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { |
| 103 | + label: Some("bind_group_layout"), |
| 104 | + entries: &[wgpu::BindGroupLayoutEntry { |
| 105 | + binding: 0, |
| 106 | + visibility: wgpu::ShaderStages::COMPUTE, |
| 107 | + ty: wgpu::BindingType::Buffer { |
| 108 | + ty: wgpu::BufferBindingType::Storage { read_only: false }, |
| 109 | + has_dynamic_offset: false, |
| 110 | + min_binding_size: NonZeroU64::new(buffer_size), |
| 111 | + }, |
| 112 | + count: None, |
| 113 | + }], |
| 114 | + }); |
| 115 | + |
| 116 | + let gpu_buffer = ctx |
| 117 | + .device |
| 118 | + .create_buffer_init(&wgpu::util::BufferInitDescriptor { |
| 119 | + label: Some("gpu_buffer"), |
| 120 | + usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC, |
| 121 | + contents: bytemuck::bytes_of(&[1.0_f32, 2.0, 3.0, 4.0]), |
| 122 | + }); |
| 123 | + |
| 124 | + let cpu_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { |
| 125 | + label: Some("cpu_buffer"), |
| 126 | + size: buffer_size, |
| 127 | + usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, |
| 128 | + mapped_at_creation: false, |
| 129 | + }); |
| 130 | + |
| 131 | + let indirect_buffer = ctx |
| 132 | + .device |
| 133 | + .create_buffer_init(&wgpu::util::BufferInitDescriptor { |
| 134 | + label: Some("gpu_buffer"), |
| 135 | + usage: wgpu::BufferUsages::INDIRECT, |
| 136 | + contents: wgpu::util::DispatchIndirectArgs { x: 1, y: 1, z: 1 }.as_bytes(), |
| 137 | + }); |
| 138 | + |
| 139 | + let bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { |
| 140 | + label: Some("bind_group"), |
| 141 | + layout: &bgl, |
| 142 | + entries: &[wgpu::BindGroupEntry { |
| 143 | + binding: 0, |
| 144 | + resource: gpu_buffer.as_entire_binding(), |
| 145 | + }], |
| 146 | + }); |
| 147 | + |
| 148 | + let pipeline_layout = ctx |
| 149 | + .device |
| 150 | + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { |
| 151 | + label: Some("pipeline_layout"), |
| 152 | + bind_group_layouts: &[&bgl], |
| 153 | + push_constant_ranges: &[], |
| 154 | + }); |
| 155 | + |
| 156 | + let pipeline = ctx |
| 157 | + .device |
| 158 | + .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { |
| 159 | + label: Some("pipeline"), |
| 160 | + layout: Some(&pipeline_layout), |
| 161 | + module: &sm, |
| 162 | + entry_point: "main", |
| 163 | + compilation_options: Default::default(), |
| 164 | + }); |
| 165 | + |
| 166 | + ResourceSetup { |
| 167 | + gpu_buffer, |
| 168 | + cpu_buffer, |
| 169 | + buffer_size, |
| 170 | + indirect_buffer, |
| 171 | + bind_group, |
| 172 | + pipeline, |
| 173 | + } |
| 174 | +} |
0 commit comments