
Commit d43a9bf

cwfitzgerald and kpreid committed
[wgpu] add convenience functions for deferring mapping/callbacks
Co-authored-by: Kevin Reid <[email protected]>
1 parent 2996c92 commit d43a9bf

File tree

13 files changed, +565 -45 lines changed

CHANGELOG.md

Lines changed: 22 additions & 0 deletions
@@ -42,6 +42,28 @@ Bottom level categories:

### Major Changes

#### Deferred command buffer actions: `map_buffer_on_submit` and `on_submitted_work_done`

You can now schedule buffer mapping and submission-complete callbacks directly from encoders, command buffers, and passes; they run automatically once the work is submitted.

```rust
// Record some GPU work so the submission isn't empty and touches `buffer`.
encoder.clear_buffer(&buffer, 0, None);

// Defer mapping until this encoder is submitted.
encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..size, |result| { .. });

// Fires after the command buffer's work is finished.
encoder.on_submitted_work_done(|| { .. });

// Automatically calls `map_async` and `on_submitted_work_done` after this submission finishes.
queue.submit([encoder.finish()]);
```

Available on `CommandEncoder`, `CommandBuffer`, `RenderPass`, and `ComputePass`.

By @cwfitzgerald in [#8125](https://github.com/gfx-rs/wgpu/pull/8125).
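The pass-level variants work the same way as the encoder-level calls above. The following is a minimal sketch rather than code from this commit; `encoder`, `buffer`, `size`, and `queue` are assumed to already exist, and the actual compute work is elided:

```rust
{
    // Deferred actions can also be registered on a pass.
    let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default());
    // ... record compute work that writes into `buffer` ...

    // Queued just like on the encoder; both run once this submission completes.
    pass.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..size, |result| {
        result.expect("deferred mapping failed");
    });
    pass.on_submitted_work_done(|| { /* compute work finished */ });
} // The pass must end before the encoder is finished.

queue.submit([encoder.finish()]);
```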
#### `EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE` has been merged into `EXPERIMENTAL_RAY_QUERY`

We have merged the acceleration structure feature into the `RayQuery` feature. This is to help work around an AMD driver bug and reduce the feature complexity of ray tracing. In the future when ray tracing pipelines are implemented, if either feature is enabled, acceleration structures will be available.
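As a migration sketch (assumed setup, not part of this commit): after the merge, requesting `EXPERIMENTAL_RAY_QUERY` alone is enough, since acceleration-structure support now comes with it.

```rust
// Hypothetical device descriptor after the merge: only the ray-query feature
// is named; acceleration structures are implied by it.
let descriptor = wgpu::DeviceDescriptor {
    label: Some("ray-query device"),
    required_features: wgpu::Features::EXPERIMENTAL_RAY_QUERY,
    ..Default::default()
};
```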
tests/tests/wgpu-validation/api/command_buffer_actions.rs

Lines changed: 261 additions & 0 deletions
@@ -0,0 +1,261 @@
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering::SeqCst};
use std::sync::Arc;

/// Helper to create a small mappable buffer for READ tests.
fn make_read_buffer(device: &wgpu::Device, size: u64) -> wgpu::Buffer {
    device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("read buffer"),
        size,
        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    })
}

/// map_buffer_on_submit defers mapping until submit, then invokes the callback after polling.
#[test]
fn encoder_map_buffer_on_submit_defers_until_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    let fired = Arc::new(AtomicBool::new(false));
    let fired_cl = Arc::clone(&fired);

    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("encoder"),
    });

    // Register deferred map.
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..4, move |_| {
        fired_cl.store(true, SeqCst);
    });
    // Include a trivial command that uses the buffer.
    encoder.clear_buffer(&buffer, 0, None);

    // Polling before submit should not trigger the callback.
    _ = device.poll(wgpu::PollType::Poll);
    assert!(!fired.load(SeqCst));

    // Submit and wait; callback should fire.
    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);
    assert!(fired.load(SeqCst));
}

/// Empty ranges panic immediately when registering the deferred map.
#[test]
#[should_panic = "buffer slices can not be empty"]
fn encoder_map_buffer_on_submit_empty_range_panics_immediately() {
    let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });

    // This panics inside map_buffer_on_submit (range_to_offset_size).
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 8..8, |_| {});
}

/// Out-of-bounds ranges panic during submit (when the deferred map executes).
#[test]
#[should_panic = "is out of range for buffer of size"]
fn encoder_map_buffer_on_submit_out_of_bounds_panics_on_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    // 12..24 overflows the 16-byte buffer (size=12, end=24).
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 12..24, |_| {});
    encoder.clear_buffer(&buffer, 0, None);

    // Panic happens inside submit when executing deferred actions.
    queue.submit([encoder.finish()]);
}

/// If the buffer is already mapped when the deferred mapping executes, it panics during submit.
#[test]
#[should_panic = "Buffer with 'read buffer' label is still mapped"]
fn encoder_map_buffer_on_submit_panics_if_already_mapped_on_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    // Start a mapping now so the buffer is considered mapped.
    buffer.slice(0..4).map_async(wgpu::MapMode::Read, |_| {});

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    // Deferred mapping of an already-mapped buffer will panic when executed on submit or be rejected by submit.
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..4, |_| {});
    // Include any trivial work; using the same buffer ensures core validation catches the mapped hazard.
    encoder.clear_buffer(&buffer, 0, None);

    queue.submit([encoder.finish()]);
}

/// on_submitted_work_done is deferred until submit.
#[test]
fn encoder_on_submitted_work_done_defers_until_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());

    let fired = Arc::new(AtomicBool::new(false));
    let fired_cl = Arc::clone(&fired);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });

    encoder.on_submitted_work_done(move || {
        fired_cl.store(true, SeqCst);
    });

    // Include a trivial command so the command buffer isn't completely empty.
    let dummy = make_read_buffer(&device, 4);
    encoder.clear_buffer(&dummy, 0, None);

    // Without submission, polling shouldn't invoke the callback.
    _ = device.poll(wgpu::PollType::Poll);
    assert!(!fired.load(SeqCst));

    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);
    assert!(fired.load(SeqCst));
}

/// Both kinds of deferred callbacks are enqueued and eventually invoked.
#[test]
fn encoder_both_callbacks_fire_after_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    let map_fired = Arc::new(AtomicBool::new(false));
    let map_fired_cl = Arc::clone(&map_fired);
    let queue_fired = Arc::new(AtomicBool::new(false));
    let queue_fired_cl = Arc::clone(&queue_fired);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..4, move |_| {
        map_fired_cl.store(true, SeqCst);
    });
    encoder.on_submitted_work_done(move || {
        queue_fired_cl.store(true, SeqCst);
    });
    encoder.clear_buffer(&buffer, 0, None);

    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);

    assert!(map_fired.load(SeqCst));
    assert!(queue_fired.load(SeqCst));
}

/// Registering multiple deferred mappings works; all callbacks fire after submit.
#[test]
fn encoder_multiple_map_buffer_on_submit_callbacks_fire() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer1 = make_read_buffer(&device, 32);
    let buffer2 = make_read_buffer(&device, 32);

    let counter = Arc::new(AtomicU32::new(0));
    let c1 = Arc::clone(&counter);
    let c2 = Arc::clone(&counter);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.map_buffer_on_submit(&buffer1, wgpu::MapMode::Read, 0..4, move |_| {
        c1.fetch_add(1, SeqCst);
    });
    encoder.map_buffer_on_submit(&buffer2, wgpu::MapMode::Read, 8..12, move |_| {
        c2.fetch_add(1, SeqCst);
    });
    encoder.clear_buffer(&buffer1, 0, None);

    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);

    assert_eq!(counter.load(SeqCst), 2);
}

/// Mapping with a buffer lacking MAP_* usage should panic when executed on submit.
#[test]
#[should_panic]
fn encoder_map_buffer_on_submit_panics_if_usage_invalid_on_submit() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let unmappable = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("unmappable buffer"),
        size: 16,
        usage: wgpu::BufferUsages::COPY_DST, // No MAP_READ or MAP_WRITE
        mapped_at_creation: false,
    });

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.map_buffer_on_submit(&unmappable, wgpu::MapMode::Read, 0..4, |_| {});

    // Add unrelated work so the submission isn't empty.
    let dummy = make_read_buffer(&device, 4);
    encoder.clear_buffer(&dummy, 0, None);

    // Panic expected when deferred mapping executes.
    queue.submit([encoder.finish()]);
}

/// Deferred map callbacks run before on_submitted_work_done for the same submission.
#[test]
fn encoder_deferred_map_runs_before_on_submitted_work_done() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 16);

    #[derive(Default)]
    struct Order {
        map_order: AtomicU32,
        queue_order: AtomicU32,
        counter: AtomicU32,
    }
    let order = Arc::new(Order::default());
    let o_map = Arc::clone(&order);
    let o_queue = Arc::clone(&order);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, 0..4, move |_| {
        let v = o_map.counter.fetch_add(1, SeqCst);
        o_map.map_order.store(v, SeqCst);
    });
    encoder.on_submitted_work_done(move || {
        let v = o_queue.counter.fetch_add(1, SeqCst);
        o_queue.queue_order.store(v, SeqCst);
    });
    encoder.clear_buffer(&buffer, 0, None);

    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);

    assert_eq!(order.counter.load(SeqCst), 2);
    assert_eq!(order.map_order.load(SeqCst), 0);
    assert_eq!(order.queue_order.load(SeqCst), 1);
}

/// Multiple on_submitted_work_done callbacks registered on encoder all fire after submit.
#[test]
fn encoder_multiple_on_submitted_callbacks_fire() {
    let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    let buffer = make_read_buffer(&device, 4);

    let counter = Arc::new(AtomicU32::new(0));
    let c1 = Arc::clone(&counter);
    let c2 = Arc::clone(&counter);

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.on_submitted_work_done(move || {
        c1.fetch_add(1, SeqCst);
    });
    encoder.on_submitted_work_done(move || {
        c2.fetch_add(1, SeqCst);
    });
    encoder.clear_buffer(&buffer, 0, None);

    queue.submit([encoder.finish()]);
    _ = device.poll(wgpu::PollType::Wait);

    assert_eq!(counter.load(SeqCst), 2);
}

tests/tests/wgpu-validation/api/mod.rs

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
mod binding_arrays;
mod buffer;
mod buffer_slice;
mod command_buffer_actions;
mod device;
mod external_texture;
mod instance;

0 commit comments

Comments
 (0)