@@ -2227,87 +2227,42 @@ impl super::Adapter {
             signal_semaphores: Default::default(),
         };

-        let mem_allocator = {
-            let limits = self.phd_capabilities.properties.limits;
-
-            // Note: the parameters here are not set in stone nor where they picked with
-            // strong confidence.
-            // `final_free_list_chunk` should be bigger than starting_free_list_chunk if
-            // we want the behavior of starting with smaller block sizes and using larger
-            // ones only after we observe that the small ones aren't enough, which I think
-            // is a good "I don't know what the workload is going to be like" approach.
-            //
-            // For reference, `VMA`, and `gpu_allocator` both start with 256 MB blocks
-            // (then VMA doubles the block size each time it needs a new block).
-            // At some point it would be good to experiment with real workloads
-            //
-            // TODO(#5925): The plan is to switch the Vulkan backend from `gpu_alloc` to
-            // `gpu_allocator` which has a different (simpler) set of configuration options.
-            //
-            // TODO: These parameters should take hardware capabilities into account.
-            let mb = 1024 * 1024;
-            let perf_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 128 * mb,
-                final_free_list_chunk: 512 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 32 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 128 * mb,
-            };
-            let mem_usage_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 8 * mb,
-                final_free_list_chunk: 64 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 8 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 16 * mb,
-            };
-            let config = match memory_hints {
-                wgt::MemoryHints::Performance => perf_cfg,
-                wgt::MemoryHints::MemoryUsage => mem_usage_cfg,
-                wgt::MemoryHints::Manual {
-                    suballocated_device_memory_block_size,
-                } => gpu_alloc::Config {
-                    starting_free_list_chunk: suballocated_device_memory_block_size.start,
-                    final_free_list_chunk: suballocated_device_memory_block_size.end,
-                    initial_buddy_dedicated_size: suballocated_device_memory_block_size.start,
-                    ..perf_cfg
-                },
-            };
-
-            let max_memory_allocation_size =
-                if let Some(maintenance_3) = self.phd_capabilities.maintenance_3 {
-                    maintenance_3.max_memory_allocation_size
-                } else {
-                    u64::MAX
-                };
-            let properties = gpu_alloc::DeviceProperties {
-                max_memory_allocation_count: limits.max_memory_allocation_count,
-                max_memory_allocation_size,
-                non_coherent_atom_size: limits.non_coherent_atom_size,
-                memory_types: memory_types
-                    .iter()
-                    .map(|memory_type| gpu_alloc::MemoryType {
-                        props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
-                            memory_type.property_flags.as_raw() as u8,
-                        ),
-                        heap: memory_type.heap_index,
-                    })
-                    .collect(),
-                memory_heaps: mem_properties
-                    .memory_heaps_as_slice()
-                    .iter()
-                    .map(|&memory_heap| gpu_alloc::MemoryHeap {
-                        size: memory_heap.size,
-                    })
-                    .collect(),
-                buffer_device_address: enabled_extensions
-                    .contains(&khr::buffer_device_address::NAME),
-            };
-            gpu_alloc::GpuAllocator::new(config, properties)
+        // TODO: the allocator's configuration should take hardware capability into
+        // account.
+        const MB: u64 = 1024 * 1024;
+        let allocation_sizes = match memory_hints {
+            wgt::MemoryHints::Performance => gpu_allocator::AllocationSizes::new(128 * MB, 64 * MB)
+                .with_max_device_memblock_size(256 * MB)
+                .with_max_host_memblock_size(128 * MB),
+            wgt::MemoryHints::MemoryUsage => gpu_allocator::AllocationSizes::new(8 * MB, 4 * MB)
+                .with_max_device_memblock_size(64 * MB)
+                .with_max_host_memblock_size(32 * MB),
+            wgt::MemoryHints::Manual {
+                suballocated_device_memory_block_size,
+            } => {
+                // TODO: Would it be useful to expose the host size in memory hints
+                // instead of always using half of the device size?
+                let device_size = suballocated_device_memory_block_size;
+                let host_size = device_size.start / 2..device_size.end / 2;
+
+                gpu_allocator::AllocationSizes::new(device_size.start, host_size.start)
+                    .with_max_device_memblock_size(device_size.end)
+                    .with_max_host_memblock_size(host_size.end)
+            }
         };
+
+        let buffer_device_address = enabled_extensions.contains(&khr::buffer_device_address::NAME);
+
+        let mem_allocator =
+            gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
+                instance: self.instance.raw.clone(),
+                device: shared.raw.clone(),
+                physical_device: self.raw,
+                debug_settings: Default::default(),
+                buffer_device_address,
+                allocation_sizes,
+            })?;
+
         let desc_allocator = gpu_descriptor::DescriptorAllocator::new(
             if let Some(di) = self.phd_capabilities.descriptor_indexing {
                 di.max_update_after_bind_descriptors_in_all_pools
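For reference, the mapping from `wgt::MemoryHints` to `gpu_allocator::AllocationSizes` that this hunk introduces can be read as a standalone helper. The sketch below is not part of the change itself; it assumes the `gpu-allocator` crate (a version exposing `with_max_device_memblock_size`/`with_max_host_memblock_size`, as used in the diff) and `wgpu-types` imported as `wgt` are available, and the function name is invented for illustration.

```rust
use gpu_allocator::AllocationSizes;

const MB: u64 = 1024 * 1024;

/// Hypothetical helper mirroring the hint-to-block-size mapping in the hunk above.
fn allocation_sizes_for_hints(memory_hints: &wgt::MemoryHints) -> AllocationSizes {
    match memory_hints {
        // Larger blocks: fewer device memory allocations, more memory held per block.
        wgt::MemoryHints::Performance => AllocationSizes::new(128 * MB, 64 * MB)
            .with_max_device_memblock_size(256 * MB)
            .with_max_host_memblock_size(128 * MB),
        // Smaller blocks: less idle memory in partially used blocks.
        wgt::MemoryHints::MemoryUsage => AllocationSizes::new(8 * MB, 4 * MB)
            .with_max_device_memblock_size(64 * MB)
            .with_max_host_memblock_size(32 * MB),
        // Caller-provided range: `start` is the initial block size, `end` the cap;
        // host-visible block sizes are derived as half the device sizes, as in the diff.
        wgt::MemoryHints::Manual {
            suballocated_device_memory_block_size,
        } => {
            let device_size = suballocated_device_memory_block_size;
            let host_size = device_size.start / 2..device_size.end / 2;
            AllocationSizes::new(device_size.start, host_size.start)
                .with_max_device_memblock_size(device_size.end)
                .with_max_host_memblock_size(host_size.end)
        }
    }
}
```

The resulting value is what the new code passes as the `allocation_sizes` field of `gpu_allocator::vulkan::AllocatorCreateDesc` when constructing the allocator.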