@@ -2,6 +2,7 @@
 #define CUDA_INTEGER_COMPRESSION_H
 
 #include "../../pbs/pbs_enums.h"
+#include "../integer.h"
 
 typedef struct {
   void *ptr;
@@ -25,77 +26,65 @@ typedef struct {
 
 extern "C" {
 uint64_t scratch_cuda_integer_compress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr, uint32_t compression_glwe_dimension,
-    uint32_t compression_polynomial_size, uint32_t lwe_dimension,
-    uint32_t ks_level, uint32_t ks_base_log, uint32_t num_radix_blocks,
-    uint32_t message_modulus, uint32_t carry_modulus, PBS_TYPE pbs_type,
-    uint32_t lwe_per_glwe, bool allocate_gpu_memory);
+    CudaStreamsFFI streams, int8_t **mem_ptr,
+    uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
+    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
+    uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
+    PBS_TYPE pbs_type, uint32_t lwe_per_glwe, bool allocate_gpu_memory);
 
 uint64_t scratch_cuda_integer_decompress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr, uint32_t encryption_glwe_dimension,
-    uint32_t encryption_polynomial_size, uint32_t compression_glwe_dimension,
-    uint32_t compression_polynomial_size, uint32_t lwe_dimension,
-    uint32_t pbs_level, uint32_t pbs_base_log,
+    CudaStreamsFFI streams, int8_t **mem_ptr,
+    uint32_t encryption_glwe_dimension, uint32_t encryption_polynomial_size,
+    uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
+    uint32_t lwe_dimension, uint32_t pbs_level, uint32_t pbs_base_log,
     uint32_t num_blocks_to_decompress, uint32_t message_modulus,
     uint32_t carry_modulus, PBS_TYPE pbs_type, bool allocate_gpu_memory,
     PBS_MS_REDUCTION_T noise_reduction_type);
 
 void cuda_integer_compress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    CudaPackedGlweCiphertextListFFI *glwe_array_out,
+    CudaStreamsFFI streams, CudaPackedGlweCiphertextListFFI *glwe_array_out,
     CudaLweCiphertextListFFI const *lwe_array_in, void *const *fp_ksk,
     int8_t *mem_ptr);
 
 void cuda_integer_decompress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    CudaLweCiphertextListFFI *lwe_array_out,
+    CudaStreamsFFI streams, CudaLweCiphertextListFFI *lwe_array_out,
     CudaPackedGlweCiphertextListFFI const *glwe_in,
     uint32_t const *indexes_array, void *const *bsks, int8_t *mem_ptr);
 
-void cleanup_cuda_integer_compress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr_void);
+void cleanup_cuda_integer_compress_radix_ciphertext_64(CudaStreamsFFI streams,
+                                                       int8_t **mem_ptr_void);
 
-void cleanup_cuda_integer_decompress_radix_ciphertext_64(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr_void);
+void cleanup_cuda_integer_decompress_radix_ciphertext_64(CudaStreamsFFI streams,
                                                          int8_t **mem_ptr_void);
 
 uint64_t scratch_cuda_integer_compress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr, uint32_t compression_glwe_dimension,
-    uint32_t compression_polynomial_size, uint32_t lwe_dimension,
-    uint32_t ks_level, uint32_t ks_base_log, uint32_t num_radix_blocks,
-    uint32_t message_modulus, uint32_t carry_modulus, PBS_TYPE pbs_type,
-    uint32_t lwe_per_glwe, bool allocate_gpu_memory);
+    CudaStreamsFFI streams, int8_t **mem_ptr,
+    uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
+    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
+    uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
+    PBS_TYPE pbs_type, uint32_t lwe_per_glwe, bool allocate_gpu_memory);
 
 uint64_t scratch_cuda_integer_decompress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr, uint32_t compression_glwe_dimension,
-    uint32_t compression_polynomial_size, uint32_t lwe_dimension,
-    uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
-    bool allocate_gpu_memory);
+    CudaStreamsFFI streams, int8_t **mem_ptr,
+    uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
+    uint32_t lwe_dimension, uint32_t num_radix_blocks, uint32_t message_modulus,
+    uint32_t carry_modulus, bool allocate_gpu_memory);
 
 void cuda_integer_compress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    CudaPackedGlweCiphertextListFFI *glwe_array_out,
+    CudaStreamsFFI streams, CudaPackedGlweCiphertextListFFI *glwe_array_out,
     CudaLweCiphertextListFFI const *lwe_array_in, void *const *fp_ksk,
     int8_t *mem_ptr);
 
 void cuda_integer_decompress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    CudaLweCiphertextListFFI *lwe_array_out,
+    CudaStreamsFFI streams, CudaLweCiphertextListFFI *lwe_array_out,
     CudaPackedGlweCiphertextListFFI const *glwe_in,
     uint32_t const *indexes_array, int8_t *mem_ptr);
 
-void cleanup_cuda_integer_compress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr_void);
+void cleanup_cuda_integer_compress_radix_ciphertext_128(CudaStreamsFFI streams,
                                                         int8_t **mem_ptr_void);
 
 void cleanup_cuda_integer_decompress_radix_ciphertext_128(
-    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
-    int8_t **mem_ptr_void);
+    CudaStreamsFFI streams, int8_t **mem_ptr_void);
 }
 
 #endif
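For context, here is a minimal sketch of how the new `CudaStreamsFFI`-based 64-bit compression entry points chain together (scratch, compress, cleanup). The helper name `compress_radix_blocks_64`, the include path, and the assumption that the FFI structs arrive already populated by the caller are illustrative only and not part of this change.

```cpp
#include "compression.h" // the header modified above (path assumed)

// Sketch: compress a list of radix blocks into a packed GLWE list using the
// entry points declared in this header. All FFI structs are assumed to have
// been filled in by the caller.
uint64_t compress_radix_blocks_64(
    CudaStreamsFFI streams, CudaPackedGlweCiphertextListFFI *glwe_array_out,
    CudaLweCiphertextListFFI const *lwe_array_in, void *const *fp_ksk,
    uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
    uint32_t num_radix_blocks, uint32_t message_modulus,
    uint32_t carry_modulus, PBS_TYPE pbs_type, uint32_t lwe_per_glwe) {
  int8_t *mem_ptr = nullptr;

  // Allocate scratch memory on the devices described by `streams`; the
  // returned value is whatever size the scratch call reports.
  uint64_t scratch_size = scratch_cuda_integer_compress_radix_ciphertext_64(
      streams, &mem_ptr, compression_glwe_dimension,
      compression_polynomial_size, lwe_dimension, ks_level, ks_base_log,
      num_radix_blocks, message_modulus, carry_modulus, pbs_type, lwe_per_glwe,
      /*allocate_gpu_memory=*/true);

  // Pack the input radix blocks into the output GLWE list using the
  // functional packing keyswitch key.
  cuda_integer_compress_radix_ciphertext_64(streams, glwe_array_out,
                                            lwe_array_in, fp_ksk, mem_ptr);

  // Release the scratch buffer.
  cleanup_cuda_integer_compress_radix_ciphertext_64(streams, &mem_ptr);
  return scratch_size;
}
```

The decompress path would follow the same scratch/run/cleanup pattern with its own parameter set, and the 128-bit variants mirror the 64-bit ones.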