@@ -18,7 +18,7 @@ const MAX_XORB_CHUNKS = 8 * 1024;
 const INTERVAL_BETWEEN_REMOTE_DEDUP = 4_000_000; // 4MB
 
 export async function* createXorbs(
-	fileSources: AsyncGenerator<{ content: Blob; path: string; sha256: string }>,
+	fileSources: AsyncGenerator<{ content: Blob; path: string }>,
 	params: XetWriteTokenParams
 ): AsyncGenerator<
 	| {
@@ -36,7 +36,6 @@ export async function* createXorbs(
 			event: "file";
 			path: string;
 			hash: string;
-			sha256: string;
 			representation: Array<{
 				xorbId: number | string; // either xorb id (for local xorbs) or xorb hash (for remote xorbs)
 				offset: number;
@@ -59,9 +58,30 @@ export async function* createXorbs(
 	let xorbOffset = 0;
 	let xorbChunks = Array<{ hash: string; length: number; offset: number }>();
 	/**
-	 * path => 0..1 mapping
+	 * path => 0..1 progress mapping for the current xorb
+	 *
+	 * e.g.
+	 *
+	 * A => 1
+	 * B => 1
+	 * C => 0.345
+	 *
+	 * If the xorb contains the end of files A and B, and up to 34.5% of file C
 	 */
-	let fileProgress: Record<string, number> = {};
+	let xorbFileProgress: Record<string, number> = {};
+
+	const pendingFileEvents: Array<{
+		event: "file";
+		path: string;
+		hash: string;
+		representation: Array<{
+			xorbId: number | string;
+			offset: number;
+			endOffset: number;
+			length: number;
+			rangeHash: string;
+		}>;
+	}> = [];
 
 	const remoteXorbHashes: string[] = [""]; // starts at index 1 (to simplify implem a bit)
 	let bytesSinceRemoteDedup = Infinity;
@@ -148,14 +168,20 @@ export async function* createXorbs(
 							hash: chunkModule.compute_xorb_hash(xorbChunks),
 							chunks: [...xorbChunks],
 							id: xorbId,
-							files: Object.entries(fileProgress).map(([path, progress]) => ({ path, progress })),
+							files: Object.entries(xorbFileProgress).map(([path, progress]) => ({ path, progress })),
 						};
 						xorbId++;
 						xorb = new Uint8Array(XORB_SIZE);
 						chunkOffset = 0;
 						chunkXorbId = xorbId;
+						xorbFileProgress = {};
+
+						for (const event of pendingFileEvents) {
+							yield event;
+						}
+						pendingFileEvents.length = 0;
+
 						xorbOffset = writeChunk(xorb, 0, chunkToCopy);
-						fileProgress = {};
 
 						if (xorbOffset === 0) {
 							throw new Error("Failed to write chunk into xorb");
@@ -202,21 +228,26 @@ export async function* createXorbs(
 						}
 					}
 					xorbChunks.push({ hash: chunk.hash, length: chunk.length, offset: chunkOffset });
-					fileProgress[fileSource.path] = processedBytes / fileSource.content.size;
+					xorbFileProgress[fileSource.path] = processedBytes / fileSource.content.size;
 					if (xorbChunks.length >= MAX_XORB_CHUNKS) {
 						yield {
 							event: "xorb" as const,
 							xorb: xorb.subarray(0, xorbOffset),
 							hash: chunkModule.compute_xorb_hash(xorbChunks),
 							chunks: [...xorbChunks],
 							id: xorbId,
-							files: Object.entries(fileProgress).map(([path, progress]) => ({ path, progress })),
+							files: Object.entries(xorbFileProgress).map(([path, progress]) => ({ path, progress })),
 						};
 						xorbId++;
 						xorbOffset = 0;
 						xorbChunks = [];
-						fileProgress = {};
+						xorbFileProgress = {};
 						xorb = new Uint8Array(XORB_SIZE);
+
+						for (const event of pendingFileEvents) {
+							yield event;
+						}
+						pendingFileEvents.length = 0;
 					}
 				}
 			};
@@ -239,13 +270,12 @@ export async function* createXorbs(
 				);
 			}
 
-			yield {
+			pendingFileEvents.push({
 				event: "file" as const,
 				path: fileSource.path,
 				hash: chunkModule.compute_file_hash(fileChunks),
 				representation: fileRepresentation,
-				sha256: fileSource.sha256,
-			};
+			});
 		}
 
 		if (xorbOffset > 0) {
@@ -255,9 +285,13 @@ export async function* createXorbs(
 				hash: chunkModule.compute_xorb_hash(xorbChunks),
 				chunks: [...xorbChunks],
 				id: xorbId,
-				files: Object.entries(fileProgress).map(([path, progress]) => ({ path, progress })),
+				files: Object.entries(xorbFileProgress).map(([path, progress]) => ({ path, progress })),
 			};
 		}
+
+		for (const event of pendingFileEvents) {
+			yield event;
+		}
 	} finally {
 		chunker.free();
 		// ^ is this really needed ?
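
Note on the resulting contract: with pendingFileEvents, a "file" event is only yielded after the xorb holding that file's final chunks has been yielded, so a consumer can assume every locally built xorb referenced by a file's representation (numeric xorbId) has already been emitted; remote xorbs are referenced by hash and never yielded here. Below is a minimal consumer sketch under that assumption; the event shapes are trimmed to the fields visible in this diff, and uploadXorb / commitFile are hypothetical placeholders rather than part of the actual API.

// Sketch only: event shapes reduced to the fields shown in this diff;
// uploadXorb/commitFile are hypothetical callbacks, not the real API.
type XorbEvent = { event: "xorb"; xorb: Uint8Array; hash: string; id: number };
type FileEvent = {
	event: "file";
	path: string;
	hash: string;
	representation: Array<{
		xorbId: number | string;
		offset: number;
		endOffset: number;
		length: number;
		rangeHash: string;
	}>;
};

async function consume(
	events: AsyncGenerator<XorbEvent | FileEvent>,
	uploadXorb: (id: number, data: Uint8Array) => Promise<void>,
	commitFile: (file: FileEvent) => Promise<void>
): Promise<void> {
	for await (const event of events) {
		if (event.event === "xorb") {
			// Xorbs arrive first; upload (or stage) them as they stream.
			await uploadXorb(event.id, event.xorb);
		} else {
			// By the time a "file" event arrives, the locally built xorbs its
			// representation points to have already been yielded (and uploaded above).
			await commitFile(event);
		}
	}
}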