@@ -331,13 +331,77 @@ it(
     const aiclient = new Client(nativeClient);
 
     const modelBlob: Buffer = fs.readFileSync('./tests/test_data/graph.pb');
-    const model = new Model(Backend.TF, 'CPU', ['a', 'b'], ['c'], modelBlob);
+    const inputs: string[] = ['a', 'b'];
+    const outputs: string[] = ['c'];
+    const model = new Model(Backend.TF, 'CPU', inputs, outputs, modelBlob);
     model.tag = 'test_tag';
     const resultModelSet = await aiclient.modelset('mymodel', model);
     expect(resultModelSet).to.equal('OK');
 
-    const modelOut = await aiclient.modelget('mymodel');
+    const modelOut: Model = await aiclient.modelget('mymodel');
     expect(modelOut.blob.toString()).to.equal(modelBlob.toString());
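+    // modelget should round-trip not only the blob but also the metadata passed to modelset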
+    for (let index = 0; index < modelOut.outputs.length; index++) {
+      expect(modelOut.outputs[index]).to.equal(outputs[index]);
+      expect(modelOut.outputs[index]).to.equal(model.outputs[index]);
+    }
+    for (let index = 0; index < modelOut.inputs.length; index++) {
+      expect(modelOut.inputs[index]).to.equal(inputs[index]);
+      expect(modelOut.inputs[index]).to.equal(model.inputs[index]);
+    }
+    expect(modelOut.batchsize).to.equal(model.batchsize);
+    expect(modelOut.minbatchsize).to.equal(model.minbatchsize);
+    aiclient.end(true);
+  }),
+);
+
+it(
+  'ai.modelget batching positive testing',
+  mochaAsync(async () => {
+    const nativeClient = createClient();
+    const aiclient = new Client(nativeClient);
+
+    const modelBlob: Buffer = fs.readFileSync('./tests/test_data/graph.pb');
+    const inputs: string[] = ['a', 'b'];
+    const outputs: string[] = ['c'];
+    const model = new Model(Backend.TF, 'CPU', inputs, outputs, modelBlob);
+    model.tag = 'test_tag';
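+    // batchsize/minbatchsize set after construction should be forwarded as the
+    // BATCHSIZE/MINBATCHSIZE arguments of AI.MODELSET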
+    model.batchsize = 100;
+    model.minbatchsize = 5;
+    const resultModelSet = await aiclient.modelset('mymodel-batching', model);
+    expect(resultModelSet).to.equal('OK');
+    const modelOut: Model = await aiclient.modelget('mymodel-batching');
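+    // storing the fetched Model under a new key exercises a full get -> set -> get loop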
373
+ const resultModelSet2 = await aiclient . modelset ( 'mymodel-batching-loop' , modelOut ) ;
374
+ expect ( resultModelSet2 ) . to . equal ( 'OK' ) ;
375
+ const modelOut2 : Model = await aiclient . modelget ( 'mymodel-batching-loop' ) ;
376
+ expect ( modelOut . batchsize ) . to . equal ( model . batchsize ) ;
377
+ expect ( modelOut . minbatchsize ) . to . equal ( model . minbatchsize ) ;
378
+ aiclient . end ( true ) ;
379
+ } ) ,
380
+ ) ;
+
+it(
+  'ai.modelget batching via constructor positive testing',
+  mochaAsync(async () => {
+    const nativeClient = createClient();
+    const aiclient = new Client(nativeClient);
+
+    const modelBlob: Buffer = fs.readFileSync('./tests/test_data/graph.pb');
+    const inputs: string[] = ['a', 'b'];
+    const outputs: string[] = ['c'];
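+    // batching can also be configured via the optional trailing constructor
+    // arguments (batchsize, minbatchsize)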
+    const model = new Model(Backend.TF, 'CPU', inputs, outputs, modelBlob, 100, 5);
+    model.tag = 'test_tag';
+    const resultModelSet = await aiclient.modelset('mymodel-batching-t2', model);
+    expect(resultModelSet).to.equal('OK');
+    const modelOut: Model = await aiclient.modelget('mymodel-batching-t2');
+    const resultModelSet2 = await aiclient.modelset('mymodel-batching-loop-t2', modelOut);
+    expect(resultModelSet2).to.equal('OK');
+    const modelOut2: Model = await aiclient.modelget('mymodel-batching-loop-t2');
+    expect(modelOut2.batchsize).to.equal(model.batchsize);
+    expect(modelOut2.minbatchsize).to.equal(model.minbatchsize);
+
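+    // omitting the minbatchsize constructor argument should leave it at the default of 0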
+    const model2 = new Model(Backend.TF, 'CPU', inputs, outputs, modelBlob, 1000);
+    expect(model2.batchsize).to.equal(1000);
+    expect(model2.minbatchsize).to.equal(0);
     aiclient.end(true);
   }),
 );
@@ -624,26 +688,26 @@ it(
 );
 
 it(
-    'ai.config positive and negative testing',
-    mochaAsync(async () => {
-      const nativeClient = createClient();
-      const aiclient = new Client(nativeClient);
-      const result = await aiclient.configBackendsPath('/usr/lib/redis/modules/backends/');
-      expect(result).to.equal('OK');
-      // negative test
-      try {
-        const loadReply = await aiclient.configLoadBackend(Backend.TF, 'notexist/redisai_tensorflow.so');
-      } catch (e) {
-        expect(e.toString()).to.equal('ReplyError: ERR error loading backend');
-      }
-
-      try {
-        // may throw error if backend already loaded
-        const loadResult = await aiclient.configLoadBackend(Backend.TF, 'redisai_tensorflow/redisai_tensorflow.so');
-        expect(loadResult).to.equal('OK');
-      } catch (e) {
-        expect(e.toString()).to.equal('ReplyError: ERR error loading backend');
-      }
-      aiclient.end(true);
-    }),
-  );
+  'ai.config positive and negative testing',
+  mochaAsync(async () => {
+    const nativeClient = createClient();
+    const aiclient = new Client(nativeClient);
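+    // point the module at the directory that holds the backend shared libraries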
+    const result = await aiclient.configBackendsPath('/usr/lib/redis/modules/backends/');
+    expect(result).to.equal('OK');
+    // negative test
+    try {
+      const loadReply = await aiclient.configLoadBackend(Backend.TF, 'notexist/redisai_tensorflow.so');
+    } catch (e) {
+      expect(e.toString()).to.equal('ReplyError: ERR error loading backend');
+    }
+
+    try {
+      // may throw error if backend already loaded
+      const loadResult = await aiclient.configLoadBackend(Backend.TF, 'redisai_tensorflow/redisai_tensorflow.so');
+      expect(loadResult).to.equal('OK');
+    } catch (e) {
+      expect(e.toString()).to.equal('ReplyError: ERR error loading backend');
+    }
+    aiclient.end(true);
+  }),
+);