99 "strings"
1010 "sync"
1111
12+ openai2 "github.com/gptscript-ai/chat-completion-client"
1213 "github.com/gptscript-ai/gptscript/pkg/cache"
1314 "github.com/gptscript-ai/gptscript/pkg/credentials"
1415 "github.com/gptscript-ai/gptscript/pkg/engine"
@@ -62,7 +63,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques
 	return client.Call(ctx, messageRequest, env, status)
 }
 
-func (c *Client) ListModels(ctx context.Context, providers ...string) (result []string, _ error) {
+func (c *Client) ListModels(ctx context.Context, providers ...string) (result []openai2.Model, _ error) {
 	for _, provider := range providers {
 		client, err := c.load(ctx, provider)
 		if err != nil {
@@ -72,12 +73,16 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result []
 		if err != nil {
 			return nil, err
 		}
-		for _, model := range models {
-			result = append(result, model+" from "+provider)
+		for i := range models {
+			models[i].ID = fmt.Sprintf("%s from %s", models[i].ID, provider)
 		}
+
+		result = append(result, models...)
 	}
 
-	sort.Strings(result)
+	sort.Slice(result, func(i, j int) bool {
+		return result[i].ID < result[j].ID
+	})
 	return
 }
 
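Note on usage: after this change, ListModels returns []openai2.Model rather than []string, with each model ID rewritten to "<id> from <provider>" and the merged list sorted by ID. A minimal caller sketch, assuming an existing *Client in the same package and a placeholder provider URL (neither the helper name nor the URL comes from this diff):

// Hypothetical helper, not part of this diff: prints the merged, sorted model list.
func printModels(ctx context.Context, client *Client) error {
	// The provider URL below is an illustrative assumption.
	models, err := client.ListModels(ctx, "github.com/example/model-provider")
	if err != nil {
		return err
	}
	for _, model := range models {
		// Each ID already carries the provider suffix, e.g. "gpt-4 from github.com/example/model-provider".
		fmt.Println(model.ID)
	}
	return nil
}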