@@ -34,17 +34,22 @@ object NonOpenAIModelId {
34
34
// Llama family model identifiers, grouped by hosting provider.
// NOTE(review): reconstructed from a diff rendering; string IDs assumed to have
// no leading whitespace (provider model IDs never do) — confirm against upstream.

// 17B x 128E, 400B total params, 1M context window (500k currently supported)
val meta_llama_llama_4_maverick_17b_128e_instruct_fp8 =
  "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" // Together AI

// 17B x 16E, 109B total params, 10M token context (300k currently supported)
// fix: identifier typo "1eE" -> "16e" (matches the 16-expert naming used by
// the string literal below and the Groq/Cerebras siblings)
val meta_llama_llama_4_scout_17b_16e_instruct =
  "meta-llama/Llama-4-Scout-17B-16E-Instruct" // Together AI

// 17B x 128E, 400B params, 1 mil context
val llama4_maverick_instruct_basic = "llama4-maverick-instruct-basic" // Fireworks AI
// 17B x 16E, 107B params, 128k context
val llama4_scout_instruct_basic = "llama4-scout-instruct-basic" // Fireworks AI
// 17B x 16E, 107B params
val groq_llama_4_scout_17b_16e_instruct = "meta-llama/llama-4-scout-17b-16e-instruct" // Groq
// 17B x 16E, 107B params
val cerebras_llama_4_scout_17b_16e_instruct = "llama-4-scout-17b-16e-instruct" // Cerebras

val llama_3_3_70b_versatile = "llama-3.3-70b-versatile" // Groq
val llama_3_3_70b_specdec = "llama-3.3-70b-specdec" // Groq
val llama_v3p3_70b_instruct = "llama-v3p3-70b-instruct" // Fireworks AI
0 commit comments