diff --git a/.github/workflows/smoke-tests.yml b/.github/workflows/smoke-tests.yml
index cdcdb51..bc76b2b 100644
--- a/.github/workflows/smoke-tests.yml
+++ b/.github/workflows/smoke-tests.yml
@@ -18,8 +18,10 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
 
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+      - name: Set up Docker Compose
+        uses: docker/setup-compose-action@v1.2.0
+        with:
+          version: v2.40.3
 
       - name: Install Docker Model Plugin
         run: |
@@ -86,12 +88,13 @@ jobs:
             -H "Content-Type: application/json" \
             -d '{"message": "Hello"}' \
             --max-time 30)
-
-          if [[ $? -eq 0 ]] && [[ "$RESPONSE" == *"response"* ]]; then
+          if [[ -n "$RESPONSE" ]] && jq -e '.response' <<< "$RESPONSE" > /dev/null 2>&1; then
             echo "✅ Chat API test passed"
+            echo "Response: $(jq -r '.response' <<< "$RESPONSE" | head -c 200)..."
           else
-            echo "❌ Chat API test failed: $RESPONSE"
-            # Don't fail the test for now as model interaction might be flaky
+            echo "❌ Chat API test failed - empty response, invalid JSON, or missing 'response' key"
+            echo "Raw response: $RESPONSE"
+            exit 1
           fi
 
           # Test model info endpoint
@@ -100,11 +103,13 @@ jobs:
             -H "Content-Type: application/json" \
             -d '{"message": "!modelinfo"}' \
             --max-time 10)
-
-          if [[ $? -eq 0 ]] && [[ "$MODEL_INFO" == *"model"* ]]; then
+          if [[ -n "$MODEL_INFO" ]] && jq -e '.model' <<< "$MODEL_INFO" > /dev/null 2>&1; then
             echo "✅ Model info test passed"
+            echo "Model: $(jq -r '.model' <<< "$MODEL_INFO")"
           else
-            echo "❌ Model info test failed: $MODEL_INFO"
+            echo "❌ Model info test failed - empty response, invalid JSON, or missing 'model' key"
+            echo "Raw response: $MODEL_INFO"
+            exit 1
           fi
 
           echo "✅ Smoke tests completed for ${{ matrix.project }}"
diff --git a/go-genai/docker-compose.yml b/go-genai/docker-compose.yml
index c4c95e6..07fa910 100644
--- a/go-genai/docker-compose.yml
+++ b/go-genai/docker-compose.yml
@@ -19,4 +19,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
diff --git a/node-genai/docker-compose.yml b/node-genai/docker-compose.yml
index e35c8c4..6facad6 100644
--- a/node-genai/docker-compose.yml
+++ b/node-genai/docker-compose.yml
@@ -19,4 +19,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
diff --git a/py-genai/docker-compose.yml b/py-genai/docker-compose.yml
index d428278..70e5a66 100644
--- a/py-genai/docker-compose.yml
+++ b/py-genai/docker-compose.yml
@@ -20,4 +20,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
diff --git a/rust-genai/src/handlers.rs b/rust-genai/src/handlers.rs
index 1ef7840..facdd3a 100644
--- a/rust-genai/src/handlers.rs
+++ b/rust-genai/src/handlers.rs
@@ -63,6 +63,11 @@ pub struct ChatResponse {
     pub response: String,
 }
 
+#[derive(Serialize)]
+pub struct ModelInfoResponse {
+    pub model: String,
+}
+
 #[post("/api/chat")]
 pub async fn chat_api(
     req: HttpRequest,
@@ -79,6 +84,12 @@ pub async fn chat_api(
     if message.len() > 4000 {
         return HttpResponse::BadRequest().json(serde_json::json!({"error": "Message too long (max 4000 chars)"}));
     }
+    // Special command for getting model info
+    if message == "!modelinfo" {
+        return HttpResponse::Ok().json(ModelInfoResponse {
+            model: config.llm_model_name.clone()
+        });
+    }
     if let Some(resp) = cache.get(message) {
         return HttpResponse::Ok().json(ChatResponse { response: resp });
     }
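
The switch from the substring match to jq -e closes a real gap: any payload that merely contained the word "response" passed the old glob test, error bodies included. A minimal shell demonstration of the difference, using the same jq idiom as the workflow above:

# An error payload that defeats the old check but fails the new one.
PAYLOAD='{"error":"model returned no response"}'
[[ "$PAYLOAD" == *"response"* ]] && echo "old substring check: false positive"
jq -e '.response' <<< "$PAYLOAD" > /dev/null 2>&1 || echo "new jq check: correctly rejected"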
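
For spot-checking the new !modelinfo contract outside CI, a sketch along the lines of the workflow's own curl calls; the target URL is an assumption here, since the actual host/port line sits above the hunk context:

# Hypothetical local check; adjust the URL to wherever compose publishes the service.
MODEL_INFO=$(curl -s -X POST http://localhost:8080/api/chat \
  -H "Content-Type: application/json" \
  -d '{"message": "!modelinfo"}' \
  --max-time 10)
jq -r '.model' <<< "$MODEL_INFO"   # expected: ai/llama3.2:1B-Q8_0, per the compose files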