23 changes: 14 additions & 9 deletions .github/workflows/smoke-tests.yml
@@ -18,8 +18,10 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
 
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+      - name: Set up Docker Compose
+        uses: docker/[email protected]
+        with:
+          version: v2.40.3
 
       - name: Install Docker Model Plugin
         run: |
@@ -86,12 +88,13 @@ jobs:
-H "Content-Type: application/json" \
-d '{"message": "Hello"}' \
--max-time 30)

if [[ $? -eq 0 ]] && [[ "$RESPONSE" == *"response"* ]]; then
if [[ -n "$RESPONSE" ]] && jq -e '.response' <<< "$RESPONSE" > /dev/null 2>&1; then
echo "✅ Chat API test passed"
echo "Response: $(jq -r '.response' <<< "$RESPONSE" | head -c 200)..."
else
echo "❌ Chat API test failed: $RESPONSE"
# Don't fail the test for now as model interaction might be flaky
echo "❌ Chat API test failed - empty response, invalid JSON, or missing 'response' key"
echo "Raw response: $RESPONSE"
exit 1
fi

# Test model info endpoint
@@ -100,11 +103,13 @@
-H "Content-Type: application/json" \
-d '{"message": "!modelinfo"}' \
--max-time 10)

if [[ $? -eq 0 ]] && [[ "$MODEL_INFO" == *"model"* ]]; then
if [[ -n "$MODEL_INFO" ]] && jq -e '.model' <<< "$MODEL_INFO" > /dev/null 2>&1; then
echo "✅ Model info test passed"
echo "Model: $(jq -r '.model' <<< "$MODEL_INFO")"
else
echo "❌ Model info test failed: $MODEL_INFO"
echo "❌ Model info test failed - empty response, invalid JSON, or missing 'model' key"
echo "Raw response: $MODEL_INFO"
exit 1
fi

echo "✅ Smoke tests completed for ${{ matrix.project }}"
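The switch from a substring match to jq -e is the substantive change in this file: the old test passed whenever the word "response" appeared anywhere in the body, including in error payloads, while jq -e exits non-zero unless the input is valid JSON and the filter produces a value other than false or null. A standalone shell sketch of the difference (assuming only that jq is installed; the sample payload is made up for illustration):

    RESPONSE='{"error": "no response from model"}'
    # Old check: passes, because the word "response" appears in the error text
    [[ "$RESPONSE" == *"response"* ]] && echo "substring check: pass"
    # New check: fails, because .response is absent, so jq outputs null and exits 1
    jq -e '.response' <<< "$RESPONSE" > /dev/null 2>&1 || echo "jq check: fail"

The same reasoning applies to the model-info hunk, and both branches now exit 1 on failure instead of swallowing it, so a flaky model surfaces as a red CI run rather than a green one with a ❌ in the log.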
2 changes: 1 addition & 1 deletion go-genai/docker-compose.yml
@@ -19,4 +19,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
2 changes: 1 addition & 1 deletion node-genai/docker-compose.yml
@@ -19,4 +19,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
2 changes: 1 addition & 1 deletion py-genai/docker-compose.yml
@@ -20,4 +20,4 @@ services:
 models:
   llama:
     model: ai/llama3.2:1B-Q8_0
-    context_size: 2048
\ No newline at end of file
+    context_size: 2048
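All three docker-compose.yml diffs are newline-only fixes: each removes a final "context_size: 2048" that lacked a trailing newline and re-adds the identical line with one. A quick way to spot such files (a sketch, assuming a POSIX shell with tail; the path is just an example):

    f=go-genai/docker-compose.yml
    # tail -c 1 prints the file's last byte; command substitution strips a
    # trailing newline, so the result is empty exactly when the file ends with one
    [ -n "$(tail -c 1 "$f")" ] && echo "$f: missing trailing newline"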
11 changes: 11 additions & 0 deletions rust-genai/src/handlers.rs
@@ -63,6 +63,11 @@ pub struct ChatResponse {
     pub response: String,
 }
 
+#[derive(Serialize)]
+pub struct ModelInfoResponse {
+    pub model: String,
+}
+
 #[post("/api/chat")]
 pub async fn chat_api(
     req: HttpRequest,
@@ -79,6 +84,12 @@ pub async fn chat_api(
     if message.len() > 4000 {
         return HttpResponse::BadRequest().json(serde_json::json!({"error": "Message too long (max 4000 chars)"}));
     }
+    // Special command for getting model info
+    if message == "!modelinfo" {
+        return HttpResponse::Ok().json(ModelInfoResponse {
+            model: config.llm_model_name.clone()
+        });
+    }
     if let Some(resp) = cache.get(message) {
         return HttpResponse::Ok().json(ChatResponse { response: resp });
     }
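Once this handler change is deployed, the new branch can be exercised by hand the same way the smoke test does. A sketch, assuming the rust-genai service is reachable on localhost:8080 (the host and port are assumptions, not taken from this diff):

    # Hits the !modelinfo branch; the handler returns ModelInfoResponse,
    # i.e. a JSON body of the form {"model": "<configured model name>"}
    curl -s -X POST http://localhost:8080/api/chat \
      -H "Content-Type: application/json" \
      -d '{"message": "!modelinfo"}' | jq -r '.model'

Note that the !modelinfo check runs before the cache lookup, so a cached chat reply for the literal string "!modelinfo" can never shadow the model-info response.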