# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
apiVersion: batch/v1
kind: Job
metadata:
  name: qwen3-235b-a22b-bench
spec:
  backoffLimit: 1
  completions: 1
  parallelism: 1
  template:
    metadata:
      labels:
        app: qwen3-235b-a22b-bench
    spec:
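      # Keep the benchmark client off the nodes that run the qwen3-235b-a22b-agg
      # deployment, so client CPU load does not skew server-side measurements.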
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: nvidia.com/dynamo-graph-deployment-name
                operator: In
                values:
                - qwen3-235b-a22b-agg
            topologyKey: kubernetes.io/hostname
      containers:
      - command:
        - /bin/sh
        - -c
        - |
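          # Install client-side tooling at startup; aiperf is pinned to a known commit.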
          apt-get update && apt-get install -y curl jq procps git && apt-get clean
          pip install git+https://github.com/ai-dynamo/aiperf.git@70af59489df24a601dba57604a7341966150b366
          echo "aiperf installation completed"
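          # Widen the ephemeral port range so a high-concurrency client does not
          # exhaust local ports (relies on the privileged securityContext below).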
          sysctl -w net.ipv4.ip_local_port_range="1024 65000"
          cat /proc/sys/net/ipv4/ip_local_port_range
          export COLUMNS=200
          EPOCH=$(date +%s)
          ## Utility functions -- can be moved to a bash script / ConfigMap
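          # Block until TARGET_MODEL shows up in the frontend's OpenAI-compatible
          # /v1/models listing, polling every 5s.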
          wait_for_model_ready() {
            echo "Waiting for model '$TARGET_MODEL' at $ENDPOINT/v1/models (checking every 5s)..."
            while ! curl -s "http://$ENDPOINT/v1/models" | jq -e --arg model "$TARGET_MODEL" '.data[]? | select(.id == $model)' >/dev/null 2>&1; do
              echo "[$(date '+%H:%M:%S')] Model not ready yet, sleeping 5s before checking again: http://$ENDPOINT/v1/models"
              sleep 5
            done
            echo "✅ Model '$TARGET_MODEL' is now available!"
            curl -s "http://$ENDPOINT/v1/models" | jq .
          }
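          # run_perf <concurrency> <isl> <osl>: run one aiperf sweep point with
          # fixed-length synthetic prompts and write artifacts under ARTIFACT_DIR.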
          run_perf() {
            local concurrency=$1
            local isl=$2
            local osl=$3
            local key=concurrency_${concurrency}
            export ARTIFACT_DIR="${ROOT_ARTIFACT_DIR}/${EPOCH}_${JOB_NAME}/${key}"
            mkdir -p "$ARTIFACT_DIR"
            echo "ARTIFACT_DIR: $ARTIFACT_DIR"
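            # max_tokens/min_tokens plus ignore_eos (both the standard field and the
            # nvext variant) pin every response to exactly $osl tokens.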
            aiperf profile --artifact-dir $ARTIFACT_DIR \
              --model $TARGET_MODEL \
              --tokenizer $TARGET_MODEL \
              --endpoint-type chat \
              --endpoint /v1/chat/completions \
              --streaming \
              --url http://$ENDPOINT \
              --synthetic-input-tokens-mean $isl \
              --synthetic-input-tokens-stddev 0 \
              --output-tokens-mean $osl \
              --output-tokens-stddev 0 \
              --extra-inputs "max_tokens:$osl" \
              --extra-inputs "min_tokens:$osl" \
              --extra-inputs "ignore_eos:true" \
              --extra-inputs "{\"nvext\":{\"ignore_eos\":true}}" \
              --extra-inputs "repetition_penalty:1.0" \
              --extra-inputs "temperature:0.0" \
              --concurrency $concurrency \
              --request-count $((10*concurrency)) \
              --warmup-request-count $concurrency \
              --conversation-num 12800 \
              --random-seed 100 \
              --workers-max 252 \
              -H 'Authorization: Bearer NOT USED' \
              -H 'Accept: text/event-stream' \
              --record-processors 32 \
              --ui simple
            echo "ARTIFACT_DIR: $ARTIFACT_DIR"
            ls -la $ARTIFACT_DIR
          }
          #### Actual execution ####
          wait_for_model_ready
          mkdir -p "${ROOT_ARTIFACT_DIR}/${EPOCH}_${JOB_NAME}"
          # Calculate total concurrency based on per-GPU concurrency and GPU count.
          TOTAL_CONCURRENCY=$((CONCURRENCY_PER_GPU * DEPLOYMENT_GPU_COUNT))
          echo "Calculated total concurrency: $TOTAL_CONCURRENCY (${CONCURRENCY_PER_GPU} per GPU × ${DEPLOYMENT_GPU_COUNT} GPUs)"
          # Record the benchmark parameters next to the results as input_config.json.
          cat > "${ROOT_ARTIFACT_DIR}/${EPOCH}_${JOB_NAME}/input_config.json" <<EOF
          {
            "gpu_count": $DEPLOYMENT_GPU_COUNT,
            "concurrency_per_gpu": $CONCURRENCY_PER_GPU,
            "total_concurrency": $TOTAL_CONCURRENCY,
            "mode": "$DEPLOYMENT_MODE",
            "isl": $ISL,
            "osl": $OSL,
            "endpoint": "$ENDPOINT",
            "model": "$TARGET_MODEL"
          }
          EOF

          # Run perf with the calculated total concurrency.
          run_perf $TOTAL_CONCURRENCY $ISL $OSL
          echo "done with concurrency $TOTAL_CONCURRENCY"
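        # Benchmark knobs: total concurrency = CONCURRENCY_PER_GPU × DEPLOYMENT_GPU_COUNT;
        # ISL/OSL are the synthetic input/output sequence lengths in tokens.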
        env:
        - name: TARGET_MODEL
          value: Qwen/Qwen3-235B-A22B-FP8
        - name: ENDPOINT
          value: qwen3-235b-a22b-agg-frontend:8000
        - name: CONCURRENCY_PER_GPU
          value: "2"
        - name: DEPLOYMENT_GPU_COUNT
          value: "2"
        - name: ISL
          value: "4000"
        - name: OSL
          value: "500"
        - name: DEPLOYMENT_MODE
          value: agg
        - name: AIPERF_HTTP_CONNECTION_LIMIT
          value: "200"
        - name: JOB_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels['job-name']
        - name: ROOT_ARTIFACT_DIR
          value: /model-cache/perf
        - name: HF_HOME
          value: /model-cache
        - name: PYTHONUNBUFFERED
          value: "1"
        image: python:3.12-slim
        imagePullPolicy: IfNotPresent
        name: perf
        securityContext:
          privileged: true
        volumeMounts:
        - name: model-cache
          mountPath: /model-cache
        workingDir: /workspace
      imagePullSecrets:
      - name: nvcrimagepullsecret
      restartPolicy: Never
      volumes:
      - name: model-cache
        persistentVolumeClaim:
          claimName: model-cache
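# Assumes the model-cache PVC already exists and is shared with the model
# deployment; it holds HF_HOME (for tokenizer downloads) and the perf artifacts.
# Privileged mode is needed only for the ip_local_port_range sysctl above.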