#!/bin/bash
# /data/adaptai/projects/elizabeth/blueprint/scripts/verify_forge.sh
# Robust script to verify the operational status of the Nova GPU forge.
#
# Assumes local port forwarding is already set up from your machine to the
# Vast.ai instance:
#   20037 -> 8000 (for 7B model)
#   20038 -> 8001 (for 14B model)
set -u  # treat unset variables as errors (deliberately no -e: a failed curl
        # must fall through to the OFFLINE branch, not abort the script)

echo "šŸ” [QUARTZ] Starting MLOps Verification Protocol"
echo "==================================================="

# Configuration — ENDPOINTS[i] serves MODELS[i]; keep the arrays index-aligned.
ENDPOINTS=(
  "http://localhost:20037"
  "http://localhost:20038"
)
MODELS=(
  "Qwen/Qwen2-7B-Instruct"
  "Qwen/Qwen2-14B-Instruct"
)

ALL_CHECKS_PASSED=true

# 1. Verify Model Endpoints
echo "1. 🧠 Verifying Nova Model Endpoints..."
for i in "${!ENDPOINTS[@]}"; do
  endpoint=${ENDPOINTS[$i]}
  model_name=${MODELS[$i]}
  port=${endpoint##*:}  # text after the last ':' — replaces echo|cut subshell

  echo "   Pinging ${model_name} at ${endpoint}..."

  # Check if the vLLM server is alive.
  # BUGFIX: the original used -w "%{{http_code}}" (doubled braces — a Python
  # f-string escape that leaked into the shell). curl's write-out variable is
  # %{http_code}; with the doubled braces the comparison below could never
  # match "200" and every endpoint was reported OFFLINE.
  response_code=$(curl -s -o /dev/null -w "%{http_code}" -m 5 "${endpoint}/v1/models")

  if [ "$response_code" = "200" ]; then
    echo "   āœ… Port ${port}: vLLM Server ONLINE"
  else
    echo "   āŒ Port ${port}: vLLM Server OFFLINE or ERROR (HTTP $response_code)"
    ALL_CHECKS_PASSED=false
    continue
  fi

  # 2. Perform a warm-up inference to check model loading and performance.
  echo "   šŸš€ Performing warm-up inference for ${model_name}..."
  TEST_PROMPT="Hello, Nova. This is a connectivity and performance verification. Please respond with 'OPERATIONAL' and nothing else."
  START_TIME=$(date +%s%N)

  # NOTE(review): the original file was truncated mid-request here ("-d @- <").
  # The JSON payload, latency computation, and pass/fail reporting below are
  # reconstructed from the visible variables and stated intent — confirm
  # against the original script before relying on exact output wording.
  RESPONSE=$(curl -s -X POST "${endpoint}/v1/chat/completions" \
    -H "Content-Type: application/json" \
    -m 60 \
    -d @- <<EOF
{
  "model": "${model_name}",
  "messages": [{"role": "user", "content": "${TEST_PROMPT}"}],
  "max_tokens": 16,
  "temperature": 0
}
EOF
  )
  END_TIME=$(date +%s%N)
  # %s%N yields nanoseconds; divide by 1e6 for a millisecond latency figure.
  LATENCY_MS=$(( (END_TIME - START_TIME) / 1000000 ))

  # A successful vLLM chat completion always carries a "content" field.
  if [ -n "$RESPONSE" ] && printf '%s' "$RESPONSE" | grep -q '"content"'; then
    echo "   āœ… Inference OK for ${model_name} (${LATENCY_MS} ms)"
  else
    echo "   āŒ Inference FAILED for ${model_name} (${LATENCY_MS} ms)"
    ALL_CHECKS_PASSED=false
  fi
done

echo "==================================================="
if [ "$ALL_CHECKS_PASSED" = true ]; then
  echo "āœ… All checks passed. Forge is OPERATIONAL."
  exit 0
else
  echo "āŒ Some checks failed. See output above."
  exit 1
fi