File size: 2,585 Bytes
fbf3c28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
#!/bin/bash
# /data/adaptai/projects/elizabeth/blueprint/scripts/verify_forge.sh
# Robust script to verify the operational status of the Nova GPU forge.

# Strict-mode subset: error on unset variables, fail pipelines on any stage.
# -e is deliberately omitted: curl failures are handled explicitly below.
set -uo pipefail

echo "πŸ” [QUARTZ] Starting MLOps Verification Protocol"
echo "==================================================="

# Configuration
# Assuming local port forwarding is set up from your machine to the Vast.ai instance.
# Forward 20037 -> 8000 (for 7B model)
# Forward 20038 -> 8001 (for 14B model)
# These arrays are parallel: ENDPOINTS[i] serves MODELS[i]. Never mutated.
readonly ENDPOINTS=( "http://localhost:20037" "http://localhost:20038" )
readonly MODELS=( "Qwen/Qwen2-7B-Instruct" "Qwen/Qwen2-14B-Instruct" )
ALL_CHECKS_PASSED=true

# 1. Verify Model Endpoints
# For each endpoint: (a) liveness-check the vLLM server, (b) run a tiny
# warm-up completion and confirm the model answers 'OPERATIONAL'.
# Reads: ENDPOINTS, MODELS (parallel arrays). Writes: ALL_CHECKS_PASSED.
echo "1. 🧠 Verifying Nova Model Endpoints..."
for i in "${!ENDPOINTS[@]}"; do
    endpoint=${ENDPOINTS[$i]}
    model_name=${MODELS[$i]}
    port=${endpoint##*:}  # e.g. 20037; same value the old `cut -d: -f3` produced
    echo "   Pinging ${model_name} at ${endpoint}..."

    # Check if the vLLM server is alive.
    # BUG FIX: format string was "%{{http_code}}" (stray f-string-style
    # doubled braces); curl only understands "%{http_code}".
    response_code=$(curl -s -o /dev/null -w "%{http_code}" -m 5 "${endpoint}/v1/models")

    if [ "$response_code" = "200" ]; then
        echo "   βœ… Port ${port}: vLLM Server ONLINE"
    else
        echo "   ❌ Port ${port}: vLLM Server OFFLINE or ERROR (HTTP $response_code)"
        ALL_CHECKS_PASSED=false
        continue
    fi

    # 2. Perform a warm-up inference to check model loading and performance
    echo "   πŸš€ Performing warm-up inference for ${model_name}..."
    TEST_PROMPT="Hello, Nova. This is a connectivity and performance verification. Please respond with 'OPERATIONAL' and nothing else."

    START_TIME=$(date +%s%N)
    # BUG FIX: the original curl invocation was split across lines with no
    # trailing backslashes, so -H and -d ran as separate broken commands.
    RESPONSE=$(curl -s -X POST "${endpoint}/v1/chat/completions" \
        -H "Content-Type: application/json" \
        -d @- <<EOF
{
    "model": "${model_name}",
    "messages": [{"role": "user", "content": "$TEST_PROMPT"}],
    "max_tokens": 5,
    "temperature": 0
}
EOF
)
    END_TIME=$(date +%s%N)

    # date +%s%N is nanoseconds; convert the delta to milliseconds.
    LATENCY_MS=$(( (END_TIME - START_TIME) / 1000000 ))
    # Quote the response so jq receives the JSON intact (no word-splitting
    # or glob expansion on the payload).
    RESPONSE_TEXT=$(printf '%s' "$RESPONSE" | jq -r '.choices[0].message.content // "ERROR"')

    if [[ "$RESPONSE_TEXT" == "OPERATIONAL" ]]; then
        echo "   βœ… Model Response: OK ('OPERATIONAL')"
        echo "   βœ… Latency: ${LATENCY_MS} ms"
    else
        echo "   ❌ Model Response FAILED. Raw response:"
        echo "      ${RESPONSE}"
        ALL_CHECKS_PASSED=false
    fi
done

# Final verdict: report pass/fail based on the flag set by the checks above.
if [ "$ALL_CHECKS_PASSED" = true ]; then
    # BUG FIX: bash echo does not expand "\n" without -e (it printed a
    # literal backslash-n); printf interprets the escape portably.
    printf '\nβœ… [QUARTZ] All MLOps checks PASSED. Forge is OPERATIONAL.\n'
else
    printf '\n❌ [QUARTZ] Some MLOps checks FAILED. Review logs above.\n'
fi

echo "==================================================="