Upload test_inference.sh with huggingface_hub
test_inference.sh  +48 -0
test_inference.sh
ADDED
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Test script for CodeLlama inference
+
+cd /workspace/ftt/codellama-migration
+
+MODEL_PATH="training-outputs/codellama-fifo-v1"
+BASE_MODEL_PATH="models/base-models/CodeLlama-7B-Instruct"
+
+echo "======================================================================"
+echo "🧪 Testing CodeLlama Inference"
+echo "======================================================================"
+echo "Fine-tuned Model: $MODEL_PATH"
+echo "Base Model: $BASE_MODEL_PATH"
+echo "======================================================================"
+
+# Activate virtual environment if needed
+if [ -f /venv/main/bin/activate ]; then
+    source /venv/main/bin/activate
+fi
+
+# Test prompt
+TEST_PROMPT="You are Elinnos RTL Code Generator v1.0, a specialized Verilog/SystemVerilog code generation agent. Your role: Generate clean, synthesizable RTL code for hardware design tasks. Output ONLY functional RTL code with no \$display, assertions, comments, or debug statements.
+
+Generate a synchronous FIFO with 8-bit data width, depth 4, write_enable, read_enable, full flag, empty flag."
+
+echo ""
+echo "📝 Test Prompt:"
+echo "$TEST_PROMPT"
+echo ""
+echo "======================================================================"
+echo "🚀 Running Inference..."
+echo "======================================================================"
+echo ""
+
+# Run inference
+python3 scripts/inference/inference_codellama.py \
+    --mode local \
+    --model-path "$MODEL_PATH" \
+    --base-model-path "$BASE_MODEL_PATH" \
+    --prompt "$TEST_PROMPT" \
+    --max-new-tokens 800 \
+    --temperature 0.3
+
+echo ""
+echo "======================================================================"
+echo "✅ Inference Complete!"
+echo "======================================================================"
+
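For context, the inference entry point itself (scripts/inference/inference_codellama.py) is not part of this commit. Below is a minimal sketch of what its local mode might look like, assuming the fine-tuned checkpoint is a PEFT/LoRA adapter applied on top of the base CodeLlama-7B-Instruct model and that the Hugging Face transformers and peft libraries are used. Only the flag names are taken from the call above; everything else is an assumption and the real script may differ.

# Hypothetical sketch of the local-mode path of inference_codellama.py.
# NOT part of this commit; assumes a PEFT/LoRA adapter on a base model.
import argparse

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("--mode", default="local")
parser.add_argument("--model-path", required=True)       # adapter directory (assumption)
parser.add_argument("--base-model-path", required=True)  # base CodeLlama directory
parser.add_argument("--prompt", required=True)
parser.add_argument("--max-new-tokens", type=int, default=800)
parser.add_argument("--temperature", type=float, default=0.3)
args = parser.parse_args()

# Load tokenizer and base model, then attach the fine-tuned adapter weights
tokenizer = AutoTokenizer.from_pretrained(args.base_model_path)
base = AutoModelForCausalLM.from_pretrained(
    args.base_model_path, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, args.model_path)
model.eval()

inputs = tokenizer(args.prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=args.max_new_tokens,
        temperature=args.temperature,
        do_sample=True,  # temperature only takes effect when sampling
    )
# Print only the newly generated tokens, not the echoed prompt
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))

With --temperature 0.3 the generation stays close to greedy decoding, which suits a smoke test where the expected output is deterministic, synthesizable RTL.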