|
|
#!/bin/bash
#
# Smoke-test script: sends a single FIFO-generation prompt to the fine-tuned
# CodeLlama model and prints the generated RTL to stdout.
#
# Usage: run with no arguments from any directory (it cd's to the project root).

# Fail fast on errors and pipeline failures. -u is deliberately omitted:
# the venv activate script sourced below may reference unset vars (e.g. PS1).
set -eo pipefail

# All paths below are relative to the project root; abort instead of running
# against whatever the current working directory happens to be (SC2164).
cd /workspace/ftt/codellama-migration || exit 1

# Fine-tuned adapter output directory (produced by the training run).
readonly MODEL_PATH="training-outputs/codellama-fifo-v1"

# Base model the adapter was trained on top of.
readonly BASE_MODEL_PATH="models/base-models/CodeLlama-7B-Instruct"
|
|
|
|
|
# --- Banner ------------------------------------------------------------------
# NOTE(review): the emoji below was mojibake in the original ("π§ͺ");
# restored to 🧪 — confirm against the original author's intent.
echo "======================================================================"
echo "🧪 Testing CodeLlama Inference"
echo "======================================================================"
echo "Fine-tuned Model: $MODEL_PATH"
echo "Base Model: $BASE_MODEL_PATH"
echo "======================================================================"

# Activate the shared virtualenv when it exists (e.g. on the training host);
# otherwise fall back to whatever python3 is already on PATH.
if [ -f /venv/main/bin/activate ]; then
  # shellcheck disable=SC1091  # only present on the deployment host
  source /venv/main/bin/activate
fi
|
|
|
|
|
|
|
|
# Prompt used for the smoke test: a system preamble plus the concrete task,
# separated by one blank line. Single quotes keep "$display" literal without
# needing a backslash escape.
SYSTEM_PROMPT='You are Elinnos RTL Code Generator v1.0, a specialized Verilog/SystemVerilog code generation agent. Your role: Generate clean, synthesizable RTL code for hardware design tasks. Output ONLY functional RTL code with no $display, assertions, comments, or debug statements.'
TASK_PROMPT='Generate a synchronous FIFO with 8-bit data width, depth 4, write_enable, read_enable, full flag, empty flag.'
TEST_PROMPT="${SYSTEM_PROMPT}

${TASK_PROMPT}"
|
|
|
|
|
# Echo the exact prompt being sent so the console transcript is
# self-documenting.
# NOTE(review): both emoji below were mojibake ("π") in the original;
# restored to 📝 / 🚀 — confirm against the original author's intent.
echo ""
echo "📝 Test Prompt:"
echo "$TEST_PROMPT"
echo ""
echo "======================================================================"
echo "🚀 Running Inference..."
echo "======================================================================"
echo ""
|
|
|
|
|
|
|
|
# Run the project inference driver (flag semantics are defined by
# scripts/inference/inference_codellama.py). The original ignored the exit
# status, so a failed run still fell through to the success banner; now we
# report the failure on stderr and abort instead.
python3 scripts/inference/inference_codellama.py \
  --mode local \
  --model-path "$MODEL_PATH" \
  --base-model-path "$BASE_MODEL_PATH" \
  --prompt "$TEST_PROMPT" \
  --max-new-tokens 800 \
  --temperature 0.3 || {
    echo "❌ Inference failed (exit $?)" >&2
    exit 1
  }
|
|
|
|
|
# Closing banner — only reached when inference actually produced output.
# NOTE(review): the original banner echo was mojibake ("β") and its string
# literal was split across two lines; restored to a single ✅ line.
echo ""
echo "======================================================================"
echo "✅ Inference Complete!"
echo "======================================================================"
|
|
|
|
|
|