#!/bin/bash
#
# Test script for CodeLlama inference.
# Sends one fixed FIFO-generation prompt through the fine-tuned model
# via scripts/inference/inference_codellama.py and prints the output.
#
# Requires: /workspace/ftt/codellama-migration checkout with the model
# artifacts below; optionally a virtualenv at /venv/main.

set -euo pipefail

readonly WORKDIR="/workspace/ftt/codellama-migration"
readonly MODEL_PATH="training-outputs/codellama-fifo-v1"
readonly BASE_MODEL_PATH="models/base-models/CodeLlama-7B-Instruct"
readonly SEP="======================================================================"

# Fail loudly if the workspace is missing — relative model paths below
# depend on this cwd.
cd "$WORKDIR" || { printf 'error: cannot cd to %s\n' "$WORKDIR" >&2; exit 1; }

echo "$SEP"
echo "🧪 Testing CodeLlama Inference"
echo "$SEP"
echo "Fine-tuned Model: $MODEL_PATH"
echo "Base Model: $BASE_MODEL_PATH"
echo "$SEP"

# Activate virtual environment if present; otherwise fall back to the
# system python3 already on PATH.
if [ -f /venv/main/bin/activate ]; then
  # shellcheck disable=SC1091  # path only exists on the target machine
  source /venv/main/bin/activate
fi

# Test prompt: system-role preamble plus the FIFO generation task.
# \$display is escaped so the literal text "$display" reaches the model.
TEST_PROMPT="You are Elinnos RTL Code Generator v1.0, a specialized Verilog/SystemVerilog code generation agent. Your role: Generate clean, synthesizable RTL code for hardware design tasks. Output ONLY functional RTL code with no \$display, assertions, comments, or debug statements. Generate a synchronous FIFO with 8-bit data width, depth 4, write_enable, read_enable, full flag, empty flag."

echo ""
echo "📝 Test Prompt:"
echo "$TEST_PROMPT"
echo ""
echo "$SEP"
echo "🚀 Running Inference..."
echo "$SEP"
echo ""

# Run inference. Under 'set -e' a non-zero exit here aborts the script,
# so the "Complete" banner only prints on success.
python3 scripts/inference/inference_codellama.py \
  --mode local \
  --model-path "$MODEL_PATH" \
  --base-model-path "$BASE_MODEL_PATH" \
  --prompt "$TEST_PROMPT" \
  --max-new-tokens 800 \
  --temperature 0.3

echo ""
echo "$SEP"
echo "✅ Inference Complete!"
echo "$SEP"