# ai-engineering-project/scripts/hf_test_runner.sh
# GitHub Action — "Clean deployment without binary files" (commit f884e6e)
# NOTE: these header lines are web-view scrape residue, commented out so the
# script parses; the real script begins at the shebang below.
#!/bin/bash
# HuggingFace CI/CD Test Runner
# This script runs comprehensive tests for the hybrid architecture.
#
# Strict mode: exit on any unhandled error (-e), treat expansion of an
# unset variable as an error (-u; all optional vars below use ${VAR:-}
# defaults), and fail a pipeline if any stage fails (-o pipefail).
set -euo pipefail

echo "🚀 Starting HuggingFace CI/CD Test Suite"
echo "========================================"

# ANSI color codes used by run_test_suite for pass/fail output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Global test counters, updated by run_test_suite
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
#######################################
# Run a single named test suite and update the global pass/fail counters.
# Globals:
#   TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS (written)
#   GREEN, RED, YELLOW, NC (read, for colored output)
# Arguments:
#   $1 - human-readable suite name
#   $2 - shell command to run (passed to eval)
#   $3 - "true" if a failure should abort the whole run (default: true)
# Returns:
#   0 on success AND on a non-critical failure (the failure is recorded in
#   FAILED_TESTS); exits the script with status 1 on a critical failure.
#######################################
run_test_suite() {
  local test_name="$1"
  local test_command="$2"
  local is_critical="${3:-true}"

  echo -e "\n${YELLOW}Running: $test_name${NC}"
  echo "Command: $test_command"
  echo "----------------------------------------"

  TOTAL_TESTS=$((TOTAL_TESTS + 1))

  # NOTE: eval is intentional here — every command string is defined in this
  # script itself, never built from untrusted input.
  if eval "$test_command"; then
    echo -e "${GREEN}✅ PASSED: $test_name${NC}"
    PASSED_TESTS=$((PASSED_TESTS + 1))
    return 0
  fi

  echo -e "${RED}❌ FAILED: $test_name${NC}"
  FAILED_TESTS=$((FAILED_TESTS + 1))
  if [[ "$is_critical" == "true" ]]; then
    echo -e "${RED}Critical test failed. Stopping execution.${NC}"
    exit 1
  fi
  echo -e "${YELLOW}Non-critical test failed. Continuing...${NC}"
  # Bug fix: previously this returned 1, which — because callers invoke this
  # function as a plain top-level statement under 'set -e' — aborted the whole
  # script even for suites explicitly marked non-critical. Return 0; the
  # failure is already recorded in FAILED_TESTS.
  return 0
}
# --- Environment setup --------------------------------------------------
# Make the repository root importable and fall back to mock credentials so
# the suite can run in CI without real secrets being configured.
export PYTHONPATH="${PYTHONPATH:-}:$(pwd)"
export HF_TOKEN="${HF_TOKEN:-mock-token-for-testing}"
export OPENROUTER_API_KEY="${OPENROUTER_API_KEY:-mock-key-for-testing}"

# Only the first 10 characters of each secret are printed, to avoid leaking
# real tokens into CI logs.
printf '%s\n' "Environment configured:"
printf ' %s\n' \
  "PYTHONPATH: $PYTHONPATH" \
  "HF_TOKEN: ${HF_TOKEN:0:10}..." \
  "OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:0:10}..."
# 1. Linting and formatting tests
# All three are critical: a style failure blocks deployment outright.
echo -e "\nπŸ” Code Quality Checks"
echo "===================="
run_test_suite "Black Code Formatting" "black --check ." true
run_test_suite "Import Sorting (isort)" "isort --check-only ." true
run_test_suite "Flake8 Linting" "flake8 --max-line-length=88 --exclude venv,dev-tools" true
# 2. Unit tests
# Critical: core logic must pass before anything else is worth checking.
echo -e "\nπŸ§ͺ Unit Tests"
echo "=============="
run_test_suite "Core Unit Tests" "pytest tests/ -m 'unit or not integration' -v" true
run_test_suite "HF Embedding Service Tests" "pytest tests/test_embedding/test_hf_embedding_service.py -v" true
run_test_suite "LLM Component Tests" "pytest tests/test_llm/ -v" true
run_test_suite "Citation Validation Tests" "pytest -k citation -v" true
# 3. Integration tests (non-critical in CI)
# Non-critical ("false"): these may need live services unavailable in CI.
echo -e "\nπŸ”— Integration Tests"
echo "==================="
run_test_suite "HF Service Integration" "pytest tests/ -m integration -v" false
run_test_suite "End-to-End Pipeline Test" "python scripts/test_e2e_pipeline.py" false
# 4. Coverage report
# Non-critical: coverage is informational and must not block the run.
echo -e "\nπŸ“Š Coverage Analysis"
echo "==================="
run_test_suite "Generate Coverage Report" "pytest --cov=src --cov-report=xml --cov-report=term-missing tests/" false
# 5. HuggingFace-specific tests
# Critical: validates .hf.yml parses and HF runtime deps (gradio) import.
echo -e "\nπŸ€— HuggingFace Specific Tests"
echo "============================="
run_test_suite "HF Configuration Validation" "python -c 'import yaml; yaml.safe_load(open(\".hf.yml\"))'" true
run_test_suite "HF Dependencies Check" "python -c 'import gradio; import requests; print(\"HF deps OK\")'" true
# 6. Architecture validation
# Import smoke test is critical; full service init is non-critical since it
# presumably touches external services — TODO confirm against the script.
echo -e "\nπŸ—οΈ Architecture Validation"
echo "==========================="
run_test_suite "Import All Modules" "python -c 'import sys; sys.path.append(\"src\"); from embedding.hf_embedding_service import HFEmbeddingService; from llm.prompt_templates import PromptTemplates; print(\"All imports successful\")'" true
run_test_suite "Service Initialization" "python scripts/validate_services.py" false
# --- Final summary ------------------------------------------------------
# Print the aggregated counters and exit with a status code CI can key off:
# 0 only when every recorded suite passed, 1 otherwise.
echo -e "\n📋 Test Summary"
echo "==============="
echo -e "Total Tests: $TOTAL_TESTS"
echo -e "${GREEN}Passed: $PASSED_TESTS${NC}"
echo -e "${RED}Failed: $FAILED_TESTS${NC}"

# Quote the counter (SC2086): an empty/unset value in an unquoted [ ] test
# would be a syntax error rather than a clean comparison.
if [ "$FAILED_TESTS" -eq 0 ]; then
  echo -e "\n${GREEN}🎉 All tests passed! Ready for HuggingFace deployment.${NC}"
  exit 0
else
  echo -e "\n${YELLOW}⚠️ Some tests failed. Check the output above.${NC}"
  exit 1
fi