Spaces:
Sleeping
Sleeping
| """Integration tests to validate all resume claims.""" | |
| import sys | |
| import os | |
| sys.path.append('..') | |
| import pytest | |
| import torch | |
| from pathlib import Path | |
| class TestResumeClaimValidation: | |
| """Validate each claim from the resume.""" | |
| def test_sentence_transformers_integration(self): | |
| """Verify Sentence Transformers is properly integrated.""" | |
| from app.vector_store import VectorStore | |
| vector_store = VectorStore() | |
| assert vector_store.model is not None | |
| assert hasattr(vector_store.model, 'encode') | |
| # Test encoding | |
| test_text = "def hello(): return 'world'" | |
| embedding = vector_store.model.encode([test_text]) | |
| assert embedding.shape[0] == 1 | |
| assert embedding.shape[1] > 0 # Has dimensions | |
| print("✅ Sentence Transformers integration verified") | |
| def test_faiss_integration(self): | |
| """Verify FAISS indexing works.""" | |
| from app.vector_store import VectorStore | |
| vector_store = VectorStore() | |
| test_files = [ | |
| ("test1.py", "def func1(): pass"), | |
| ("test2.py", "def func2(): pass") | |
| ] | |
| embeddings, files = vector_store.add_documents(test_files) | |
| assert embeddings.shape[0] == 2 | |
| # Test search | |
| indices = vector_store.search("function definition", k=1) | |
| assert len(indices) > 0 | |
| print("✅ FAISS integration verified") | |
| def test_langchain_integration(self): | |
| """Verify LangChain RAG pipeline exists.""" | |
| from app.langchain_rag import LangChainRAGPipeline, HuggingFaceLLM | |
| # Check classes exist | |
| assert HuggingFaceLLM is not None | |
| assert LangChainRAGPipeline is not None | |
| # Check LangChain imports | |
| from langchain.prompts import PromptTemplate | |
| from langchain.chains import LLMChain | |
| assert PromptTemplate is not None | |
| assert LLMChain is not None | |
| print("✅ LangChain integration verified") | |
| def test_vector_store_configuration(self): | |
| """Verify vector store configuration.""" | |
| from app.config import Config | |
| assert hasattr(Config, 'FAISS_INDEX_PATH') | |
| assert Config.FAISS_INDEX_PATH == "code_index" | |
| print("✅ Vector store configuration verified") | |
| def test_lora_configuration(self): | |
| """Verify LoRA fine-tuning support.""" | |
| from app.lora_model import OptimizedModelLoader | |
| from peft import LoraConfig | |
| assert OptimizedModelLoader is not None | |
| assert LoraConfig is not None | |
| # Check LoRA config parameters | |
| from app.config import Config | |
| assert hasattr(Config, 'LORA_R') | |
| assert hasattr(Config, 'LORA_ALPHA') | |
| assert hasattr(Config, 'LORA_TARGET_MODULES') | |
| assert Config.LORA_R == 8 | |
| assert Config.LORA_ALPHA == 16 | |
| print("✅ LoRA configuration verified") | |
| def test_quantization_support(self): | |
| """Verify 8-bit quantization is configured.""" | |
| from transformers import BitsAndBytesConfig | |
| from app.config import Config | |
| assert BitsAndBytesConfig is not None | |
| assert hasattr(Config, 'ENABLE_QUANTIZATION') | |
| assert hasattr(Config, 'USE_GPU_OFFLOAD') | |
| print("✅ Quantization support verified") | |
| def test_performance_tracking(self): | |
| """Verify performance tracking system.""" | |
| from app.performance_tracker import PerformanceTracker | |
| tracker = PerformanceTracker() | |
| # Test tracking | |
| start = tracker.start_query() | |
| import time | |
| time.sleep(0.1) | |
| metric = tracker.end_query(start, "test query", tokens_generated=50) | |
| assert 'latency_seconds' in metric | |
| assert 'tokens_per_second' in metric | |
| assert metric['latency_seconds'] > 0 | |
| # Test improvement calculation | |
| stats = tracker.get_summary_stats() | |
| assert 'total_queries' in stats | |
| print("✅ Performance tracking verified") | |
| def test_benchmark_script_exists(self): | |
| """Verify benchmarking infrastructure.""" | |
| benchmark_path = Path("scripts/benchmark.py") | |
| assert benchmark_path.exists() | |
| # Check it has the right functions | |
| with open(benchmark_path) as f: | |
| content = f.read() | |
| assert 'benchmark_inference' in content | |
| assert 'improvement' in content | |
| print("✅ Benchmark script verified") | |
| def test_training_script_exists(self): | |
| """Verify LoRA training script.""" | |
| train_path = Path("scripts/train_lora.py") | |
| assert train_path.exists() | |
| with open(train_path) as f: | |
| content = f.read() | |
| assert 'train_lora_model' in content | |
| assert 'LoraConfig' in content | |
| print("✅ Training script verified") | |
| def test_deployment_documentation(self): | |
| """Verify deployment docs exist.""" | |
| assert Path("DEPLOYMENT.md").exists() | |
| assert Path("QUICKSTART.md").exists() | |
| assert Path("RESUME_VALIDATION.md").exists() | |
| assert Path(".env.example").exists() | |
| print("✅ Documentation verified") | |
| def test_project_structure(self): | |
| """Verify all required files exist.""" | |
| required_files = [ | |
| "app/assistant_v2.py", | |
| "app/langchain_rag.py", | |
| "app/vector_store.py", | |
| "app/lora_model.py", | |
| "app/performance_tracker.py", | |
| "app/config.py", | |
| "requirements.txt", | |
| "README.md" | |
| ] | |
| for file_path in required_files: | |
| assert Path(file_path).exists(), f"Missing: {file_path}" | |
| print("✅ Project structure verified") | |
| class TestPerformanceMetrics: | |
| """Test performance measurement capabilities.""" | |
| def test_latency_measurement(self): | |
| """Verify latency can be measured.""" | |
| from app.performance_tracker import PerformanceTracker | |
| import time | |
| tracker = PerformanceTracker() | |
| start = tracker.start_query() | |
| time.sleep(0.05) # Simulate work | |
| metric = tracker.end_query(start, "test", 100) | |
| assert metric['latency_seconds'] >= 0.05 | |
| assert metric['latency_seconds'] < 0.1 | |
| print(f"✅ Measured latency: {metric['latency_seconds']:.3f}s") | |
| def test_improvement_calculation(self): | |
| """Verify improvement percentage calculation.""" | |
| from app.performance_tracker import PerformanceTracker | |
| tracker = PerformanceTracker() | |
| tracker.baseline_latency = 3.0 | |
| # Simulate queries | |
| tracker.current_session["queries"] = [ | |
| {"latency_seconds": 3.0}, | |
| {"latency_seconds": 1.8}, | |
| {"latency_seconds": 1.9} | |
| ] | |
| improvement = tracker.calculate_improvement() | |
| expected_improvement = ((3.0 - 1.85) / 3.0) * 100 | |
| assert abs(improvement['improvement_percentage'] - expected_improvement) < 1 | |
| assert improvement['improvement_percentage'] > 35 # Should be ~38% | |
| print(f"✅ Calculated improvement: {improvement['improvement_percentage']:.1f}%") | |
| def run_all_tests(): | |
| """Run all validation tests.""" | |
| print("\n" + "="*60) | |
| print("RESUME CLAIMS VALIDATION TEST SUITE") | |
| print("="*60 + "\n") | |
| test_suite = TestResumeClaimValidation() | |
| performance_tests = TestPerformanceMetrics() | |
| tests = [ | |
| ("Sentence Transformers", test_suite.test_sentence_transformers_integration), | |
| ("FAISS", test_suite.test_faiss_integration), | |
| ("LangChain", test_suite.test_langchain_integration), | |
| ("Qdrant Cloud", test_suite.test_qdrant_integration), | |
| ("LoRA", test_suite.test_lora_configuration), | |
| ("Quantization", test_suite.test_quantization_support), | |
| ("Performance Tracking", test_suite.test_performance_tracking), | |
| ("Benchmark Script", test_suite.test_benchmark_script_exists), | |
| ("Training Script", test_suite.test_training_script_exists), | |
| ("Documentation", test_suite.test_deployment_documentation), | |
| ("Project Structure", test_suite.test_project_structure), | |
| ("Latency Measurement", performance_tests.test_latency_measurement), | |
| ("Improvement Calculation", performance_tests.test_improvement_calculation), | |
| ] | |
| passed = 0 | |
| failed = 0 | |
| for name, test_func in tests: | |
| try: | |
| print(f"\nTesting: {name}") | |
| test_func() | |
| passed += 1 | |
| except Exception as e: | |
| print(f"❌ FAILED: {name}") | |
| print(f" Error: {str(e)}") | |
| failed += 1 | |
| print("\n" + "="*60) | |
| print(f"RESULTS: {passed} passed, {failed} failed") | |
| print("="*60) | |
| if failed == 0: | |
| print("\n🎉 ALL TESTS PASSED! Your resume claims are validated!") | |
| else: | |
| print(f"\n⚠️ {failed} test(s) failed. Check the errors above.") | |
| return failed == 0 | |
| if __name__ == "__main__": | |
| success = run_all_tests() | |
| sys.exit(0 if success else 1) | |