| """
|
| Test script to verify the enhanced fallback configuration is loaded correctly.
|
| """
|
|
|
from image_summarizer import ModelConfig
|
|
|
| def test_config():
|
| """Test that all models are loaded from config.env"""
|
| print("="*70)
|
| print("๐งช TESTING ENHANCED FALLBACK CONFIGURATION")
|
| print("="*70)
|
|
|
| config = ModelConfig()
|
|
|
|
|
| nvidia_models = config.get('nvidia_models', [])
|
| nvidia_vision = config.get('nvidia_vision_models', [])
|
| nvidia_text = config.get('nvidia_text_models', [])
|
| gemini_models = config.get('gemini_models', [])
|
|
|
| print(f"\n๐ Configuration Summary:")
|
| print(f" Total NVIDIA Models: {len(nvidia_models)}")
|
| print(f" - Vision Models: {len(nvidia_vision)}")
|
| print(f" - Text Models: {len(nvidia_text)}")
|
| print(f" Total Gemini Models: {len(gemini_models)}")
|
| print(f" GRAND TOTAL: {len(nvidia_models) + len(gemini_models)} models")
|
|
|
| print(f"\n๐ผ๏ธ NVIDIA Vision Models ({len(nvidia_vision)}):")
|
| for i, model in enumerate(nvidia_vision, 1):
|
| print(f" {i}. {model}")
|
|
|
| print(f"\n๐ NVIDIA Text Models ({len(nvidia_text)}):")
|
| for i, model in enumerate(nvidia_text, 1):
|
| print(f" {i}. {model}")
|
|
|
| print(f"\n๐ท Gemini Models ({len(gemini_models)}):")
|
| for i, model in enumerate(gemini_models, 1):
|
| print(f" {i}. {model}")
|
|
|
| print(f"\nโ๏ธ Other Settings:")
|
| print(f" Max Tokens: {config.get('max_tokens')}")
|
| print(f" Temperature: {config.get('temperature')}")
|
| print(f" Timeout: {config.get('request_timeout')}s")
|
| print(f" Streaming: {config.get('enable_streaming')}")
|
| print(f" Verbose Logging: {config.get('verbose_logging')}")
|
| print(f" Gemini Fallback: {config.get('enable_gemini_fallback')}")
|
|
|
|
|
| print(f"\nโ
VERIFICATION:")
|
| expected_nvidia = 8
|
| expected_gemini = 7
|
| expected_total = 15
|
|
|
| if len(nvidia_models) == expected_nvidia:
|
| print(f" โ
NVIDIA models: {len(nvidia_models)}/{expected_nvidia} โ")
|
| else:
|
| print(f" โ ๏ธ NVIDIA models: {len(nvidia_models)}/{expected_nvidia} (mismatch!)")
|
|
|
| if len(gemini_models) == expected_gemini:
|
| print(f" โ
Gemini models: {len(gemini_models)}/{expected_gemini} โ")
|
| else:
|
| print(f" โ ๏ธ Gemini models: {len(gemini_models)}/{expected_gemini} (mismatch!)")
|
|
|
| total = len(nvidia_models) + len(gemini_models)
|
| if total == expected_total:
|
| print(f" โ
Total models: {total}/{expected_total} โ")
|
| else:
|
| print(f" โ ๏ธ Total models: {total}/{expected_total} (mismatch!)")
|
|
|
| print(f"\n{'='*70}")
|
| if total == expected_total:
|
| print("๐ SUCCESS! All 15 models loaded correctly!")
|
| else:
|
| print("โ ๏ธ WARNING: Model count doesn't match expected configuration")
|
| print("="*70)
|
|
|
| if __name__ == "__main__":
|
| test_config()
|
|
|