"""Best-effort loader for a Hugging Face text-generation pipeline.

Tries each candidate model in preference order (largest first); if none
can be loaded (e.g. no network access), installs a dummy pipeline so that
downstream code can still call `pipe(...)` and get a helpful message.

Module-level results:
    pipe     -- the loaded `transformers` pipeline, or a DummyPipe fallback.
    model_id -- the id of the model actually loaded, or "dummy".
"""

from transformers import pipeline
import torch  # noqa: F401 -- presumably kept so torch backends register; verify before removing

# Candidate models, ordered by preference (most capable first).
model_options = [
    "microsoft/DialoGPT-medium",
    "microsoft/DialoGPT-small",
    "gpt2",
    "distilgpt2",
]

pipe = None
model_id = None

for model in model_options:
    try:
        print(f"🔄 Trying to load model: {model}")
        pipe = pipeline(
            "text-generation",
            model=model,
            torch_dtype="auto",
            device_map="auto",
        )
        model_id = model
        print(f"✅ Model '{model_id}' loaded successfully!")
        break
    except Exception as e:
        # Broad catch is deliberate: loading can fail for many reasons
        # (network, missing weights, incompatible config) and we want to
        # fall through to the next candidate rather than crash.
        print(f"❌ Failed to load {model}: {e}")

if pipe is None:
    print("❌ Could not load any model. Please check your internet connection.")

    class DummyPipe:
        """Offline stand-in for a text-generation pipeline.

        Callable like the real pipeline; always returns a fixed message
        explaining that the model could not be downloaded.
        """

        class _DummyTokenizer:
            # Mirrors the one attribute callers read from a real tokenizer.
            eos_token_id = None

        # One shared tokenizer instance; the original rebuilt the class and
        # a fresh instance on every `tokenizer` property access.
        _tokenizer = _DummyTokenizer()

        def __call__(self, *args, **kwargs):
            return [{"generated_text": "I'm sorry, but I cannot connect to download the AI model. Please check your internet connection and try again."}]

        @property
        def tokenizer(self):
            return self._tokenizer

    pipe = DummyPipe()
    model_id = "dummy"