| from transformers import pipeline | |
| import torch | |
# Candidate models, ordered most- to least-preferred; the loader below
# walks this list and keeps the first one that downloads successfully.
model_options = [
    "microsoft/DialoGPT-medium",  # preferred conversational model
    "microsoft/DialoGPT-small",   # smaller DialoGPT fallback
    "gpt2",                       # generic text-generation fallback
    "distilgpt2",                 # lightest-weight last resort
]
# Walk the fallback list and keep the first pipeline that loads.
# If every candidate fails, `pipe` and `model_id` remain None and the
# dummy fallback further down takes over.
pipe = None
model_id = None
for candidate in model_options:
    print(f"π Trying to load model: {candidate}")
    try:
        pipe = pipeline(
            "text-generation",
            model=candidate,
            torch_dtype="auto",   # let transformers pick the dtype
            device_map="auto",    # let accelerate place the model
        )
    except Exception as e:
        # Broad catch is deliberate: any load failure (network, disk,
        # incompatible weights) should just move us to the next candidate.
        print(f"β Failed to load {candidate}: {e}")
    else:
        model_id = candidate
        print(f"β Model '{model_id}' loaded successfully!")
        break
if pipe is None:
    print("β Could not load any model. Please check your internet connection.")

    class _DummyTokenizer:
        # Stand-in exposing the one attribute downstream code reads from a
        # real tokenizer; None signals "no EOS token id available".
        eos_token_id = None

    class DummyPipe:
        """Offline stand-in for a transformers text-generation pipeline.

        Calling it returns a canned apology in the same shape a real
        "text-generation" pipeline produces: a list of dicts with a
        "generated_text" key.
        """

        # BUG FIX: the original defined ``tokenizer`` as an instance
        # *method*, so the idiomatic ``pipe.tokenizer.eos_token_id``
        # (how a real transformers.Pipeline is used) would hit the bound
        # method and raise AttributeError. Expose it as an attribute to
        # match the real pipeline's interface.
        tokenizer = _DummyTokenizer()

        def __call__(self, *args, **kwargs):
            # Accepts and ignores any arguments, mirroring a pipeline call.
            return [{"generated_text": "I'm sorry, but I cannot connect to download the AI model. Please check your internet connection and try again."}]

    pipe = DummyPipe()
    model_id = "dummy"