# GPT / model.py
# Author: cosmoruler
# Implement model loading with fallbacks and add dummy pipeline for error handling
# Commit: 76108a1
from transformers import pipeline
import torch
# Candidate models, ordered from most to least capable; we fall back down
# the list until one loads successfully.
model_options = [
    "microsoft/DialoGPT-medium",
    "microsoft/DialoGPT-small",
    "gpt2",
    "distilgpt2",
]

pipe = None      # the loaded text-generation pipeline, or None if all fail
model_id = None  # identifier of the model that actually loaded

for candidate in model_options:
    print(f"🔄 Trying to load model: {candidate}")
    try:
        pipe = pipeline(
            "text-generation",
            model=candidate,
            torch_dtype="auto",   # let transformers pick the dtype
            device_map="auto",    # place on GPU/CPU automatically
        )
    except Exception as err:
        # Broad on purpose: any failure (network, missing weights, OOM)
        # just means we try the next, smaller fallback model.
        print(f"❌ Failed to load {candidate}: {err}")
    else:
        model_id = candidate
        print(f"✅ Model '{model_id}' loaded successfully!")
        break
if pipe is None:
    print("❌ Could not load any model. Please check your internet connection.")

    # Offline stand-in so the rest of the app keeps working when no model
    # could be downloaded.
    class DummyPipe:
        """Minimal stub mimicking a text-generation pipeline's interface."""

        def __call__(self, *args, **kwargs):
            # Match the real pipeline's return shape: a list of dicts with
            # a "generated_text" key.
            return [{"generated_text": "I'm sorry, but I cannot connect to download the AI model. Please check your internet connection and try again."}]

        @property
        def tokenizer(self):
            # Real pipelines expose a tokenizer; provide a stub carrying the
            # single attribute callers are expected to read.
            class _StubTokenizer:
                eos_token_id = None

            return _StubTokenizer()

    pipe = DummyPipe()
    model_id = "dummy"