from transformers import pipeline
import torch
# Candidate models, ordered from preferred down to the lightest-weight fallback.
model_options = [
    "microsoft/DialoGPT-medium",
    "microsoft/DialoGPT-small",
    "gpt2",
    "distilgpt2",
]

pipe = None
model_id = None

# Walk the candidates and keep the first pipeline that loads successfully.
for candidate in model_options:
    try:
        print(f"🔄 Trying to load model: {candidate}")
        pipe = pipeline(
            "text-generation",
            model=candidate,
            torch_dtype="auto",
            device_map="auto",
        )
        model_id = candidate
        print(f"✅ Model '{model_id}' loaded successfully!")
        break
    except Exception as err:
        # Best-effort boundary: report the failure and move on to the next model.
        print(f"❌ Failed to load {candidate}: {err}")
if pipe is None:
    print("❌ Could not load any model. Please check your internet connection.")

    class DummyPipe:
        """Offline stand-in mimicking the pipeline interface for testing."""

        def __call__(self, *args, **kwargs):
            # Ignore the prompt; always reply with a fixed connectivity apology,
            # shaped like a real text-generation pipeline result (list of dicts).
            return [{"generated_text": "I'm sorry, but I cannot connect to download the AI model. Please check your internet connection and try again."}]

        @property
        def tokenizer(self):
            # Minimal tokenizer surrogate exposing only eos_token_id.
            class DummyTokenizer:
                eos_token_id = None

            return DummyTokenizer()

    pipe = DummyPipe()
    model_id = "dummy"