# Load the processor and model directly from the Hugging Face Hub.
# Note: "umm-maybe/image-generator-identifier" is a classifier for
# identifying AI-generated images; from_pretrained downloads (and caches)
# the weights on first use, so this requires network access.
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("umm-maybe/image-generator-identifier")
model = AutoModelForImageTextToText.from_pretrained("umm-maybe/image-generator-identifier")
# --- Provenance notes (copied from the Hugging Face model page) ---
# No model card is available for this model.
# Downloads last month: 12
# Inference Providers (NEW): this model isn't deployed by any Inference
# Provider. You can ask for provider support on the model page.
# Use a pipeline as a high-level helper: wraps preprocessing, the model
# forward pass, and postprocessing behind a single callable. Downloads
# the model on first use, so this requires network access.
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="umm-maybe/image-generator-identifier")