# flant5 / app.py — FLAN-T5 / LongT5 text2text demo Space (Gradio).
import gradio as gr
import torch
from transformers import pipeline
# Detect whether a CUDA GPU is available, select the device, and log it.
# (These prints end up in the Hugging Face Spaces container logs.)
# NOTE(review): the original checked torch.cuda.is_available() twice in two
# separate if/else blocks; this is the same logic and output in one branch.
if torch.cuda.is_available():
    device = "cuda"
    print("has cuda.")
    gpu_info = [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]
    print(f"🚀 Running on GPU: {gpu_info}")
else:
    device = "cpu"
    print("no cuda.")
    print("⚠️ No GPU found. Running on CPU.")

# Load the model on GPU (transformers pipeline expects a device index:
# 0 = first CUDA device, -1 = CPU).
model = pipeline(
    "text2text-generation",
    model="google/long-t5-local-base",
    device=0 if device == "cuda" else -1,
)
def generate(prompt, max_length=16300):
    """Run the loaded text2text pipeline on *prompt* and return the text.

    Parameters
    ----------
    prompt : str
        Input text passed to the model.
    max_length : int, optional
        Maximum generation length in tokens. Default 16300 keeps the
        original hard-coded value; exposed as a parameter so callers can
        request shorter outputs.

    Returns
    -------
    str
        The generated text of the first (and only) pipeline result.
    """
    # The pipeline returns a list of dicts like
    # [{"generated_text": "..."}]; unwrap the single result.
    return model(prompt, max_length=max_length)[0]['generated_text']
# Wire the generation function into a minimal Gradio UI:
# one text box in, one text box out.
iface = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
)

# Start the web server (device info was already logged above).
iface.launch()