"""Minimal Gradio web app that summarizes text with a Hugging Face pipeline."""

from transformers import pipeline

import gradio as gr

# Use a specific model and CPU (device=-1 forces CPU so the demo runs
# without a GPU). Loaded once at module level so every request reuses it.
model = pipeline(
    "summarization",
    model="facebook/bart-large-cnn",
    device=-1,  # forces CPU
)


def predict(prompt):
    """Return a summary of *prompt* produced by the BART summarization pipeline.

    Args:
        prompt: Free-form text to summarize.

    Returns:
        The summarized text, or an empty string for blank input (the
        underlying model raises on empty prompts).
    """
    if not prompt or not prompt.strip():
        return ""
    # pipeline() returns a list of dicts; the summary lives under "summary_text".
    summary = model(prompt)[0]["summary_text"]
    return summary


# Simple Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(placeholder="Enter text to summarize", lines=4),
    outputs="text",
    title="MLOps Pipeline Summarizer",
    description="Enter text and get a summarized version using Hugging Face Transformers",
)

# Only start the web server when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()