import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

# Hugging Face model path
MODEL_NAME = "umarfarzan/clipworthy-deberta-model"

# Load tokenizer & model once at startup so each request reuses them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# Create pipeline
classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    device=-1,  # CPU; set to 0 for GPU
)


def predict_clipworthiness(text: str | None) -> list | dict:
    """Classify a transcript's clip-worthiness.

    Args:
        text: Transcript text from the Gradio Textbox. May be ``None``
            when the textbox is cleared, or empty/whitespace-only.

    Returns:
        The pipeline's prediction list (``[{"label": ..., "score": ...}]``)
        on success, or ``{"error": "No text provided"}`` for missing input.
    """
    # Guard against None as well as blank strings: a cleared Gradio
    # Textbox passes None, and None.strip() would raise AttributeError.
    if not text or not text.strip():
        return {"error": "No text provided"}
    # Truncate long transcripts to the model's supported context.
    return classifier(text, truncation=True, max_length=256)


# Gradio interface: transcript in, raw prediction JSON out.
iface = gr.Interface(
    fn=predict_clipworthiness,
    inputs=gr.Textbox(
        label="Transcript Text",
        placeholder="Paste transcript here...",
    ),
    outputs=gr.JSON(label="Prediction"),
    title="Clipworthy Classifier",
    description="Paste transcript text and get classification results.",
)

if __name__ == "__main__":
    iface.launch()