# app.py — Gradio demo: Arabic question generation with a fine-tuned AraT5 model.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr
import torch

MODEL_NAME = "mimoha/arat5-qg-finetuned"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
model.eval()  # inference only — disables dropout etc.


def generate_question(text):
    """Generate a question from the given passage via beam search.

    Args:
        text: Input passage (Arabic expected, per the UI labels); truncated
            to 512 tokens.

    Returns:
        The decoded generated question as a plain string.
    """
    # tokenizer(...) returns input_ids AND attention_mask; the original
    # tokenizer.encode() dropped the mask, which generate() needs to tell
    # real tokens apart from padding.
    inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
    with torch.no_grad():  # no gradients needed at inference time
        outputs = model.generate(
            **inputs,
            max_length=128,
            num_beams=4,
            early_stopping=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


iface = gr.Interface(
    fn=generate_question,
    inputs=gr.Textbox(label="أدخل فقرة النص"),
    outputs=gr.Textbox(label="السؤال الناتج"),
)

if __name__ == "__main__":
    # Guard so importing this module doesn't start the web server.
    iface.launch(share=True, show_error=True)