# Hugging Face Space "oddadmix/arabic-summarization" — app.py (revision 1d2d2ac)
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as grad

# Fine-tuned causal LM for Arabic text summarization.
model_id = "oddadmix/arabic-summarization"
# Loaded once at module import (Spaces keeps the process warm); device_map="auto"
# places weights on the available accelerator, bf16 halves memory vs fp32.
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype="bfloat16",
# attn_implementation="flash_attention_2"  # disabled: requires flash-attn build
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
@spaces.GPU
def summarize(text):
    """Generate a summary of *text* with the Arabic-summarization chat model.

    Args:
        text: The text to summarize (expected Arabic, per the UI labels).

    Returns:
        The model-generated summary as a plain string, with special tokens
        and the echoed prompt removed.
    """
    prompt = "Summarize the following text: \n\n " + text
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
        tokenize=True,
    ).to(model.device)
    output = model.generate(
        input_ids,
        do_sample=True,
        temperature=0.3,
        min_p=0.15,
        repetition_penalty=1.05,
        max_new_tokens=512,
    )
    # Decode only the newly generated tokens. The previous approach decoded the
    # full sequence with skip_special_tokens=False and split on
    # "<|im_start|>assistant", which (a) leaked "<|im_end|>" and other special
    # tokens into the returned summary, and (b) raised IndexError whenever the
    # marker was absent from the decoded text.
    generated_tokens = output[0][input_ids.shape[-1]:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
# Gradio UI: multi-line input/output textboxes with Arabic labels
# (placeholder: "write the text you want to summarize"; title: "text summarization").
input_textbox = grad.Textbox(lines=5, placeholder="إكتب النص الذي تريد تلخيصه", label="Input Text")
output_textbox = grad.Textbox(lines=5, label="النص الملخص")
# Blocks until the server is stopped; this is the app's entry point on Spaces.
grad.Interface(summarize, inputs=[input_textbox], outputs=output_textbox, title="تلخيص النصوص",
description="تلخيص النصوص.").launch()