| | import gradio as gr |
| | import torch |
| | from transformers import AutoTokenizer |
| | from modeling_multitask import MultiTaskModel |
| |
|
# Hugging Face Hub repo that hosts both the tokenizer files and the
# custom multi-task model weights.
MODEL_ID = "patrickott1/model-ticket-ti"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# trust_remote_code is needed because MultiTaskModel is a custom
# architecture shipped with the repo, not a stock transformers class.
model = MultiTaskModel.from_pretrained(MODEL_ID, trust_remote_code=True)
model.eval()  # inference only: disable dropout / training-mode behavior
|
# Label vocabularies for the two classification heads.  List positions
# must match the class ids the model was trained with, so neither the
# order nor the strings themselves may be edited.
type_labels = [
    "RÉSEAU",
    "MATÉRIEL",
    "LOGICIEL",
    "COMPTE/MOT DE PASSE",
    "AUTRE",
]
priorite_labels = ["BASSE", "MOYENNE", "ÉLEVÉ"]
def predict(text):
    """Classify an IT support ticket.

    Runs the multi-task model once and decodes both heads.

    Parameters
    ----------
    text : str
        Free-text ticket description entered by the user.

    Returns
    -------
    tuple[str, str]
        ``(type, priorite)`` — labels taken from ``type_labels`` and
        ``priorite_labels`` respectively.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only: no gradient tracking needed.
    with torch.no_grad():
        outputs = model(**encoded)

    # Each head emits a (1, num_classes) logits tensor; argmax gives the
    # winning class id, which indexes the corresponding label list.
    type_idx = outputs.logits_type.argmax(dim=1).item()
    priorite_idx = outputs.logits_priorite.argmax(dim=1).item()

    return type_labels[type_idx], priorite_labels[priorite_idx]
|
# Wire the classifier into a minimal Gradio UI: one free-text input,
# two read-only text outputs (problem type and priority).
ticket_input = gr.Textbox(lines=2, placeholder="Entrez un ticket IT...")
ticket_outputs = [
    gr.Textbox(label="Type de problème"),
    gr.Textbox(label="Priorité"),
]

iface = gr.Interface(
    fn=predict,
    inputs=ticket_input,
    outputs=ticket_outputs,
    title="Classificateur de Ticket TI",
    description="Classification des tickets TI en type de problèmes et priorité",
)

iface.launch()