File size: 1,878 Bytes
ab69189
 
 
5d801dd
ab69189
 
 
 
 
21c8c01
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab69189
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f995d18
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch.nn.functional as F

# Download (or load from cache) the fine-tuned intent-classification
# checkpoint from the Hugging Face Hub.  Network I/O happens here on import.
tokenizer = AutoTokenizer.from_pretrained("tuantc/flysugarbot")
model = AutoModelForSequenceClassification.from_pretrained("tuantc/flysugarbot")
# Inference mode: disables dropout/batch-norm updates for deterministic output.
model.eval()

# Every intent the classifier can emit, ordered by the model's label index.
classes = [
    'admin_hr_avi_hr_lead',
    'airbus_avi_hr_lead',
    'avi_hr_lead',
    'bot_challenge',
    'bus_info',
    'c99_avi_hr_lead',
    'card',
    'channel_info',
    'congras_general',
    'contract',
    'doc_email_sw',
    'dxg_avi_hr_lead',
    'empl_password',
    'encourage',
    'excited',
    'fly_avi_hr_lead',
    'general_spam',
    'goodbye',
    'greeting',
    'laptop_pc',
    'lover_spam',
    'meeting_room',
    'on_leave',
    'palantir_avi_hr_lead',
    'printer',
    'property',
    'salary_insurance_avi_hr_lead',
    'services',
    'spam_eating',
    'sw_install',
    'thoi_viec',
    'tms',
    'union',
    'wifi',
    'working_hour',
]
NUM_LABELS = len(classes)
# Map label index -> intent name, mirroring the list's ordering.
label_map = dict(enumerate(classes))

def predict_intent(model, tokenizer, text_instance, label_map):
    """Classify *text_instance* and return the predicted intent and its probability.

    Args:
        model: sequence-classification model; called as
            ``model(input_ids, attention_mask=...)`` and expected to yield
            logits as the first element of its output.
        tokenizer: callable returning ``input_ids`` / ``attention_mask``
            PyTorch tensors for the input text.
        text_instance: raw text (or list of texts) to classify.
        label_map: dict mapping class index -> intent name.

    Returns:
        tuple[str, float]: the predicted intent name and the softmax
        probability of that predicted class (first item of the batch).
    """
    device = torch.device("cpu")
    encoding = tokenizer(text_instance, return_tensors='pt', padding=True,
                         truncation=True, max_length=512)
    input_ids = encoding['input_ids'].to(device)
    attention_masks = encoding['attention_mask'].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_masks)
    logits = outputs[0]
    # One softmax pass over the class axis.  The original mixed dim=-1 (argmax)
    # with dim=1 (softmax) and took .max() over the whole batch tensor, which
    # only matches the predicted class for a single input; indexing the
    # predicted class explicitly is correct for batched input too.
    probs = F.softmax(logits, dim=-1)
    prediction = torch.argmax(probs, dim=-1)[0].item()
    predicted_label = label_map[prediction]
    prob = probs[0, prediction].item()
    return predicted_label, prob
def bot_response(message, history):
    """Gradio chat handler: classify *message* and report the detected intent.

    `history` is required by the ChatInterface callback signature but is not
    used — each message is classified independently.
    """
    intent, confidence = predict_intent(
        model=model,
        tokenizer=tokenizer,
        text_instance=message,
        label_map=label_map,
    )
    return f"Intent này là {intent} với confidence {confidence:.2f}"
# Wire the classifier into a minimal chat UI.
# NOTE(review): share=True publishes a world-reachable Gradio tunnel URL —
# confirm public exposure is intended.
demo = gr.ChatInterface(bot_response)
demo.launch(share=True)