import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline

# Load the fine-tuned intent classifier; return_all_scores=True makes the
# pipeline report a score for every label, not just the top prediction.
classifier = pipeline(
    "text-classification",
    model="King-8/request-classifier",
    tokenizer="King-8/request-classifier",
    return_all_scores=True,
)
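
# For reference: with return_all_scores=True a call such as classifier("hello")
# returns one result per input, each a list of {"label", "score"} dicts, e.g.
# (label/score values below are illustrative, not real model output):
#
#   [[{"label": "LABEL_0", "score": 0.02}, {"label": "LABEL_4", "score": 0.95}, ...]]
#
# respond() below therefore indexes [0] and keeps the highest-scoring entry.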

# Map the model's raw LABEL_n outputs to human-readable intent names.
label_map = {
    "LABEL_0": "administrative_action",
    "LABEL_1": "attendance",
    "LABEL_2": "check_in",
    "LABEL_3": "clarification",
    "LABEL_4": "general_chat",
    "LABEL_5": "technical_help"
}

# Canned replies shown for each detected intent.
RESPONSES = {
    "attendance": "This message appears to be about **attendance**.",
    "check_in": "This looks like a **check-in** related message.",
    "technical_help": "This seems to be a **technical help** request.",
    "clarification": "This appears to be a **clarification request**.",
    "administrative_action": "This request likely requires an **administrative action**.",
    "general_chat": "This looks like general conversation and does not require action."
}


def respond(message, history):
    # Format the input exactly as during training: a fixed role/context prefix
    # followed by the raw user message.
    text = f"Role: student | Context: No prior context | Request: {message}"

    outputs = classifier(text)[0]
    best = max(outputs, key=lambda x: x["score"])

    intent = label_map.get(best["label"], best["label"])
    confidence = round(best["score"], 2)

    reply = (
        f"Detected intent: {intent}\n"
        f"Confidence: {confidence}\n\n"
        f"{RESPONSES[intent]}"
    )

    return reply
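
# Hedged quick check (the sample message is made up; the predicted intent and
# score depend entirely on the model):
#
#   print(respond("The portal will not accept my weekly log upload", []))
#
# This prints the detected intent, its rounded confidence, and the matching
# canned reply from RESPONSES.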


    """
    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    messages = [{"role": "system", "content": system_message}]

    messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = ""

    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = message.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content

        response += token
        yield response


chatbot = gr.ChatInterface(
    respond,
    type="messages",
    title="Internship Request Classifier Chatbot",
    description=(
        "This chatbot uses a trained intent classification model to understand "
        "internship-related messages and determine how they should be handled."
    ),
)

with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()


if __name__ == "__main__":
    demo.launch()