# Hugging Face Space: internship request classifier chatbot
# (page-status chrome "Spaces / Sleeping" removed — it was scraping residue,
# not Python, and prevented the file from parsing.)
import gradio as gr
from transformers import pipeline

# Load the fine-tuned request-intent classifier from the Hugging Face Hub.
# return_all_scores=True makes a single-string call return the nested form
# [[{"label": ..., "score": ...}, ...]] — `respond` below relies on indexing
# [0] to get the per-label score list.
# NOTE(review): `return_all_scores` is deprecated in recent transformers in
# favour of `top_k=None`, but `top_k=None` flattens the single-input output
# shape, so switching here would require updating `respond` in lockstep.
classifier = pipeline(
    "text-classification",
    model="King-8/request-classifier",
    tokenizer="King-8/request-classifier",
    return_all_scores=True
)
# Human-readable intent names, indexed by the numeric suffix of the model's
# raw output labels ("LABEL_0" .. "LABEL_5").
_INTENT_NAMES = (
    "administrative_action",
    "attendance",
    "check_in",
    "clarification",
    "general_chat",
    "technical_help",
)

# Maps raw model labels to readable intent names.
label_map = {f"LABEL_{idx}": name for idx, name in enumerate(_INTENT_NAMES)}
# Canned markdown reply for each classified intent.
RESPONSES = dict(
    attendance="This message appears to be about **attendance**.",
    check_in="This looks like a **check-in** related message.",
    technical_help="This seems to be a **technical help** request.",
    clarification="This appears to be a **clarification request**.",
    administrative_action="This request likely requires an **administrative action**.",
    general_chat="This looks like general conversation and does not require action.",
)
def respond(message, history):
    """Classify a chat message into an internship-request intent.

    Args:
        message: The user's latest chat message.
        history: Prior chat turns supplied by gr.ChatInterface; unused —
            classification considers only the current message.

    Returns:
        A markdown string reporting the detected intent, the model's
        confidence, and a canned explanation of how to handle the request.
    """
    # Format input exactly like training.
    text = f"Role: student | Context: No prior context | Request: {message}"
    # classifier(...) yields [[{label, score}, ...]] because the pipeline was
    # built with return_all_scores=True; [0] selects the single input's list.
    outputs = classifier(text)[0]
    best = max(outputs, key=lambda x: x["score"])
    intent = label_map.get(best["label"], best["label"])
    confidence = round(best["score"], 2)
    # Fall back gracefully when the model emits a label with no canned reply
    # (previously an unmapped label raised KeyError on RESPONSES[intent]).
    explanation = RESPONSES.get(
        intent, "This message could not be mapped to a known request type."
    )
    return (
        f"Detected intent: {intent}\n"
        f"Confidence: {confidence}\n\n"
        f"{explanation}"
    )
| """ | |
| For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
| """ | |
| client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b") | |
| messages = [{"role": "system", "content": system_message}] | |
| messages.extend(history) | |
| messages.append({"role": "user", "content": message}) | |
| response = "" | |
| for message in client.chat_completion( | |
| messages, | |
| max_tokens=max_tokens, | |
| stream=True, | |
| temperature=temperature, | |
| top_p=top_p, | |
| ): | |
| choices = message.choices | |
| token = "" | |
| if len(choices) and choices[0].delta.content: | |
| token = choices[0].delta.content | |
| response += token | |
| yield response | |
# Wire the classifier into a chat UI. type="messages" delivers history to
# `respond` as a list of {"role": ..., "content": ...} dicts.
chatbot = gr.ChatInterface(
    fn=respond,
    type="messages",
    title="Internship Request Classifier Chatbot",
    description=(
        "This chatbot uses a trained intent classification model to understand "
        "internship-related messages and determine how they should be handled."
    ),
)
# Assemble the page: a sidebar holding a Hugging Face login button, with the
# chat interface rendered into the main column.
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()

# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()