jiya2 committed on
Commit
0eb943c
·
verified ·
1 Parent(s): 41b21ed

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +156 -98
app.py CHANGED
@@ -1,98 +1,156 @@
1
- import streamlit as st
2
- from llama_cpp import Llama
3
- from dotenv import load_dotenv
4
- import os
5
-
6
-
7
- load_dotenv()
8
- print(f"REPO_ID: {os.getenv('REPO_ID')}")
9
-
10
- llm = Llama.from_pretrained(
11
- repo_id=os.getenv("REPO_ID"),
12
- filename="unsloth.Q8_0.gguf"
13
- )
14
-
15
- def load_html(file_path):
16
- with open(file_path, "r", encoding="utf-8") as file:
17
- return file.read()
18
-
19
- st.markdown(load_html("chatbot_ui.html"), unsafe_allow_html=True)
20
- st.markdown('<div class="title">HR Chatbot</div>', unsafe_allow_html=True)
21
- st.write("\n\n\n")
22
-
23
-
24
- intents = {
25
-
26
- "Employee Benefits & Policies": "Assist with salary, leave, and policies.",
27
- "Employee Support & Self-Service": "Provide support for HR-related queries.",
28
- "Recruitment & Onboarding": "Help with job applications and hiring processes."
29
- }
30
-
31
-
32
- intent_keywords = {
33
-
34
- "Employee Benefits & Policies": ["leave", "insurance", "policy", "benefit", "salary", "payroll", "bonus"],
35
- "Employee Support & Self-Service": ["complaint", "technical issue", "self-service", "support", "HR help"],
36
- "Recruitment & Onboarding": ["hiring", "job", "apply", "interview", "onboarding", "new employee"]
37
- }
38
-
39
-
40
- def determine_intent(question):
41
- question_lower = question.lower()
42
- for intent, keywords in intent_keywords.items():
43
- if any(keyword in question_lower for keyword in keywords):
44
- return intent
45
- return None
46
-
47
-
48
- if "selected_intent" not in st.session_state:
49
- st.session_state.selected_intent = list(intents.keys())[0]
50
-
51
- cols = st.columns(len(intents))
52
- for i, (intent, desc) in enumerate(intents.items()):
53
- btn_label = f"βœ… **{intent}**" if st.session_state.selected_intent == intent else intent
54
- if cols[i].button(btn_label, key=intent, help=desc):
55
- st.session_state.selected_intent = intent
56
- st.rerun()
57
-
58
- if "messages" not in st.session_state:
59
- st.session_state.messages = []
60
-
61
-
62
- if st.session_state.messages:
63
- st.markdown('<div class="chat-container">', unsafe_allow_html=True)
64
- for msg in st.session_state.messages:
65
- role_class = "user-msg" if msg["role"] == "user" else "bot-msg"
66
- with st.chat_message(msg["role"]):
67
- st.markdown(f'<div class="chat-box {role_class}">{msg["content"]}</div>', unsafe_allow_html=True)
68
-
69
-
70
- if prompt := st.chat_input("Ask me anything related to HR..."):
71
-
72
- st.session_state.messages.append({"role": "user", "content": prompt})
73
-
74
- with st.chat_message("user"):
75
- st.markdown(f'<div class="chat-box user-msg">πŸ‘€ {prompt}</div>', unsafe_allow_html=True)
76
-
77
-
78
- correct_intent = determine_intent(prompt)
79
-
80
- if correct_intent and correct_intent != st.session_state.selected_intent:
81
-
82
- reply = f"πŸ”„ Redirecting... Your query belongs to '{correct_intent}'. Please select the correct category."
83
- else:
84
- with st.spinner("πŸ€” Thinking..."):
85
- response = llm.create_chat_completion(
86
- messages=[{"role": "user", "content": prompt}],
87
- max_tokens=280
88
- )
89
- reply = response["choices"][0]["message"]["content"]
90
-
91
-
92
- st.session_state.messages.append({"role": "assistant", "content": reply})
93
-
94
- with st.chat_message("assistant"):
95
- st.markdown(f'<div class="chat-box bot-msg">πŸ€– {reply}</div>', unsafe_allow_html=True)
96
-
97
- if st.session_state.messages:
98
- st.markdown('</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification
2
+ import streamlit as st
3
+ from llama_cpp import Llama
4
+ from dotenv import load_dotenv
5
+ import os
6
+ import torch
7
+
8
load_dotenv()

# Configuration required at startup, supplied via the environment (.env through
# python-dotenv): the GGUF HR model repo, the intent classifier, and the
# chitchat seq2seq model.
_REQUIRED_VARS = ("REPO_ID", "INTENT_MODEL", "CHITCHAT_MODEL")

try:
    REPO_ID = os.getenv("REPO_ID")
    INTENT_MODEL = os.getenv("INTENT_MODEL")
    CHITCHAT_MODEL = os.getenv("CHITCHAT_MODEL")

    # Name the missing variables explicitly so a deployment error is actionable,
    # instead of the previous "one or more ... missing" message.
    missing = [name for name in _REQUIRED_VARS if not os.getenv(name)]
    if missing:
        raise EnvironmentError(
            f"Missing required environment variables: {', '.join(missing)}"
        )

except Exception as e:
    # Surface the problem in the UI, then abort: the app cannot run without models.
    st.error(f"Environment setup failed: {e}")
    raise SystemExit(e)
21
+
22
try:
    # Cache model loading across Streamlit reruns: the script re-executes on every
    # user interaction, and without st.cache_resource the GGUF weights and both
    # transformer models would be reloaded each time.
    @st.cache_resource(show_spinner=False)
    def _load_models():
        """Load the HR LLM, the intent classifier, and the chitchat pipeline once."""
        llm = Llama.from_pretrained(
            repo_id=REPO_ID,
            filename="unsloth.F16.gguf"
        )
        intent_tok = AutoTokenizer.from_pretrained(INTENT_MODEL)
        intent_clf = AutoModelForSequenceClassification.from_pretrained(INTENT_MODEL)
        chit_tok = AutoTokenizer.from_pretrained(CHITCHAT_MODEL)
        chit_model = AutoModelForSeq2SeqLM.from_pretrained(CHITCHAT_MODEL)
        chit_pipe = pipeline('text2text-generation', model=chit_model, tokenizer=chit_tok)
        return llm, intent_tok, intent_clf, chit_tok, chit_model, chit_pipe

    # Keep the original module-level names so the rest of the script is unchanged.
    llm_hr, intent_tokenizer, intent_model, tokenizer, model, pipe_chitchat = _load_models()

except Exception as e:
    st.error(f"Model loading failed: {e}")
    raise SystemExit(e)
36
+
37
+
38
# Ordered HR intent categories; index positions must match the classifier's
# output labels (see determine_intent).
intent_labels = [
    "Employee Benefits & Policies",
    "Employee Support & Self-Service",
    "Recruitment & Onboarding",
]
# Full sidebar menu: the casual-chat mode first, then the HR intents.
total_labels = ["πŸ˜‚ Let's Chat & Chill", *intent_labels]
40
+
41
+
42
+ st.set_page_config(page_title="AI HR Chatbot", layout="wide")
43
+ st.markdown("""
44
+ <style>
45
+ body { background-color: #f6f9fc; }
46
+ .sidebar .sidebar-content { background-color: #ffffff; }
47
+ .chat-container { max-height: 500px; overflow-y: auto; padding: 1rem; display: flex; flex-direction: column; }
48
+ .chat-box { display: inline-block; border-radius: 10px; padding: 8px 12px; margin: 5px 0; max-width: 70%; word-wrap: break-word; }
49
+ .user-msg { background-color: #708090; text-align: right; align-self: flex-end; color: white; }
50
+ .bot-msg { background-color: #d3d3d3; text-align: left; align-self: flex-start; color: black; }
51
+ div.title { font-size: 2rem; font-weight: bold; margin: 20px 0; color: #0077cc; }
52
+ </style>
53
+ """, unsafe_allow_html=True)
54
+
55
# Seed per-user session defaults once. Streamlit reruns this script on every
# interaction, so each key is guarded before being written.
_session_defaults = {"intent": total_labels[0], "messages": []}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
59
+
60
+
61
def select_intent(current_intent):
    """Render the sidebar intent picker and return the chosen label.

    The currently active intent is shown with a check-mark prefix; the returned
    value always has that prefix stripped off.
    """
    checked = f"βœ… {current_intent}"
    options = [checked if label == current_intent else label for label in total_labels]
    default_index = options.index(checked) if checked in options else 0
    choice = st.sidebar.radio(
        "πŸ’Ό Select Your HR Intent",
        options,
        index=default_index
    )
    return choice.replace("βœ… ", "")
69
+
70
+
71
def determine_intent(text):
    """Classify *text* with the fine-tuned intent model and return the label index.

    Falls back to index 0 (best-effort default) if tokenization or inference
    fails, after surfacing the error in the UI.
    """
    try:
        encoded = intent_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        # Inference only — no gradients needed.
        with torch.no_grad():
            scores = intent_model(**encoded).logits
        return torch.argmax(scores, dim=1).item()
    except Exception as e:
        st.error(f"Intent determination failed: {e}")
        return 0
81
+
82
+
83
def render_chat():
    """Replay the stored conversation inside the styled scrollable container."""
    if not st.session_state.messages:
        return
    st.markdown('<div class="chat-container">', unsafe_allow_html=True)
    for entry in st.session_state.messages:
        # User bubbles align right, bot bubbles align left (see CSS classes).
        css_class = "user-msg" if entry["role"] == "user" else "bot-msg"
        with st.chat_message(entry["role"], avatar=entry.get("avatar")):
            st.markdown(f'<div class="chat-box {css_class}">{entry["content"]}</div>', unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
91
+
92
+
93
def generate_hr_response(user_prompt, intent):
    """Ask the fine-tuned HR LLM to answer *user_prompt* for the given *intent*.

    The query is wrapped in an Alpaca-style instruction template; on any failure
    a user-facing fallback string is returned instead of raising.
    """
    template = """You are an HR Assistant at our company.
Your role is to assist employees by providing accurate and concise responses regarding company policies and HR-related questions.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
    task = f"Answer the HR-related query categorized as '{intent}'."
    # NOTE(review): eos_token here comes from the chitchat tokenizer, not the
    # llama model's own tokenizer — presumably matching, but confirm.
    prompt_text = template.format(task, user_prompt, "") + tokenizer.eos_token

    try:
        completion = llm_hr.create_chat_completion(
            messages=[{"role": "user", "content": prompt_text}],
            max_tokens=100,
        )
        return completion["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Failed to generate HR response: {e}")
        return "⚠️ Sorry, I couldn't process your HR request at the moment."
117
+
118
+
119
# Page header and subtitle.
st.markdown('<div class="title">AI HR Chatbot </div>', unsafe_allow_html=True)
st.markdown("<h6 style='color:rgb(131, 123, 160);'>Your personal assistant for HR queries and support.</h6>", unsafe_allow_html=True)

# Persist the sidebar selection for this session, then replay the chat history
# so it survives Streamlit's rerun-per-interaction model.
st.session_state.intent = select_intent(st.session_state.intent)
render_chat()
124
+
125
+
126
# Main interaction loop: runs once per Streamlit rerun when the user submits input.
try:
    if prompt := st.chat_input("Ask me anything related to HR or just chat casually..."):
        # Record and echo the user's message immediately.
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": "man.png"})
        with st.chat_message("user", avatar="man.png"):
            st.markdown(f'<div class="chat-box user-msg">{prompt}</div>', unsafe_allow_html=True)

        if st.session_state.intent == "πŸ˜‚ Let's Chat & Chill":
            # Casual mode: route to the seq2seq chitchat pipeline.
            with st.spinner("πŸ˜„ Chitchatting..."):
                try:
                    result = pipe_chitchat(prompt)
                    reply = result[0]['generated_text'].strip()
                except Exception as e:
                    st.error(f"Chitchat generation failed: {e}")
                    reply = "Oops, I couldn't come up with a witty reply! πŸ˜…"
        else:
            # HR mode: classify the query, then answer with the specialized LLM.
            detected_intent = determine_intent(prompt)
            predicted_label = intent_labels[detected_intent]

            if predicted_label != st.session_state.intent:
                # NOTE(review): the message says "switched", but session_state.intent
                # is not updated here — the sidebar will still show the old selection
                # on the next rerun. Confirm this is intended.
                st.warning(f"πŸ” Automatically switched to **{predicted_label}** based on your query.")

            # The classifier's prediction (not the sidebar selection) drives the answer.
            with st.spinner("Generating response with specialized HR model..."):
                reply = generate_hr_response(prompt, predicted_label)

        # Store and render the assistant reply.
        # NOTE(review): role "bot" is not one of Streamlit's built-in chat roles
        # ("user"/"assistant"); it relies on the explicit avatar — verify rendering.
        st.session_state.messages.append({"role": "bot", "content": reply, "avatar": "robot.png"})
        with st.chat_message("bot", avatar="robot.png"):
            st.markdown(f'<div class="chat-box bot-msg">{reply}</div>', unsafe_allow_html=True)

except Exception as e:
    # Last-resort boundary: surface any unexpected failure in the UI.
    st.error(f"Something went wrong while processing your input: {e}")