karthik102 commited on
Commit
a087821
·
verified ·
1 Parent(s): 2e6b8f1

Update chatbot_logic.py

Browse files
Files changed (1) hide show
  1. chatbot_logic.py +264 -238
chatbot_logic.py CHANGED
@@ -1,238 +1,264 @@
1
- import os
2
- import re
3
- import json
4
- import pyttsx3
5
- import requests
6
- from datetime import datetime
7
- from dotenv import load_dotenv
8
-
9
- # === ENV SETUP ===
10
- load_dotenv()
11
- OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
12
-
13
- HEADERS = {
14
- "Authorization": f"Bearer {OPENROUTER_API_KEY}",
15
- "Content-Type": "application/json",
16
- }
17
- MODEL = "anthropic/claude-3-haiku"
18
-
19
- # === TTS SETUP ===
20
- engine = pyttsx3.init()
21
- engine.setProperty('rate', 180)
22
- tts_enabled = True
23
-
24
- def speak(text):
25
- if tts_enabled:
26
- try:
27
- engine.say(text)
28
- engine.runAndWait()
29
- except Exception as e:
30
- print(f"[TTS Error] {str(e)}")
31
-
32
- def set_tts(enabled: bool):
33
- global tts_enabled
34
- tts_enabled = enabled
35
-
36
- # === SESSION SAVE ===
37
- SAVE_DIR = "chat_logs"
38
- os.makedirs(SAVE_DIR, exist_ok=True)
39
-
40
- def save_chat_session(responses: dict, messages: list, recommendation: str):
41
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
42
- filename = f"chat_{timestamp}.json"
43
- path = os.path.join(SAVE_DIR, filename)
44
-
45
- session_data = {
46
- "timestamp": timestamp,
47
- "user_responses": responses,
48
- "chat_history": [{"user": u, "bot": b} for u, b in messages],
49
- "recommendation": recommendation,
50
- }
51
-
52
- with open(path, "w", encoding="utf-8") as f:
53
- json.dump(session_data, f, indent=4)
54
-
55
- print(f"✅ Chat session saved to {path}")
56
-
57
- # === USER STATE ===
58
- user_state = {
59
- "stage": "intro",
60
- "responses": {},
61
- }
62
-
63
- # === QUESTION FLOW ===
64
- questions = [
65
- {
66
- "key": "age",
67
- "text": "What is your age?",
68
- "type": "int",
69
- "validation": lambda val: val.isdigit() and 0 < int(val) < 120,
70
- "error": "Please enter a valid age between 1 and 120."
71
- },
72
- {
73
- "key": "fever",
74
- "text": "Do you have a fever? (yes/no)",
75
- "type": "yesno",
76
- "validation": lambda val: val.lower() in ["yes", "no"],
77
- "error": "Please answer with yes or no."
78
- },
79
- {
80
- "key": "cough",
81
- "text": "Are you experiencing cough or chest pain? (yes/no)",
82
- "type": "yesno",
83
- "validation": lambda val: val.lower() in ["yes", "no"],
84
- "error": "Please answer with yes or no."
85
- },
86
- {
87
- "key": "chronic",
88
- "text": "Do you have any chronic conditions? (yes/no)",
89
- "type": "yesno",
90
- "validation": lambda val: val.lower() in ["yes", "no"],
91
- "error": "Please answer with yes or no."
92
- },
93
- {
94
- "key": "chills",
95
- "text": "Since you have a fever, do you also have chills or body aches? (yes/no)",
96
- "depends_on": {"fever": "yes"},
97
- "type": "yesno",
98
- "validation": lambda val: val.lower() in ["yes", "no"],
99
- "error": "Please answer with yes or no."
100
- }
101
- ]
102
-
103
- def reset_user_state():
104
- global user_state
105
- user_state = {
106
- "stage": "intro",
107
- "responses": {},
108
- }
109
-
110
- def infer_yes_no(user_input):
111
- cleaned = user_input.lower().strip()
112
- if re.search(r"\b(yes|yeah|yep|i do|i am|sure|of course)\b", cleaned):
113
- return "yes"
114
- elif re.search(r"\b(no|nah|nope|i don’t|i do not|not really|i am not)\b", cleaned):
115
- return "no"
116
- return None
117
-
118
- def query_openrouter(prompt):
119
- messages = [
120
- {
121
- "role": "system",
122
- "content": (
123
- "You are a friendly virtual doctor. Respond in second person only. "
124
- "Give a short, kind explanation of what the user might be facing. "
125
- "Do not repeat already known symptoms. Be medically helpful but clear."
126
- )
127
- },
128
- {"role": "user", "content": prompt}
129
- ]
130
-
131
- data = {
132
- "model": MODEL,
133
- "messages": messages,
134
- }
135
-
136
- try:
137
- response = requests.post(
138
- "https://openrouter.ai/api/v1/chat/completions",
139
- headers=HEADERS,
140
- json=data,
141
- timeout=20
142
- )
143
- result = response.json()
144
- return result["choices"][0]["message"]["content"]
145
- except Exception as e:
146
- return f"LLM Error: {str(e)}"
147
-
148
- def get_next_question():
149
- for q in questions:
150
- key = q["key"]
151
- if key not in user_state["responses"]:
152
- if "depends_on" in q:
153
- if not all(user_state["responses"].get(k) == v for k, v in q["depends_on"].items()):
154
- continue
155
- return q
156
- return None
157
-
158
- def format_summary_prompt():
159
- summary = "\n".join([f"{k}: {v}" for k, v in user_state["responses"].items()])
160
- return f"Based on these user symptoms, provide a helpful diagnosis and suggestions:\n{summary}"
161
-
162
- def handle_user_message(user_input, chat_history):
163
- global user_state
164
-
165
- user_input = user_input.strip()
166
-
167
- if user_input.lower() == "hi" and user_state["stage"] == "done":
168
- reset_user_state()
169
-
170
- if user_state["stage"] == "intro":
171
- bot_msg = (
172
- "👋 Hello! I'm your virtual health assistant. I'll ask a few questions and suggest what to do.\n\n"
173
- + questions[0]["text"]
174
- )
175
- user_state["stage"] = "questions"
176
- chat_history.append((user_input, bot_msg))
177
- speak(bot_msg)
178
- return bot_msg, chat_history
179
-
180
- elif user_state["stage"] == "questions":
181
- question = get_next_question()
182
- if question:
183
- key = question["key"]
184
- input_value = user_input.lower()
185
-
186
- # Yes/No intent inference
187
- if question["type"] == "yesno":
188
- inferred = infer_yes_no(input_value)
189
- if inferred:
190
- input_value = inferred
191
- else:
192
- bot_msg = question["error"]
193
- chat_history.append((user_input, bot_msg))
194
- speak(bot_msg)
195
- return bot_msg, chat_history
196
-
197
- # Age validation
198
- if question["type"] == "int" and not question["validation"](input_value):
199
- bot_msg = question["error"]
200
- chat_history.append((user_input, bot_msg))
201
- speak(bot_msg)
202
- return bot_msg, chat_history
203
-
204
- # Save valid input
205
- user_state["responses"][key] = int(input_value) if question["type"] == "int" else input_value
206
-
207
- # Red flag → end early
208
- if (
209
- user_state["responses"].get("fever") == "yes"
210
- and user_state["responses"].get("cough") == "yes"
211
- and user_state["responses"].get("chills") == "yes"
212
- ):
213
- prompt = format_summary_prompt()
214
- advice = query_openrouter(prompt)
215
- user_state["stage"] = "done"
216
- chat_history.append((user_input, advice))
217
- speak(advice)
218
- save_chat_session(user_state["responses"], chat_history, advice)
219
- return advice, chat_history
220
-
221
- next_q = get_next_question()
222
- if next_q:
223
- bot_msg = next_q["text"]
224
- else:
225
- user_state["stage"] = "done"
226
- prompt = format_summary_prompt()
227
- bot_msg = query_openrouter(prompt)
228
- save_chat_session(user_state["responses"], chat_history, bot_msg)
229
-
230
- chat_history.append((user_input, bot_msg))
231
- speak(bot_msg)
232
- return bot_msg, chat_history
233
-
234
- elif user_state["stage"] == "done":
235
- bot_msg = "✅ You've completed the health check. Refresh or type 'hi' to start over."
236
- chat_history.append((user_input, bot_msg))
237
- speak(bot_msg)
238
- return bot_msg, chat_history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import json
4
+ import pyttsx3
5
+ import requests
6
+ from datetime import datetime
7
+ from dotenv import load_dotenv
8
+
9
+ # === ENV SETUP ===
10
+ load_dotenv()
11
+ OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
12
+
13
+ HEADERS = {
14
+ "Authorization": f"Bearer {OPENROUTER_API_KEY}",
15
+ "Content-Type": "application/json",
16
+ }
17
+ MODEL = "anthropic/claude-3-haiku"
18
+
19
# === TTS SETUP ===
# gTTS replaces the earlier pyttsx3 engine, which required a local
# speech backend that hosted environments do not provide.
from gtts import gTTS
from tempfile import NamedTemporaryFile

tts_enabled = True
# Playback relies on a local audio player, so TTS is disabled when
# running inside a Hugging Face Space.
tts_supported = os.getenv("SPACE_ENVIRONMENT") != "huggingface"

def set_tts(enabled: bool):
    """Globally enable or disable spoken responses."""
    global tts_enabled
    tts_enabled = enabled

def speak(text: str):
    """Synthesize *text* with gTTS and play it through a local player.

    Best-effort: any failure (no network for gTTS, missing audio player)
    is printed and swallowed so the chat flow is never interrupted.
    """
    if not tts_enabled or not tts_supported:
        return

    tmp_path = None
    try:
        tts = gTTS(text)
        # Create the temp file, then close the handle before writing:
        # on Windows an open NamedTemporaryFile cannot be reopened by
        # gTTS or the audio player.
        with NamedTemporaryFile(delete=False, suffix=".mp3") as tmp:
            tmp_path = tmp.name
        tts.save(tmp_path)
        if os.name == "nt":
            # 'start /wait' blocks until playback finishes; plain 'start'
            # returned immediately, so the file was deleted below before
            # the player could even open it.
            os.system(f'start /wait "" "{tmp_path}"')
        else:
            os.system(f'mpg123 "{tmp_path}"')
    except Exception as e:
        print(f"[TTS Error] {str(e)}")
    finally:
        # Always clean up the temp file, even when synthesis or playback
        # raises (the original leaked the file on error).
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
62
# === SESSION SAVE ===
SAVE_DIR = "chat_logs"
os.makedirs(SAVE_DIR, exist_ok=True)

def save_chat_session(responses: dict, messages: list, recommendation: str):
    """Persist one finished consultation to a timestamped JSON file."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_path = os.path.join(SAVE_DIR, f"chat_{stamp}.json")

    history = []
    for user_text, bot_text in messages:
        history.append({"user": user_text, "bot": bot_text})

    payload = {
        "timestamp": stamp,
        "user_responses": responses,
        "chat_history": history,
        "recommendation": recommendation,
    }

    with open(out_path, "w", encoding="utf-8") as fh:
        json.dump(payload, fh, indent=4)

    print(f" Chat session saved to {out_path}")
82
+
83
# === USER STATE ===
# Mutable conversation state shared across handle_user_message() calls:
# "stage" tracks the flow phase, "responses" maps question key -> answer.
user_state = {"stage": "intro", "responses": {}}
88
+
89
# === QUESTION FLOW ===
def _yes_no(key, text, **extra):
    """Build one yes/no intake-question entry (shared validation/error)."""
    entry = {
        "key": key,
        "text": text,
        "type": "yesno",
        "validation": lambda val: val.lower() in ["yes", "no"],
        "error": "Please answer with yes or no.",
    }
    entry.update(extra)
    return entry

# Ordered intake questionnaire; entries with "depends_on" are only asked
# when the prerequisite answers match.
questions = [
    {
        "key": "age",
        "text": "What is your age?",
        "type": "int",
        "validation": lambda val: val.isdigit() and 0 < int(val) < 120,
        "error": "Please enter a valid age between 1 and 120.",
    },
    _yes_no("fever", "Do you have a fever? (yes/no)"),
    _yes_no("cough", "Are you experiencing cough or chest pain? (yes/no)"),
    _yes_no("chronic", "Do you have any chronic conditions? (yes/no)"),
    _yes_no(
        "chills",
        "Since you have a fever, do you also have chills or body aches? (yes/no)",
        depends_on={"fever": "yes"},
    ),
]
128
+
129
def reset_user_state():
    """Restore the conversation to its initial, pre-intake state."""
    global user_state
    user_state = {"stage": "intro", "responses": {}}
135
+
136
def infer_yes_no(user_input):
    """Map free-form text to "yes" or "no"; return None when unclear.

    Negative phrases are checked FIRST: the original checked affirmatives
    first, so "i am not" matched the "i am" pattern and "i don't" matched
    "i do", both wrongly returning "yes". Curly apostrophes are normalized
    so "don't" and "don\u2019t" both match.
    """
    cleaned = user_input.lower().strip().replace("\u2019", "'")
    if re.search(r"\b(no|nah|nope|i don't|i do not|not really|i am not)\b", cleaned):
        return "no"
    if re.search(r"\b(yes|yeah|yep|i do|i am|sure|of course)\b", cleaned):
        return "yes"
    return None
143
+
144
def query_openrouter(prompt):
    """Send *prompt* to the OpenRouter chat-completions API, return reply text.

    Never raises: any failure (network, non-2xx status, malformed payload)
    is returned as a human-readable "LLM Error: ..." string so callers can
    show it directly in the chat.
    """
    messages = [
        {
            "role": "system",
            "content": (
                "You are a friendly virtual doctor. Respond in second person only. "
                "Give a short, kind explanation of what the user might be facing. "
                "Do not repeat already known symptoms. Be medically helpful but clear."
            )
        },
        {"role": "user", "content": prompt}
    ]

    data = {
        "model": MODEL,
        "messages": messages,
    }

    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=HEADERS,
            json=data,
            timeout=20
        )
        # Surface HTTP errors explicitly: without this, a non-2xx body
        # fell through to the lookup below and produced the opaque
        # "LLM Error: 'choices'".
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except Exception as e:
        return f"LLM Error: {str(e)}"
173
+
174
def get_next_question():
    """Return the first unanswered, applicable question dict, or None.

    An entry with "depends_on" is skipped unless every prerequisite
    answer in user_state matches.
    """
    answered = user_state["responses"]
    for entry in questions:
        if entry["key"] in answered:
            continue
        prereqs = entry.get("depends_on", {})
        if any(answered.get(k) != v for k, v in prereqs.items()):
            continue
        return entry
    return None
183
+
184
def format_summary_prompt():
    """Compose the LLM prompt summarizing all collected intake answers."""
    lines = [f"{key}: {value}" for key, value in user_state["responses"].items()]
    return "Based on these user symptoms, provide a helpful diagnosis and suggestions:\n" + "\n".join(lines)
187
+
188
def handle_user_message(user_input, chat_history):
    """Advance the intake conversation by one turn.

    Drives a three-stage state machine kept in the module-level
    ``user_state``: "intro" (greet + ask first question), "questions"
    (validate and record answers), "done" (session finished).

    Args:
        user_input: Raw text the user typed this turn.
        chat_history: List of (user, bot) message tuples; mutated in place.

    Returns:
        (bot_message, chat_history). NOTE(review): if stage is "questions"
        but get_next_question() returns None, the function falls through
        and implicitly returns None — presumably unreachable in the normal
        flow; confirm with callers.
    """
    global user_state

    user_input = user_input.strip()

    # Typing "hi" after a finished session restarts the whole intake.
    if user_input.lower() == "hi" and user_state["stage"] == "done":
        reset_user_state()

    if user_state["stage"] == "intro":
        # First turn: greeting and the first question in one message.
        bot_msg = (
            "👋 Hello! I'm your virtual health assistant. I'll ask a few questions and suggest what to do.\n\n"
            + questions[0]["text"]
        )
        user_state["stage"] = "questions"
        chat_history.append((user_input, bot_msg))
        speak(bot_msg)
        return bot_msg, chat_history

    elif user_state["stage"] == "questions":
        question = get_next_question()
        if question:
            key = question["key"]
            input_value = user_input.lower()

            # Yes/No intent inference: normalize free-form replies to
            # "yes"/"no"; unrecognized input re-prompts with the question's
            # error text.
            if question["type"] == "yesno":
                inferred = infer_yes_no(input_value)
                if inferred:
                    input_value = inferred
                else:
                    bot_msg = question["error"]
                    chat_history.append((user_input, bot_msg))
                    speak(bot_msg)
                    return bot_msg, chat_history

            # Age validation (the only "int" question): re-prompt on failure.
            if question["type"] == "int" and not question["validation"](input_value):
                bot_msg = question["error"]
                chat_history.append((user_input, bot_msg))
                speak(bot_msg)
                return bot_msg, chat_history

            # Save valid input (ints stored as int, yes/no kept as strings).
            user_state["responses"][key] = int(input_value) if question["type"] == "int" else input_value

            # Red flag → end early: fever + cough + chills triggers an
            # immediate LLM recommendation and closes the session.
            if (
                user_state["responses"].get("fever") == "yes"
                and user_state["responses"].get("cough") == "yes"
                and user_state["responses"].get("chills") == "yes"
            ):
                prompt = format_summary_prompt()
                advice = query_openrouter(prompt)
                user_state["stage"] = "done"
                chat_history.append((user_input, advice))
                speak(advice)
                save_chat_session(user_state["responses"], chat_history, advice)
                return advice, chat_history

            # Otherwise ask the next pending question, or finish with the
            # LLM summary once every applicable question is answered.
            next_q = get_next_question()
            if next_q:
                bot_msg = next_q["text"]
            else:
                user_state["stage"] = "done"
                prompt = format_summary_prompt()
                bot_msg = query_openrouter(prompt)
                save_chat_session(user_state["responses"], chat_history, bot_msg)

            chat_history.append((user_input, bot_msg))
            speak(bot_msg)
            return bot_msg, chat_history

    elif user_state["stage"] == "done":
        # Session already complete; nudge the user to restart.
        bot_msg = "✅ You've completed the health check. Refresh or type 'hi' to start over."
        chat_history.append((user_input, bot_msg))
        speak(bot_msg)
        return bot_msg, chat_history