csmith715 commited on
Commit
e223a00
·
1 Parent(s): 9d742c4

Initial commit

Browse files
Files changed (3) hide show
  1. README.md +47 -0
  2. app.py +159 -55
  3. data.py +42 -0
README.md CHANGED
@@ -14,3 +14,50 @@ short_description: A quick Gradio agent demo
14
  ---
15
 
16
  An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  ---
15
 
16
  An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
17
+
18
+ # 🧠 Thoughtful AI – Customer Support Agent
19
+
20
+ A lightweight conversational demo built for the **Thoughtful AI coding exercise**.
21
+ This app simulates a simple **customer service AI agent** that answers user questions about Thoughtful AI’s healthcare automation agents — using only **hardcoded responses** and a minimal similarity retriever.
22
+
23
+ ---
24
+
25
+ ## 🚀 Overview
26
+
27
+ The **Thoughtful AI Support Assistant** accepts user questions and returns the most relevant predefined answer.
28
+ It uses a lightweight token-based similarity model (no external NLP dependencies) to match user input against five common FAQs.
29
+
30
+ ### Example Questions
31
+ - What does the eligibility verification agent (EVA) do?
32
+ - How does the claims processing agent (CAM) work?
33
+ - What are the benefits of using Thoughtful AI’s agents?
34
+ - Tell me about Thoughtful AI’s agents.
35
+
36
+ ---
37
+
38
+ ## 💡 Features
39
+
40
+ - Conversational chat interface built with **Gradio Blocks**.
41
+ - **Hardcoded FAQ retrieval** (no external APIs or models required).
42
+ - **Lightweight custom tokenizer** for fuzzy keyword matching.
43
+ - Handles unknown inputs gracefully with fallback responses.
44
+ - Includes confidence scoring and clear, user-friendly formatting.
45
+
46
+ ---
47
+
48
+ ## 🧩 Tech Stack
49
+
50
+ - **Language:** Python 3.9+
51
+ - **Framework:** [Gradio](https://gradio.app)
52
+ - **Deployment:** [Hugging Face Spaces](https://huggingface.co/spaces)
53
+ - **Dependencies:** None beyond Gradio (no model downloads required)
54
+
55
+ ---
56
+
57
+ ## 🏗️ Local Development
58
+
59
+ To run locally:
60
+
61
```bash
pip install gradio
python app.py
```
app.py CHANGED
@@ -1,70 +1,174 @@
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
-
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
16
  """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
 
 
 
 
 
18
 
19
- messages = [{"role": "system", "content": system_message}]
20
 
21
- messages.extend(history)
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- messages.append({"role": "user", "content": message})
24
 
25
- response = ""
 
 
 
 
 
 
 
 
26
 
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
41
 
 
 
 
 
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
- )
62
-
63
- with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
 
 
 
 
 
67
 
 
68
 
69
  if __name__ == "__main__":
70
  demo.launch()
 
1
+ import re
2
+ from dataclasses import dataclass
3
+ from typing import List, Tuple
4
+ from data import RAW_DATA, GENERIC_HELP, EXAMPLES
5
  import gradio as gr
6
+
7
# ======= Retrieval utilities (dependency-free) =======
@dataclass
class QA:
    """A single FAQ entry plus retrieval hints used by the scorer."""
    # Canonical question text from data.RAW_DATA.
    question: str
    # Hardcoded answer returned verbatim to the user.
    answer: str
    # Extra hint tokens that boost the match score (see contains_any).
    keywords: List[str]
13
+
14
def normalize(text: str) -> str:
    """Lowercase *text*, blank out unsupported characters, and collapse runs of whitespace."""
    lowered = text.lower()
    cleaned = re.sub(r"[^a-z0-9\s\(\)\-\'&/]", " ", lowered)
    return re.sub(r"\s+", " ", cleaned).strip()
19
+
20
# A simplified tokenizer to reduce latency
def tokenize(text: str) -> List[str]:
    """Return the whitespace-delimited tokens of the normalized form of *text*."""
    normalized = normalize(text)
    return normalized.split()
23
+
24
# Other similarity measures could be used, but jaccard is simple enough and it works
def jaccard(a: List[str], b: List[str]) -> float:
    """Jaccard similarity |A∩B| / |A∪B| of two token lists; 0.0 when both are empty."""
    set_a = set(a)
    set_b = set(b)
    union = set_a | set_b
    if not union:
        # An empty union means both inputs were empty — avoid dividing by zero.
        return 0.0
    return len(set_a & set_b) / len(union)
30
+
31
def seq_ratio(a: str, b: str) -> float:
    """Character-overlap ratio: shared characters over the larger character set.

    Lightweight, dependency-free stand-in for a sequence matcher; returns 0.0
    when both strings are empty.
    """
    chars_a, chars_b = set(a), set(b)
    larger = max(len(chars_a), len(chars_b))
    if larger == 0:
        return 0.0
    return len(chars_a & chars_b) / larger
37
+
38
def contains_any(text: str, needles: List[str]) -> int:
    """Count how many of *needles* appear as substrings of the normalized *text*."""
    haystack = normalize(text)
    hits = 0
    for needle in needles:
        if needle in haystack:
            hits += 1
    return hits
41
+
42
def build_qa_bank(raw) -> List[QA]:
    """Turn the raw FAQ dict into QA records, attaching keyword hints per agent.

    Hints are derived from substrings of the question text; a question can
    collect hints from several groups (e.g. an overview question mentioning
    multiple agents).
    """
    # (trigger substrings in the lowercased question, keywords to attach)
    hint_groups = [
        (("eva",), ["eva", "eligibility", "benefits", "verification"]),
        (("cam",), ["cam", "claims", "processing", "reimbursement"]),
        (("phil",), ["phil", "payment", "posting", "reconciliation"]),
        (("agents", "thoughtful ai"),
         ["agents", "thoughtful ai", "suite", "automation", "healthcare"]),
    ]
    bank: List[QA] = []
    for item in raw["questions"]:
        question = item["question"]
        lowered = question.lower()
        keywords: List[str] = []
        for triggers, extra in hint_groups:
            if any(trigger in lowered for trigger in triggers):
                keywords += extra
        bank.append(QA(question, item["answer"], keywords))
    return bank
59
+
60
+
61
# Module-level FAQ bank, built once at import time from data.RAW_DATA.
QA_BANK = build_qa_bank(RAW_DATA)
62
+
63
def score_query(user_msg: str, qa: QA) -> float:
    """Return a confidence score for how well `qa` answers `user_msg`.

    The score blends word overlap (Jaccard) between the user message and the
    question+answer text, character overlap against the question and the
    answer, plus small additive bonuses for keyword hits and the word
    "agent". It is capped at 1.5, so values above 1.0 are possible for very
    strong matches.
    """
    u_norm = normalize(user_msg)
    # `u_norm` is already normalized; a plain split avoids the redundant
    # second normalize() that tokenize(u_norm) would perform.
    u_tokens = u_norm.split()
    q_tokens = tokenize(qa.question + " " + qa.answer)

    s_jaccard = jaccard(u_tokens, q_tokens)              # word overlap
    s_seq_q = seq_ratio(u_norm, normalize(qa.question))  # char overlap vs question
    s_seq_a = seq_ratio(u_norm, normalize(qa.answer))    # char overlap vs answer
    s_kw = 0.06 * contains_any(u_norm, qa.keywords)      # keyword hints
    s_agent_hint = 0.03 if "agent" in u_norm else 0.0    # generic nudge toward agent FAQs

    score = (0.5 * s_jaccard) + (0.25 * s_seq_q) + (0.15 * s_seq_a) + s_kw + s_agent_hint
    return min(score, 1.5)
77
+
78
def retrieve_best_answer(user_msg: str) -> Tuple[str, str, float]:
    """Return (matched_question, answer, score) for the best-scoring FAQ.

    Ties keep the earliest entry in QA_BANK (strict `>` comparison).

    Raises:
        ValueError: if the QA bank is empty. Previously this path crashed
        with an opaque ``AttributeError`` on ``None``.
    """
    if not QA_BANK:
        raise ValueError("QA bank is empty; check data.RAW_DATA")
    best = QA_BANK[0]
    best_score = score_query(user_msg, best)
    for qa in QA_BANK[1:]:
        s = score_query(user_msg, qa)
        if s > best_score:
            best, best_score = qa, s
    return best.question, best.answer, best_score
86
+
87
# ======= Chat logic =======
def chat_step(user_msg: str, history: List[Tuple[str, str]], show_conf: bool):
    """
    Stateless step function for the UI: append one (user, bot) exchange to
    `history` and return it together with an empty textbox string.
    """
    try:
        user_msg = (user_msg or "").strip()

        if not user_msg:
            # Gentle nudge on empty input instead of crashing the flow.
            reply = "Please enter a question about Thoughtful AI’s agents (EVA, CAM, PHIL)."
        else:
            matched_q, answer, score = retrieve_best_answer(user_msg)
            if score < 0.18:  # arbitrary low-confidence cutoff
                reply = (
                    f"Here’s a quick overview:\n\n{GENERIC_HELP}\n\n"
                    f"_Tip: mention an agent name like EVA, CAM, or PHIL for a precise answer._"
                )
            else:
                reply = f"**Answer:** {answer}"
                if show_conf:
                    reply += (
                        f"\n\n_Matched topic:_ “{matched_q}” \n"
                        f"_Confidence:_ {score:.2f}"
                    )

        return history + [(user_msg, reply)], ""

    except Exception as exc:
        # UI robustness: never let an internal error break the chat loop.
        reply = (
            "Sorry — I ran into an unexpected error while processing that. "
            "Please try again or rephrase your question."
        )
        # In a real setting, `exc` would go to a file/monitoring system.
        print(exc)
        return history + [(user_msg or "", reply)], ""
127
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
# ======= UI =======
CSS = """
#app-title {font-size: 28px; font-weight: 700; margin-bottom: 2px;}
#app-sub {opacity: 0.8; margin-bottom: 16px;}
"""

with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        "<div id='app-title'>Thoughtful AI – Support Assistant</div>"
        "<div id='app-sub'>Ask about EVA, CAM, PHIL, or general benefits.</div>"
    )

    with gr.Row():
        show_conf = gr.Checkbox(label="Show match & confidence", value=True)

    chatbot = gr.Chatbot(type='tuples', height=380)
    with gr.Row():
        inp = gr.Textbox(placeholder="Ask a question about Thoughtful AI…", lines=2)
    with gr.Row():
        submit = gr.Button("Ask", variant="primary")
        clear = gr.Button("Clear Chat")

    gr.Examples(examples=EXAMPLES, inputs=inp, label="Try these")

    state = gr.State([])  # chat history — the single source of truth

    def on_submit(user_msg, history, conf):
        # Write the new history to BOTH the chatbot display and the state
        # component directly. (Relying on a chatbot.change callback to copy
        # the display back into state is indirect and race-prone under
        # rapid submits.)
        new_history, cleared = chat_step(user_msg, history, conf)
        return new_history, new_history, cleared

    submit.click(on_submit, inputs=[inp, state, show_conf], outputs=[chatbot, state, inp])
    inp.submit(on_submit, inputs=[inp, state, show_conf], outputs=[chatbot, state, inp])

    def on_clear():
        # Reset the display, the stored history, and the input box together.
        return [], [], ""

    clear.click(on_clear, outputs=[chatbot, state, inp])
172
 
173
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
data.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Base Q&A
# Hardcoded FAQ dataset: each entry pairs a canonical question with its
# predefined answer. Consumed by build_qa_bank() in app.py.
RAW_DATA = {
    "questions": [
        {
            "question": "What does the eligibility verification agent (EVA) do?",
            "answer": "EVA automates the process of verifying a patient’s eligibility and benefits information in real-time, eliminating manual data entry errors and reducing "
                      "claim rejections."
        },
        {
            "question": "What does the claims processing agent (CAM) do?",
            "answer": "CAM streamlines the submission and management of claims, improving accuracy, reducing manual intervention, and accelerating reimbursements."
        },
        {
            "question": "How does the payment posting agent (PHIL) work?",
            "answer": "PHIL automates the posting of payments to patient accounts, ensuring fast, accurate reconciliation of payments and reducing administrative burden."
        },
        {
            "question": "Tell me about Thoughtful AI's Agents.",
            "answer": "Thoughtful AI provides a suite of AI-powered automation agents designed to streamline healthcare processes. These include Eligibility Verification (EVA), "
                      "Claims Processing (CAM), and Payment Posting (PHIL), among others."
        },
        {
            "question": "What are the benefits of using Thoughtful AI's agents?",
            "answer": "Using Thoughtful AI's Agents can significantly reduce administrative costs, improve operational efficiency, and reduce errors in critical processes like "
                      "claims management and payment posting."
        }
    ]
}

# Fallback text shown when no FAQ scores above the confidence cutoff in chat_step().
GENERIC_HELP = (
    "I can help with Thoughtful AI’s healthcare automation agents. "
    "Ask me about Eligibility Verification (EVA), Claims Processing (CAM), or Payment Posting (PHIL), "
    "or say “Tell me about Thoughtful AI’s agents.”"
)

# Sample prompts surfaced in the UI via gr.Examples.
EXAMPLES = [
    "What does EVA do?",
    "How does CAM help with claims?",
    "Tell me about Thoughtful AI's agents",
    "What are the benefits of using Thoughtful AI?",
    "How does PHIL work?",
]