Rajan Sharma committed on
Commit 0b1c3ed · verified · 1 Parent(s): 6c7162d

Update app.py

Files changed (1)
  1. app.py +25 -289
app.py CHANGED
@@ -1,321 +1,57 @@
- import os
- import time
- import re
- from datetime import datetime, timezone
- from functools import lru_cache
-
- import gradio as gr
- import torch
-
- # Timezone conversion (Python 3.9+ stdlib)
- try:
-     from zoneinfo import ZoneInfo
- except Exception:
-     ZoneInfo = None  # graceful fallback to UTC
-
- # Try to import Cohere SDK if present (for hosted path)
- try:
-     import cohere  # pip install cohere
-     _HAS_COHERE = True
- except Exception:
-     _HAS_COHERE = False
-
- from transformers import AutoTokenizer, AutoModelForCausalLM
- from huggingface_hub import login, HfApi
-
-
- # -------------------
- # Configuration
- # -------------------
- MODEL_ID = os.getenv("MODEL_ID", "CohereLabs/c4ai-command-r7b-12-2024")
-
- HF_TOKEN = (
-     os.getenv("HUGGINGFACE_HUB_TOKEN")  # official Spaces name
-     or os.getenv("HF_TOKEN")
- )
-
- COHERE_API_KEY = os.getenv("COHERE_API_KEY")
- USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)
-
-
- # -------------------
- # Helpers (used for the connection card only)
- # -------------------
- def local_now_str(user_tz: str | None) -> tuple[str, str]:
-     """Returns (label, formatted_time). Falls back to UTC if tz missing/invalid."""
-     label = "UTC"
-     dt = datetime.now(timezone.utc)
-     if user_tz and ZoneInfo is not None:
-         try:
-             tz = ZoneInfo(user_tz)
-             dt = datetime.now(tz)
-             label = user_tz
-         except Exception:
-             dt = datetime.now(timezone.utc)
-             label = "UTC"
-     return label, dt.strftime("%Y-%m-%d %H:%M:%S")
-
-
- def header(processing_time=None, user_tz: str | None = None):
-     """Only used in the connection status panel (not in chat replies)."""
-     tz_label, now_str = local_now_str(user_tz)
-     s = (
-         f"Current Date and Time ({tz_label} - YYYY-MM-DD HH:MM:SS formatted): {now_str}\n"
-         f"Current User's Login: Raj-VedAI\n"
-     )
-     if processing_time is not None:
-         s += f"Processing Time: {processing_time:.2f} seconds\n"
-     return s
-
-
- def pick_dtype_and_map():
-     if torch.cuda.is_available():
-         return torch.float16, "auto"
-     if torch.backends.mps.is_available():
-         return torch.float16, {"": "mps"}
-     return torch.float32, "cpu"  # CPU path (likely too big for R7B)
-
-
- def is_identity_query(message: str, history) -> bool:
-     """Detects identity questions in current message or most recent user turn."""
-     patterns = [
-         r"\bwho\s+are\s+you\b",
-         r"\bwhat\s+are\s+you\b",
-         r"\bwhat\s+is\s+your\s+name\b",
-         r"\bwho\s+is\s+this\b",
-         r"\bidentify\s+yourself\b",
-         r"\btell\s+me\s+about\s+yourself\b",
-         r"\bdescribe\s+yourself\b",
-         r"\band\s+you\s*\?\b",
-         r"\byour\s+name\b",
-         r"\bwho\s+am\s+i\s+chatting\s+with\b",
-     ]
-
-     def hit(text: str | None) -> bool:
-         t = (text or "").strip().lower()
-         return any(re.search(p, t) for p in patterns)
-
-     if hit(message):
-         return True
-
-     if history:
-         # Gradio history: List[Tuple[user, assistant]]
-         last_user = history[-1][0] if isinstance(history[-1], (list, tuple)) and history[-1] else None
-         if hit(last_user):
-             return True
-
-     return False
-
-
- # -------------------
- # Cohere Hosted Path
- # -------------------
- _co_client = None
- if USE_HOSTED_COHERE:
-     _co_client = cohere.Client(api_key=COHERE_API_KEY)
-
-
- def _cohere_parse(resp):
-     # v5+ responses.create
-     if hasattr(resp, "output_text") and resp.output_text:
-         return resp.output_text.strip()
-     if getattr(resp, "message", None) and getattr(resp.message, "content", None):
-         for p in resp.message.content:
-             if hasattr(p, "text") and p.text:
-                 return p.text.strip()
-     # v4 chat
-     if hasattr(resp, "text") and resp.text:
-         return resp.text.strip()
-     return "Sorry, I couldn't parse the response from Cohere."
-
-
- def cohere_chat(message, history):
-     try:
-         # Prefer modern API
-         try:
-             msgs = []
-             for u, a in (history or []):
-                 msgs.append({"role": "user", "content": u})
-                 msgs.append({"role": "assistant", "content": a})
-             msgs.append({"role": "user", "content": message})
-             resp = _co_client.responses.create(
-                 model="command-r7b-12-2024",
-                 messages=msgs,
-                 temperature=0.3,
-                 max_tokens=350,
-             )
-         except Exception:
-             # Fallback to older chat API
-             resp = _co_client.chat(
-                 model="command-r7b-12-2024",
-                 message=message,
-                 temperature=0.3,
-                 max_tokens=350,
-             )
-         return _cohere_parse(resp)
-     except Exception as e:
-         return f"Error calling Cohere API: {e}"
-
-
- # -------------------
- # Local HF Path
- # -------------------
- @lru_cache(maxsize=1)
- def load_local_model():
-     if not HF_TOKEN:
-         raise RuntimeError(
-             "HUGGINGFACE_HUB_TOKEN (or HF_TOKEN) is not set. "
-             "Either set it, or provide COHERE_API_KEY to use Cohere's hosted API."
-         )
-
-     login(token=HF_TOKEN, add_to_git_credential=False)
-
-     dtype, device_map = pick_dtype_and_map()
-     tok = AutoTokenizer.from_pretrained(
-         MODEL_ID,
-         token=HF_TOKEN,
-         use_fast=True,
-         model_max_length=4096,
-         padding_side="left",
-         trust_remote_code=True,
-     )
-     mdl = AutoModelForCausalLM.from_pretrained(
-         MODEL_ID,
-         token=HF_TOKEN,
-         device_map=device_map,
-         low_cpu_mem_usage=True,
-         torch_dtype=dtype,
-         trust_remote_code=True,
-     )
-     if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
-         mdl.config.eos_token_id = tok.eos_token_id
-     return mdl, tok
-
-
- def build_inputs(tokenizer, message, history):
-     msgs = []
-     for u, a in (history or []):
-         msgs.append({"role": "user", "content": u})
-         msgs.append({"role": "assistant", "content": a})
-     msgs.append({"role": "user", "content": message})
-     return tokenizer.apply_chat_template(
-         msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
-     )
-
-
- def local_generate(model, tokenizer, input_ids, max_new_tokens=350):
-     input_ids = input_ids.to(model.device)
-     with torch.no_grad():
-         out = model.generate(
-             input_ids=input_ids,
-             max_new_tokens=max_new_tokens,
-             do_sample=True,
-             temperature=0.3,
-             top_p=0.9,
-             repetition_penalty=1.15,
-             pad_token_id=tokenizer.eos_token_id,
-             eos_token_id=tokenizer.eos_token_id,
-         )
-     gen_only = out[0, input_ids.shape[-1]:]
-     text = tokenizer.decode(gen_only, skip_special_tokens=True)
-     return text.strip()
-
-
- # -------------------
- # Chat callback (no header/meta in chat replies)
- # -------------------
- def chat_fn(message, history, user_tz):
-     try:
-         # Identity override → return ONLY the brand line
-         if is_identity_query(message, history):
-             return "I am ClarityOps, your strategic decision making AI partner. How can I assist you?"
-
-         if USE_HOSTED_COHERE:
-             return cohere_chat(message, history)
-
-         model, tokenizer = load_local_model()
-         inputs = build_inputs(tokenizer, message, history)
-         return local_generate(model, tokenizer, inputs, max_new_tokens=350)
-
-     except RuntimeError as e:
-         emsg = str(e)
-         if "out of memory" in emsg.lower() or "cuda" in emsg.lower():
-             return "Local load likely OOM. Use a GPU Space or set COHERE_API_KEY to run via Cohere hosted API."
-         return f"Error during chat: {e}"
-     except Exception as e:
-         return f"Error during chat: {e}"
-
-
- # -------------------
- # Connection check (keeps header/meta)
- # -------------------
- def check_connection(user_tz=None):
-     try:
-         mode = "Cohere API (hosted)" if USE_HOSTED_COHERE else "Local HF"
-         if USE_HOSTED_COHERE:
-             return (
-                 f"{header(user_tz=user_tz)}"
-                 f"Connection Status: ✅ Using Cohere hosted API\n"
-                 f"Mode: {mode}\n"
-                 f"Model: command-r7b-12-2024\n"
-             )
-         api = HfApi(token=HF_TOKEN)
-         mi = api.model_info(MODEL_ID)
-         return (
-             f"{header(user_tz=user_tz)}"
-             f"Connection Status: ✅ Connected\n"
-             f"Mode: {mode}\n"
-             f"Model: {mi.modelId}\n"
-             f"Last Modified: {mi.lastModified}\n"
-         )
-     except Exception as e:
-         return f"{header(user_tz=user_tz)}Connection Status: ❌ Error\nDetails: {e}"
-
-
  # -------------------
  # UI
  # -------------------
  with gr.Blocks(theme=gr.themes.Default()) as demo:
-     # Hidden textbox to hold browser timezone (Gradio expects components for outputs)
+     # Hidden textbox to hold browser timezone
      tz_box = gr.Textbox(visible=False)

      # On load, capture browser timezone via JS and write it into tz_box
      demo.load(
-         fn=lambda tz: tz,      # echo the JS value back to Python
-         inputs=[tz_box],       # must provide 1 input for the lambda
-         outputs=[tz_box],      # write result into the same hidden box
+         fn=lambda tz: tz,
+         inputs=[tz_box],
+         outputs=[tz_box],
          js="() => Intl.DateTimeFormat().resolvedOptions().timeZone"
      )

+     # Automatically determine connection info once tz is available
+     def model_status(user_tz):
+         try:
+             if USE_HOSTED_COHERE:
+                 return "✅ Connected to: Cohere API (model: command-r7b-12-2024)"
+             api = HfApi(token=HF_TOKEN)
+             mi = api.model_info(MODEL_ID)
+             return f"✅ Connected to: Local HF model ({mi.modelId})"
+         except Exception as e:
+             return f"❌ Connection Error: {e}"
+
+     # Heading
      gr.Markdown("# Medical Decision Support AI")

-     with gr.Row():
-         btn = gr.Button("Check Connection Status")
-         status = gr.Textbox(label="Connection Status", lines=7, value="Click to check…")
+     # One-line status bar
+     status_line = gr.Markdown("Connecting...")
+
+     demo.load(fn=model_status, inputs=[tz_box], outputs=[status_line])

      gr.Markdown(
          "⚙️ First response may take a moment while the model warms up. "
-         "Currently configured to use **Cohere hosted API** if `COHERE_API_KEY` is set; "
-         "otherwise, tries **local HF**."
+         # "Currently configured to use **Cohere hosted API** if `COHERE_API_KEY` is set; "
+         # "otherwise, tries **local HF**."
      )

      chat = gr.ChatInterface(
          fn=chat_fn,
          type="messages",
-         additional_inputs=[tz_box],  # pass timezone into chat_fn
+         additional_inputs=[tz_box],
          description="A medical decision support system that provides healthcare-related information and decision making support.",
-         # Each example is [message, timezone]; second field is a dummy since tz comes from JS.
          examples=[
              ["What are the symptoms of hypertension?", ""],
              ["What are common drug interactions with aspirin?", ""],
              ["What are the warning signs of diabetes?", ""],
          ],
-         cache_examples=True,  # show example chips on load
+         cache_examples=True,
      )

-     # Wire timezone into the connection check as well
-     btn.click(fn=check_connection, inputs=tz_box, outputs=status)
-
  if __name__ == "__main__":
      demo.launch()

+
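
For context on the wiring this commit standardizes on (a hidden Textbox filled with the browser timezone on page load, plus a status line computed by a second demo.load call), here is a minimal standalone sketch. It is not part of the commit: the status() and answer() functions below are hypothetical stand-ins for the Space's real model_status() and chat_fn(), which depend on USE_HOSTED_COHERE, HfApi, HF_TOKEN, and MODEL_ID defined elsewhere in app.py.

import gradio as gr


def status(user_tz: str) -> str:
    # Stand-in for model_status(); the real app checks Cohere / the HF Hub here.
    return f"✅ Ready (browser timezone: {user_tz or 'unknown'})"


def answer(message, history, user_tz):
    # Stand-in for chat_fn(); echoes the input so the wiring can be verified.
    return f"[{user_tz or 'UTC'}] You said: {message}"


with gr.Blocks() as demo:
    tz_box = gr.Textbox(visible=False)

    # The JS snippet runs in the browser; its return value is written into tz_box.
    demo.load(
        fn=lambda tz: tz,
        inputs=[tz_box],
        outputs=[tz_box],
        js="() => Intl.DateTimeFormat().resolvedOptions().timeZone",
    )

    status_line = gr.Markdown("Connecting...")
    demo.load(fn=status, inputs=[tz_box], outputs=[status_line])

    gr.ChatInterface(fn=answer, type="messages", additional_inputs=[tz_box])

if __name__ == "__main__":
    demo.launch()

One caveat when reusing this pattern: the two load events run independently, so status() may fire before the JS callback has populated tz_box; the empty-string fallback above keeps that race harmless.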