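"""Medical Decision Support AI (Gradio chat Space).

Routes chat turns to Cohere's hosted API when COHERE_API_KEY is set and the
SDK is installed; otherwise falls back to running MODEL_ID locally via
transformers.
"""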
import os
import re
from datetime import datetime, timezone
from functools import lru_cache

import gradio as gr
import torch

# Timezone (Python 3.9+)
try:
    from zoneinfo import ZoneInfo
except Exception:
    ZoneInfo = None

# Cohere SDK
try:
    import cohere
    _HAS_COHERE = True
except Exception:
    _HAS_COHERE = False

from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# -------------------
# Config
# -------------------
MODEL_ID = os.getenv("MODEL_ID", "CohereLabs/c4ai-command-r7b-12-2024")
HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)
# -------------------
# Helpers
# -------------------
def pick_dtype_and_map():
    # Prefer CUDA, then Apple MPS, then CPU; fp16 on accelerators, fp32 on CPU.
    if torch.cuda.is_available():
        return torch.float16, "auto"
    if torch.backends.mps.is_available():
        return torch.float16, {"": "mps"}
    return torch.float32, "cpu"
def history_to_messages(history):
    """Normalize Gradio history into [{"role", "content"}, ...] dicts.

    ChatInterface(type="messages") already delivers that shape; older
    tuple-style (user, assistant) pairs are converted."""
    msgs = []
    for item in (history or []):
        if isinstance(item, dict):
            msgs.append({"role": item.get("role"), "content": item.get("content")})
        elif isinstance(item, (list, tuple)) and len(item) == 2:
            u, a = item
            if u:
                msgs.append({"role": "user", "content": u})
            if a:
                msgs.append({"role": "assistant", "content": a})
    return msgs

def is_identity_query(message, history):
    patterns = [
        r"\bwho\s+are\s+you\b", r"\bwhat\s+are\s+you\b",
        r"\bwhat\s+is\s+your\s+name\b", r"\bwho\s+is\s+this\b",
        r"\bidentify\s+yourself\b", r"\btell\s+me\s+about\s+yourself\b",
        r"\bdescribe\s+yourself\b", r"\band\s+you\s*\?\b",
        r"\byour\s+name\b", r"\bwho\s+am\s+i\s+chatting\s+with\b"
    ]
    def match(t):
        return any(re.search(p, (t or "").strip().lower()) for p in patterns)
    if match(message):
        return True
    # Also check the most recent user turn in the history.
    user_turns = [m["content"] for m in history_to_messages(history)
                  if m["role"] == "user" and isinstance(m["content"], str)]
    if user_turns and match(user_turns[-1]):
        return True
    return False
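# Quick sanity checks (illustrative calls, not part of the app):
#   history_to_messages([("hi", "hello")])   -> one user dict + one assistant dict
#   is_identity_query("Who are you?", [])    -> True
#   is_identity_query("Aspirin dosage?", []) -> False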
# -------------------
# Cohere Hosted
# -------------------
_co_client = None
if USE_HOSTED_COHERE:
    # ClientV2 exposes the messages-style chat API used in cohere_chat below.
    _co_client = cohere.ClientV2(api_key=COHERE_API_KEY)
def _cohere_parse(resp):
    # V2 chat responses carry a list of content parts under resp.message.
    if getattr(resp, "message", None) and getattr(resp.message, "content", None):
        for p in resp.message.content:
            if hasattr(p, "text") and p.text:
                return p.text.strip()
    # Fallback for older (V1) response shapes that expose plain .text.
    if hasattr(resp, "text") and resp.text:
        return resp.text.strip()
    return "Sorry, I couldn't parse the response from Cohere."
def cohere_chat(message, history):
    try:
        msgs = history_to_messages(history)
        msgs.append({"role": "user", "content": message})
        # ClientV2.chat takes role/content message dicts.
        resp = _co_client.chat(
            model="command-r7b-12-2024",
            messages=msgs,
            temperature=0.3,
            max_tokens=350,
        )
        return _cohere_parse(resp)
    except Exception as e:
        return f"Error calling Cohere API: {e}"
# -------------------
# Local HF Model
# -------------------
@lru_cache(maxsize=1)  # load tokenizer and model once, not on every chat turn
def load_local_model():
    if not HF_TOKEN:
        raise RuntimeError(
            "HUGGINGFACE_HUB_TOKEN is not set."
        )
    login(token=HF_TOKEN, add_to_git_credential=False)
    dtype, device_map = pick_dtype_and_map()
    tok = AutoTokenizer.from_pretrained(
        MODEL_ID,
        token=HF_TOKEN,
        use_fast=True,
        model_max_length=4096,
        padding_side="left",
        trust_remote_code=True,
    )
    mdl = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        token=HF_TOKEN,
        device_map=device_map,
        low_cpu_mem_usage=True,
        torch_dtype=dtype,
        trust_remote_code=True,
    )
    if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
        mdl.config.eos_token_id = tok.eos_token_id
    return mdl, tok
def build_inputs(tokenizer, message, history):
    msgs = history_to_messages(history)
    msgs.append({"role": "user", "content": message})
    return tokenizer.apply_chat_template(
        msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )
def local_generate(model, tokenizer, input_ids, max_new_tokens=350):
    input_ids = input_ids.to(model.device)
    with torch.no_grad():
        out = model.generate(
            input_ids=input_ids,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.3,
            top_p=0.9,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens, not the prompt.
    gen_only = out[0, input_ids.shape[-1]:]
    return tokenizer.decode(gen_only, skip_special_tokens=True).strip()
# -------------------
# Chat Function
# -------------------
def chat_fn(message, history, user_tz):
    # user_tz carries the browser timezone (populated in demo.load below);
    # unused for now, but available for timezone-aware replies.
    try:
        if is_identity_query(message, history):
            return "I am ClarityOps, your strategic decision-making AI partner."
        if USE_HOSTED_COHERE:
            return cohere_chat(message, history)
        model, tokenizer = load_local_model()
        inputs = build_inputs(tokenizer, message, history)
        return local_generate(model, tokenizer, inputs, max_new_tokens=350)
    except Exception as e:
        return f"Error: {e}"
# -------------------
# Theme & CSS
# -------------------
theme = gr.themes.Soft(
    primary_hue="teal",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_lg,
)

custom_css = """
:root {
  --brand-bg: #e6f7f8;       /* soft medical teal */
  --brand-accent: #0d9488;   /* teal-600 */
  --brand-text-light: #ffffff;
}
.gradio-container {
  background: var(--brand-bg);
}
h1 {
  color: #0f172a;
  font-weight: 700;
  font-size: 28px !important;
}
/* Both bot and user bubbles teal with white text */
.message.user, .message.bot {
  background: var(--brand-accent) !important;
  color: var(--brand-text-light) !important;
}
"""
# -------------------
# UI
# -------------------
with gr.Blocks(theme=theme, css=custom_css) as demo:
    # Hidden textbox filled client-side with the browser timezone; the js
    # function's return value is passed through the identity lambda into tz_box.
    tz_box = gr.Textbox(visible=False)
    demo.load(lambda tz: tz, inputs=[tz_box], outputs=[tz_box],
              js="() => Intl.DateTimeFormat().resolvedOptions().timeZone")

    gr.Markdown("# Medical Decision Support AI")
    gr.ChatInterface(
        fn=chat_fn,
        type="messages",
        additional_inputs=[tz_box],
        # Each example is [message, value-for-tz_box].
        examples=[
            ["What are the symptoms of hypertension?", ""],
            ["What are common drug interactions with aspirin?", ""],
            ["What are the warning signs of diabetes?", ""],
        ],
        cache_examples=True,
    )
if __name__ == "__main__":
    demo.launch()
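# Usage (illustrative; the filename "app.py" is an assumption):
#   COHERE_API_KEY=<key> python app.py             -> hosted Cohere backend
#   HUGGINGFACE_HUB_TOKEN=<token> python app.py    -> local transformers fallback
#   MODEL_ID=<repo_id> overrides the default model.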