# Source header (Hugging Face Space page metadata, preserved as a comment):
# author: Rajan Sharma — "Update app.py", commit a0bcec9 (verified), 2.2 kB.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from datetime import datetime, timezone
# Initialize the model and tokenizer at import time.
# NOTE(review): this downloads and loads the full Command-A checkpoint when the
# module is imported — requires network access and substantial RAM/VRAM; no
# device placement or dtype is specified, so it loads on CPU in full precision
# by default — confirm this is intended for the deployment target.
model_id = "CohereLabs/c4ai-command-a-03-2025"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
def get_timestamp():
    """Return the current UTC time formatted as 'YYYY-MM-DD HH:MM:SS'.

    Returns:
        str: e.g. '2025-01-31 12:34:56'.
    """
    # Bug fix: the format string was '%Y-%m-%d %H:%M:%SS' — the trailing
    # literal 'S' produced values like '12:34:56S', contradicting the
    # 'HH:MM:SS' format this module advertises elsewhere.
    return datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
def format_system_info():
    """Build the per-response header: current UTC time plus the user login.

    Returns:
        str: Two newline-terminated lines (timestamp line, login line).
    """
    timestamp = get_timestamp()
    header_lines = [
        f"Current Date and Time (UTC - YYYY-MM-DD HH:MM:SS formatted): {timestamp}",
        "Current User's Login: Raj-VedAI",
    ]
    # Each line ends with '\n', matching the original concatenated form.
    return "\n".join(header_lines) + "\n"
def chat(message, history):
    """Gradio ChatInterface handler: generate one model reply to *message*.

    Args:
        message: The user's latest message (str).
        history: Prior turns supplied by Gradio. Not fed to the model here
            (each turn is generated from the latest message alone); kept in
            the signature for ChatInterface compatibility.

    Returns:
        str: The formatted assistant reply (system-info header + model text).
            gr.ChatInterface expects the reply string, not the history list —
            the original returned the mutated history, which ChatInterface
            would render incorrectly.
    """
    # Header with timestamp/login, prepended to every reply.
    system_info = format_system_info()

    # Format the single-turn prompt with the model's chat template.
    # Bug fix: tokenize=True alone returns a plain Python id list, which
    # model.generate cannot consume — request a torch tensor instead.
    messages = [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )

    # Generate the response.
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.3,
    )

    # Bug fix: decode only the newly generated continuation, not the echoed
    # prompt, and drop special tokens from the user-visible text.
    gen_text = tokenizer.decode(
        gen_tokens[0][input_ids.shape[-1]:],
        skip_special_tokens=True,
    )

    return f"{system_info}\n{gen_text}"
# Custom look: white blocks with dark rounded borders on a light-blue page.
theme = gr.themes.Default()
theme = theme.set(
    block_background_fill="#ffffff",
    block_border_color="#2c3e50",
    block_border_width="1px",
    block_radius="10px",
    body_background_fill="#f0f8ff",
)
# Create the Gradio chat interface and launch the app.
# NOTE(review): get_timestamp() in the description is evaluated once at
# startup, so the banner shows the launch time, not the current time —
# per-message timestamps come from chat(). Confirm this is intended.
demo = gr.ChatInterface(
    fn=chat,
    title="Medical Decision Support AI",
    description="""A medical decision support system that provides healthcare-related information and guidance.
Current UTC Time: """ + get_timestamp(),
    theme=theme,
    examples=[
        "What are the symptoms of hypertension?",
        "What are common drug interactions with aspirin?",
        "What are the warning signs of diabetes?",
    ],
    # NOTE(review): retry_on_error does not appear to be a documented
    # gr.ChatInterface parameter in recent Gradio releases — verify against
    # the pinned gradio version; unknown kwargs raise TypeError at startup.
    retry_on_error=True
)
demo.launch()