File size: 2,197 Bytes
1b2bc31
a0bcec9
349e0fc
1b2bc31
a0bcec9
 
 
 
1b2bc31
349e0fc
 
a0bcec9
1b2bc31
349e0fc
 
 
 
 
 
1b2bc31
349e0fc
 
 
 
 
 
 
a0bcec9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1b2bc31
349e0fc
 
a0bcec9
349e0fc
 
 
 
 
1b2bc31
349e0fc
1b2bc31
349e0fc
 
 
 
 
 
 
 
 
1b2bc31
349e0fc
1b2bc31
 
349e0fc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from datetime import datetime, timezone

# Initialize the model and tokenizer
# NOTE(review): this runs at import time and downloads/loads full LM weights —
# it can take minutes and significant memory on first run; consider lazy init.
model_id = "CohereLabs/c4ai-command-a-03-2025"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

def get_timestamp():
    """Return the current UTC time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    # Bug fix: the format string previously ended in '%SS', which emitted a
    # stray literal 'S' after the seconds (e.g. '2024-01-01 12:30:45S'),
    # breaking the 'HH:MM:SS' contract advertised in format_system_info().
    return datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')

def format_system_info():
    """Build the system-information header prepended to every reply."""
    timestamp = get_timestamp()
    header_lines = [
        f"Current Date and Time (UTC - YYYY-MM-DD HH:MM:SS formatted): {timestamp}",
        "Current User's Login: Raj-VedAI",
    ]
    # Each line ends with a newline, matching the original concatenated form.
    return "\n".join(header_lines) + "\n"

def chat(message, history):
    """Generate a model reply for *message* and append the turn to *history*.

    Parameters
    ----------
    message : str
        The user's latest input.
    history : list | None
        Prior (user, assistant) turns; a fresh list is used when None.

    Returns
    -------
    list
        The updated history including this (message, response) turn.

    NOTE(review): gr.ChatInterface conventionally expects the fn to return
    the reply (string), not the whole history — verify against the installed
    Gradio version; the return shape is kept unchanged here.
    """
    if history is None:
        history = []

    # System-information header prepended to every reply.
    system_info = format_system_info()

    # Format the user message with the model's chat template.
    # Bug fix: apply_chat_template(tokenize=True) returns a plain list of
    # token ids, but model.generate() expects a tensor — request PyTorch
    # tensors explicitly.
    messages = [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )

    # Generate the response (sampled, low temperature for stable answers).
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.3,
    )

    # Bug fix: decode only the newly generated tokens — the raw generate()
    # output includes the prompt — and strip special tokens from the text.
    new_tokens = gen_tokens[0][input_ids.shape[-1]:]
    gen_text = tokenizer.decode(new_tokens, skip_special_tokens=True)

    # Prepend the system-info header and record the turn.
    formatted_response = f"{system_info}\n{gen_text}"
    history.append((message, formatted_response))
    return history

# Create custom theme
# NOTE(review): the .set() keys below depend on the installed Gradio
# version's theme API — confirm names such as `block_radius` are valid
# (some releases use `block_radius` vs `radius_size`).
theme = gr.themes.Default().set(
    body_background_fill="#f0f8ff",   # light-blue page background
    block_background_fill="#ffffff",  # white component background
    block_border_width="1px",
    block_border_color="#2c3e50",
    block_radius="10px"
)

# Create the Gradio interface
# NOTE(review): the timestamp in `description` is evaluated once at startup
# and therefore goes stale while the app runs.
# NOTE(review): `retry_on_error` does not appear to be a documented
# gr.ChatInterface parameter in current Gradio releases — confirm against
# the installed version; an unknown kwarg raises TypeError at startup.
demo = gr.ChatInterface(
    fn=chat,
    title="Medical Decision Support AI",
    description="""A medical decision support system that provides healthcare-related information and guidance.
    Current UTC Time: """ + get_timestamp(),
    theme=theme,
    examples=[
        "What are the symptoms of hypertension?",
        "What are common drug interactions with aspirin?",
        "What are the warning signs of diabetes?",
    ],
    retry_on_error=True
)

# Start the Gradio server (blocks until shut down).
demo.launch()