Deva1211 committed
Commit e303824 · 1 Parent(s): 1f37230

added new model

Files changed (3)
  1. app.py +16 -24
  2. app_flan_t5.py +228 -0
  3. config.py +5 -20
app.py CHANGED
@@ -100,6 +100,10 @@ def generate_response(prompt, max_tokens=None, temperature=None, top_p=None):
     """Generate response using the loaded model"""
     global model, tokenizer, current_model_name
 
+    print(f"Starting generation for prompt: {prompt}")
+    if not prompt or not prompt.strip():
+        return "Please enter a question. 😊"
+
     if model is None or tokenizer is None:
         return "❌ Model not loaded. Please wait for initialization or try restarting the space."
 
@@ -108,14 +112,9 @@ def generate_response(prompt, max_tokens=None, temperature=None, top_p=None):
     temperature = temperature or GENERATION_DEFAULTS["temperature"]
     top_p = top_p or GENERATION_DEFAULTS["top_p"]
 
-    print(f"🔄 Starting generation for prompt: {prompt[:50]}{'...' if len(prompt) > 50 else ''}")
-    if not prompt or not prompt.strip():
-        return "Please enter a question. 😊"
-    print(f"📊 Generation params: max_tokens={max_tokens}, temp={temperature}, top_p={top_p}")
-
     try:
-        full_prompt = f"{MEDICAL_SYSTEM_PROMPT}\n\nPatient/User: {prompt}\nMedical Assistant:"
-        print(f"📝 Using full prompt: {full_prompt[:100]}{'...' if len(full_prompt) > 100 else ''}")
+        full_prompt = f"{MEDICAL_SYSTEM_PROMPT}\n\nPatient/User: {prompt}\n"
+        print(f"Full prompt: {full_prompt}")
 
         # Tokenize input with proper truncation
         inputs = tokenizer(
@@ -130,20 +129,18 @@ def generate_response(prompt, max_tokens=None, temperature=None, top_p=None):
         device = next(model.parameters()).device
         inputs = {k: v.to(device) for k, v in inputs.items()}
 
-        # Generation parameters (optimized for CPU performance)
+        # Generation parameters
         generation_kwargs = {
-            "max_new_tokens": min(max_tokens, 100),  # Further reduced for better control
+            "max_new_tokens": min(max_tokens, 1024),
            "temperature": temperature,
            "top_p": top_p,
            "do_sample": GENERATION_DEFAULTS["do_sample"],
            "pad_token_id": tokenizer.eos_token_id,
            "repetition_penalty": GENERATION_DEFAULTS["repetition_penalty"],
-            "no_repeat_ngram_size": GENERATION_DEFAULTS["no_repeat_ngram_size"],
-            "early_stopping": True,  # Stop early when end token is found
-            "num_beams": 1  # Use greedy decoding for speed on CPU
+            "no_repeat_ngram_size": GENERATION_DEFAULTS["no_repeat_ngram_size"]
         }
 
-        print(f"🔧 Generating with kwargs: {generation_kwargs}")
+        print(f"Generating with kwargs: {generation_kwargs}")
 
         # Generate response
         print(f"🤖 Generating response with {current_model_name}...")
@@ -156,19 +153,14 @@ def generate_response(prompt, max_tokens=None, temperature=None, top_p=None):
         generation_time = time.time() - start_time
         print(f"⏱️ Generation completed in {generation_time:.2f} seconds")
 
-        # Decode response
+        # Decode response and extract new content
         full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        response = full_response.replace(full_prompt, "").strip()
+        print(f"Generated response: {response}")
 
-        # Extract only the new generated text
-        if "Medical Assistant:" in full_response:
-            response = full_response.split("Medical Assistant:")[-1].strip()
-        else:
-            # Fallback extraction
-            response = full_response[len(full_prompt):].strip()
-
-        # Clean up response - keep it natural as per CareConnect guidelines
-        if not response or len(response.strip()) < 3:
-            response = "Could you please provide more details about your question so I can help you better? 😊"
+        # Clean up response
+        if not response or len(response.strip()) < 10:
+            response = "Sorry, I couldn't process that. Try again or see a doctor. 😊"
 
         print(f"✅ Generated response length: {len(response)} characters")
         print(f"📄 Response preview: {response[:150]}{'...' if len(response) > 150 else ''}")
app_flan_t5.py ADDED
@@ -0,0 +1,228 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+import logging
+import gc
+import warnings
+import os
+from huggingface_hub import login
+
+# Login with the secret token
+login(token=os.getenv("HF_TOKEN"))
+
+# Suppress warnings
+warnings.filterwarnings("ignore")
+logging.getLogger("transformers").setLevel(logging.ERROR)
+
+# Configuration for FLAN-T5 Small
+MODEL_NAME = "google/flan-t5-small"
+MAX_NEW_TOKENS = 100
+TEMPERATURE = 0.7
+TOP_P = 0.9
+
+# Medical system prompt optimized for FLAN-T5
+MEDICAL_SYSTEM_PROMPT = """You are a friendly and smart medical assistant. Your job is to give short, clear, and helpful health information.
+
+Your answers should:
+- Stay focused. No long essays or extra fluff.
+- Give basic helpful steps for common symptoms like fever, cough, or headache (e.g., rest, drink fluids, take paracetamol if needed).
+- For any serious or unclear issues, remind the user to see a doctor, but do it briefly and naturally.
+- Keep responses concise and under 4 sentences when possible.
+
+Tone:
+- Friendly, supportive, and calm.
+- No robotic warnings unless needed. Keep it real and human.
+- Use emojis like 😊 or 👍 occasionally to appear friendly.
+
+Important rules:
+- NEVER include text in parentheses in your responses.
+- NEVER include any meta-instructions in your responses.
+- NEVER include reminders about what you should do in future responses.
+- DO NOT include phrases like "We're here to help" or "I'm just an AI".
+- DO NOT include any text that instructs you what to do or how to behave.
+- DO NOT include any sentences that start with "If the user asks..." or "Remember..."
+- DO NOT include "(smile)" - instead, use actual emojis like 😊 or 👍 when appropriate.
+- DO NOT include numbered references like [1], [2], etc. in your responses.
+- DO NOT include any text that explains what your response is doing.
+- DO NOT include "user:" or "assistant:" prefixes in your responses.
+- DO NOT include hypothetical user questions in your responses.
+- DO NOT refuse to answer harmless non-medical questions like jokes or general knowledge.
+- Don't give exact dosages or diagnoses.
+- Be consistent in your responses regardless of the user's role."""
+
+# Global variables
+model = None
+tokenizer = None
+
+def load_model():
+    """Load FLAN-T5 Small model"""
+    global model, tokenizer
+
+    try:
+        print("🏥 Loading FLAN-T5 Small for medical assistance...")
+
+        # Load tokenizer
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+        # Load model (FLAN-T5 is a seq2seq model)
+        model = AutoModelForSeq2SeqLM.from_pretrained(
+            MODEL_NAME,
+            torch_dtype=torch.float32,  # Use float32 for CPU
+            low_cpu_mem_usage=True
+        )
+
+        print("✅ FLAN-T5 Small loaded successfully!")
+        return True
+
+    except Exception as e:
+        print(f"❌ Failed to load model: {str(e)}")
+        return False
+
+def generate_response(prompt, max_tokens=None, temperature=None, top_p=None):
+    """Generate response using FLAN-T5"""
+    global model, tokenizer
+
+    print(f"Starting generation for prompt: {prompt}")
+    if not prompt or not prompt.strip():
+        return "Please enter a question. 😊"
+
+    if model is None or tokenizer is None:
+        return "❌ Model not loaded. Please wait for initialization."
+
+    # Use defaults if not specified
+    max_tokens = max_tokens or MAX_NEW_TOKENS
+    temperature = temperature or TEMPERATURE
+    top_p = top_p or TOP_P
+
+    try:
+        # Create instruction format for FLAN-T5
+        full_prompt = f"{MEDICAL_SYSTEM_PROMPT}\n\nQuestion: {prompt}\nAnswer:"
+        print(f"Full prompt: {full_prompt}")
+
+        # Tokenize input
+        inputs = tokenizer(
+            full_prompt,
+            return_tensors="pt",
+            truncation=True,
+            max_length=512,
+            padding=True
+        )
+
+        # Generation parameters
+        generation_kwargs = {
+            "max_new_tokens": max_tokens,
+            "temperature": temperature,
+            "top_p": top_p,
+            "do_sample": True,
+            "repetition_penalty": 1.2,
+            "early_stopping": True
+        }
+
+        print(f"Generating with kwargs: {generation_kwargs}")
+
+        # Generate response
+        with torch.no_grad():
+            outputs = model.generate(**inputs, **generation_kwargs)
+
+        # Decode response
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        print(f"Generated response: {response}")
+
+        # Clean up response
+        if not response or len(response.strip()) < 10:
+            response = "Sorry, I couldn't process that. Try again or see a doctor. 😊"
+
+        # Memory cleanup
+        del inputs, outputs
+        gc.collect()
+
+        return response
+
+    except Exception as e:
+        print(f"❌ Generation error: {str(e)}")
+        return "I encountered a technical issue. Please try again! 😊"
+
+def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
+    """Main response function for Gradio ChatInterface"""
+    if not message or not message.strip():
+        return "Please enter a medical question or concern."
+
+    try:
+        # Generate response
+        response = generate_response(
+            message.strip(),
+            max_tokens=int(max_tokens),
+            temperature=float(temperature),
+            top_p=float(top_p)
+        )
+
+        return response
+
+    except Exception as e:
+        print(f"System error: {str(e)}")
+        return "⚠️ System temporarily unavailable. Please try again later or consult a healthcare professional directly."
+
+# Load model on startup
+print("🏥 Initializing FLAN-T5 Medical Chatbot...")
+model_loaded = load_model()
+
+if model_loaded:
+    print("✅ Ready! Using FLAN-T5 Small")
+else:
+    print("⚠️ WARNING: Model failed to load. The app will run but responses may be limited.")
+
+# Create Gradio interface
+demo = gr.ChatInterface(
+    respond,
+    title="🏥 Medical Assistant (FLAN-T5)",
+    description="A lightweight medical AI assistant powered by FLAN-T5 Small. Optimized for fast responses on CPU.",
+    additional_inputs=[
+        gr.Textbox(
+            value=MEDICAL_SYSTEM_PROMPT,
+            label="System Instructions",
+            lines=4,
+            interactive=False
+        ),
+        gr.Slider(
+            minimum=50,
+            maximum=200,
+            value=MAX_NEW_TOKENS,
+            step=10,
+            label="Max new tokens"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=TEMPERATURE,
+            step=0.1,
+            label="Temperature"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=TOP_P,
+            step=0.05,
+            label="Top-p",
+        ),
+    ],
+    examples=[
+        ["What are the symptoms of diabetes?"],
+        ["How can I treat a headache?"],
+        ["What should I do for a fever?"],
+        ["Tell me about healthy eating"],
+        ["How to improve sleep quality?"]
+    ],
+    cache_examples=False,
+    theme=gr.themes.Soft(),
+    css=".gradio-container {max-width: 900px; margin: auto;}"
+)
+
+if __name__ == "__main__":
+    demo.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        share=True,
+        show_error=True,
+        debug=True
+    )
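Design note: because FLAN-T5 is an encoder-decoder model, model.generate returns only decoder tokens, so this file needs no prompt-stripping step, unlike the causal path in app.py. Also, "early_stopping" in generation_kwargs only applies to beam search; with the default num_beams=1 it should have no effect (newer transformers versions warn about such unused flags). A minimal smoke test of the same seq2seq path outside Gradio, assuming the checkpoint downloads and fits in memory:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

inputs = tokenizer("Question: What helps a mild fever?\nAnswer:", return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=64,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.2,
    )
# Seq2seq decode: the output contains only generated tokens, no prompt echo
print(tokenizer.decode(outputs[0], skip_special_tokens=True))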
config.py CHANGED
@@ -2,37 +2,22 @@
 
 # Model configurations
 MODEL_CONFIGS = {
-    # Primary medical models (replace with actual MedLLaMA2 when available)
-    "medllama2": {
-        "name": "meta-llama/Llama-2-7b-chat-hf",  # Replace with actual MedLLaMA2 model ID
-        "description": "MedLLaMA2 7B medical language model"
-    },
-
-    # Alternative medical models
     "meditron": {
         "name": "epfl-llm/meditron-7b",
         "description": "Meditron 7B medical language model"
     },
-
-    "clinical_camel": {
-        "name": "wanglab/ClinicalCamel-70B",  # Note: This is very large, might not fit
-        "description": "Clinical Camel medical model"
-    },
-
-    # Fallback models (smaller, more reliable)
     "dialogpt_medium": {
         "name": "microsoft/DialoGPT-medium",
         "description": "DialoGPT Medium (fallback)"
     },
-
-    "dialogpt_small": {
-        "name": "microsoft/DialoGPT-small",
-        "description": "DialoGPT Small (lightweight fallback)"
+    "flan_t5_small": {
+        "name": "google/flan-t5-small",
+        "description": "FLAN-T5 Small (instruction-following fallback)"
     }
 }
 
-# Default model to use (switched to lighter model for better performance)
-DEFAULT_MODEL = "dialogpt_medium"
+# Default model to use - prioritize medical model
+DEFAULT_MODEL = "meditron"
 
 # Model loading settings (optimized for CPU)
 MODEL_SETTINGS = {
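For context, a hypothetical consumer of these config entries; the real loading logic lives in app.py and may differ:

# Sketch only: look up the default entry, keeping the lighter models as
# fallbacks in case the 7B meditron checkpoint cannot load on CPU.
from config import MODEL_CONFIGS, DEFAULT_MODEL

cfg = MODEL_CONFIGS[DEFAULT_MODEL]
print(f"Default model: {cfg['name']} ({cfg['description']})")

fallback_order = [DEFAULT_MODEL, "dialogpt_medium", "flan_t5_small"]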