Bhaskar2611 committed on
Commit
f9a420a
·
verified ·
1 Parent(s): 5a572ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -58
app.py CHANGED
@@ -160,71 +160,48 @@
160
  # # Load any additional models if needed
161
  # # gr.load("models/Bhaskar2611/Capstone").launch()
162
 
163
- import torch
164
- import transformers
165
- from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
166
  import gradio as gr
167
 
168
# Load tokenizer and model from Hugging Face (fine-tuned model).
model_id = "Bhaskar2611/Capstone"

# `token=` replaces the deprecated `use_auth_token=` argument in recent
# transformers releases; True means "use the locally cached HF credentials".
tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # half precision to cut GPU memory use
    device_map="auto",          # let accelerate place layers on available devices
    token=True,
)

# Streams generated tokens to stdout as they are produced; the prompt itself
# and special tokens are suppressed from the stream.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
180
-
181
def format_prompt(message, history):
    """Assemble a Zephyr-style chat prompt.

    The prompt opens with a fixed system instruction framed as a user turn,
    replays every (user, assistant) pair from *history*, and ends with the
    new *message* followed by an open assistant tag for the model to
    complete.

    Args:
        message: The latest user message.
        history: Iterable of (user_msg, bot_msg) pairs from earlier turns.

    Returns:
        The full prompt string.
    """
    system_prompt = (
        "You are a helpful medical assistant specialized in skin diseases. "
        "Always provide accurate, responsible, and actionable information. "
        "If needed, recommend seeing a dermatologist."
    )
    parts = [
        f"<|user|>\n{system_prompt}\n<|assistant|>\nSure, I'm here to help you.</s>\n"
    ]
    parts.extend(
        f"<|user|>\n{user_turn}\n<|assistant|>\n{assistant_turn}</s>\n"
        for user_turn, assistant_turn in history
    )
    parts.append(f"<|user|>\n{message}\n<|assistant|>\n")
    return "".join(parts)
193
-
194
def respond(message, history):
    """Produce one assistant reply for the chat interface.

    Builds the full conversation prompt, runs sampled generation (tokens are
    also echoed live through the module-level streamer), then cuts the last
    assistant segment out of the decoded text.

    Args:
        message: The new user message.
        history: Prior (user, bot) message pairs.

    Returns:
        The assistant's reply text for this turn.
    """
    prompt = format_prompt(message, history)
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(
        **encoded,
        max_new_tokens=2048,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        streamer=streamer,
    )
    text = tokenizer.decode(generated[0], skip_special_tokens=True)
    # The new completion follows the final "<|assistant|>" tag; anything
    # after a closing "</s>" is discarded.
    tail = text.split("<|assistant|>")[-1]
    return tail.strip().split("</s>")[0].strip()
211
-
212
# Chat UI wiring: every submitted message is routed through `respond`.
chat = gr.ChatInterface(
    fn=respond,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(
        placeholder="Describe your symptoms or ask a question...",
        container=False,
        scale=7,
    ),
    title="Skin & Hair Disease Assistant",
    description="Ask about skin conditions, hair issues, or treatment guidance. Powered by a custom fine-tuned Zephyr model.",
    theme="soft",
    examples=["I have red patches on my skin.", "What causes hair loss?", "Is dandruff a fungal infection?"],
    cache_examples=False,
    retry_btn="🔁 Retry",
    undo_btn="↩️ Undo",
    clear_btn="🧹 Clear",
)
226
 
227
# Launch app
# Start the Gradio server only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    chat.launch()
230
 
 
160
  # # Load any additional models if needed
161
  # # gr.load("models/Bhaskar2611/Capstone").launch()
162
 
163
+ import os
164
+ from transformers import AutoTokenizer, AutoModelForCausalLM
 
165
  import gradio as gr
166
 
167
# Hub credentials come from the environment; None is fine for public repos.
hf_token = os.getenv("HF_TOKEN")

model_id = "Bhaskar2611/Capstone"

# Authenticate with the current `token=` parameter (the old `use_auth_token=`
# argument is deprecated).
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token)

# System-style preamble prepended to every question to steer the model
# toward dermatology-focused answers.
SKIN_ASSISTANT_PROMPT = (
    "You are a helpful assistant specialized in skin diseases and dermatology. "
    "Provide accurate, concise, and helpful advice about skin conditions, symptoms, "
    "treatments, and care. Always respond in a clear and empathetic way.\n\n"
)
182
 
183
def generate_response(user_input):
    """Answer one dermatology question with the fine-tuned model.

    Args:
        user_input: The user's question from the Gradio textbox.

    Returns:
        The model's generated answer, with the prompt removed.
    """
    # Combine the assistant preamble with the user's question.
    prompt = SKIN_ASSISTANT_PROMPT + user_input

    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the generated continuation; the previous
    # max_length=2048 also counted prompt tokens, so a long prompt could
    # leave little or no room for an answer.
    outputs = model.generate(
        **inputs,
        max_new_tokens=2048,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,  # avoid the missing-pad warning
    )
    # Decode only the tokens produced after the prompt. String-stripping the
    # prompt is unreliable: the decoded text need not reproduce it
    # byte-for-byte, and the old prefix check also left `user_input` glued
    # to the front of the reply.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()
195
+
196
# One-shot Q&A UI (no chat history): a single textbox in, plain text out.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=3, placeholder="Ask about skin diseases..."),
    outputs="text",
    title="Skin Disease Assistant",
    description="Ask any questions related to skin diseases and get expert-like responses.",
)
204
 
 
205
# Start the web app only when this file is run as a script, not on import.
if __name__ == "__main__":
    iface.launch()
207