Pull request #26: "model path" — opened by ismailhakki37
File changed: handler.py (+2 −2)
|
@@ -249,7 +249,7 @@ def clear_history():
|
|
| 249 |
return {"error": "LLaVA modules not available"}
|
| 250 |
|
| 251 |
try:
|
| 252 |
-
chatbot_instance = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
|
| 253 |
chatbot_instance.conversation = conv_templates[chatbot_instance.conv_mode].copy()
|
| 254 |
return {"status": "success", "message": "Conversation history cleared"}
|
| 255 |
except Exception as e:
|
|
@@ -268,7 +268,7 @@ def generate_response(message_text, image_input, temperature=0.05, top_p=1.0, ma
|
|
| 268 |
if not message_text or not image_input:
|
| 269 |
return {"error": "Both message text and image are required"}
|
| 270 |
|
| 271 |
-
our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
|
| 272 |
|
| 273 |
# Process image input
|
| 274 |
try:
|
|
|
|
| 249 |
return {"error": "LLaVA modules not available"}
|
| 250 |
|
| 251 |
try:
|
| 252 |
+
chatbot_instance = chat_manager.get_chatbot(args, args.model_path if args else "PULSE-ECG/PULSE-7B", tokenizer, model, image_processor, context_len)
|
| 253 |
chatbot_instance.conversation = conv_templates[chatbot_instance.conv_mode].copy()
|
| 254 |
return {"status": "success", "message": "Conversation history cleared"}
|
| 255 |
except Exception as e:
|
|
|
|
| 268 |
if not message_text or not image_input:
|
| 269 |
return {"error": "Both message text and image are required"}
|
| 270 |
|
| 271 |
+
our_chatbot = chat_manager.get_chatbot(args, args.model_path if args else "PULSE-ECG/PULSE-7B", tokenizer, model, image_processor, context_len)
|
| 272 |
|
| 273 |
# Process image input
|
| 274 |
try:
|