Temporary fix: add debug logging and guard against empty image-processing / model-output results in the request handler

#27
Files changed (1) hide show
  1. handler.py +32 -3
handler.py CHANGED
@@ -4,6 +4,7 @@ import torch
4
  import numpy as np
5
  import hashlib
6
  import json
 
7
  import requests
8
  from PIL import Image
9
  from io import BytesIO
@@ -303,10 +304,19 @@ def generate_response(message_text, image_input, temperature=0.05, top_p=1.0, ma
303
 
304
  # Process image for model
305
  try:
306
- image_tensor = process_images([image], our_chatbot.image_processor, our_chatbot.model.config)[0]
 
 
 
 
 
 
 
307
  image_tensor = image_tensor.half().to(our_chatbot.model.device)
308
  image_tensor = image_tensor.unsqueeze(0)
 
309
  except Exception as e:
 
310
  return {"error": f"Image processing failed: {str(e)}"}
311
 
312
  # Prepare conversation
@@ -346,8 +356,27 @@ def generate_response(message_text, image_input, temperature=0.05, top_p=1.0, ma
346
  )
347
 
348
  # Decode response
349
- response = our_chatbot.tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
350
- our_chatbot.conversation.messages[-1][-1] = response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
 
352
  # Log conversation
353
  history = [(message_text, response)]
 
4
  import numpy as np
5
  import hashlib
6
  import json
7
+ import base64
8
  import requests
9
  from PIL import Image
10
  from io import BytesIO
 
304
 
305
  # Process image for model
306
  try:
307
+ print(f"[DEBUG] Processing image for model...")
308
+ processed_images = process_images([image], our_chatbot.image_processor, our_chatbot.model.config)
309
+ print(f"[DEBUG] Processed images length: {len(processed_images)}")
310
+
311
+ if len(processed_images) == 0:
312
+ return {"error": "Image processing returned empty list"}
313
+
314
+ image_tensor = processed_images[0]
315
  image_tensor = image_tensor.half().to(our_chatbot.model.device)
316
  image_tensor = image_tensor.unsqueeze(0)
317
+ print(f"[DEBUG] Image tensor shape: {image_tensor.shape}")
318
  except Exception as e:
319
+ print(f"[DEBUG] Image processing error: {str(e)}")
320
  return {"error": f"Image processing failed: {str(e)}"}
321
 
322
  # Prepare conversation
 
356
  )
357
 
358
  # Decode response
359
+ try:
360
+ print(f"[DEBUG] Outputs shape: {outputs.shape if hasattr(outputs, 'shape') else 'No shape attr'}")
361
+ print(f"[DEBUG] Outputs length: {len(outputs) if hasattr(outputs, '__len__') else 'No length'}")
362
+ print(f"[DEBUG] Input IDs shape: {input_ids.shape}")
363
+
364
+ if len(outputs) == 0:
365
+ return {"error": "Model generated empty output"}
366
+
367
+ response = our_chatbot.tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
368
+
369
+ print(f"[DEBUG] Conversation messages length: {len(our_chatbot.conversation.messages)}")
370
+ if len(our_chatbot.conversation.messages) > 0:
371
+ print(f"[DEBUG] Last message length: {len(our_chatbot.conversation.messages[-1])}")
372
+ our_chatbot.conversation.messages[-1][-1] = response
373
+ else:
374
+ print("[DEBUG] No conversation messages found")
375
+
376
+ print(f"[DEBUG] Generated response length: {len(response)}")
377
+ except Exception as e:
378
+ print(f"[DEBUG] Response decoding error: {str(e)}")
379
+ return {"error": f"Response decoding failed: {str(e)}"}
380
 
381
  # Log conversation
382
  history = [(message_text, response)]