JasperHaozhe committed on
Commit
9febb0e
·
verified ·
1 Parent(s): 4d816d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -417,8 +417,8 @@ def run_flux_generation(instruction_text, source_img, width, height):
417
  def run_vlm_evaluation(messages, loaded_images):
418
  """Run VLM model on GPU to evaluate images and stream output text."""
419
  # Ensure model is on CUDA/device for evaluation
420
- if not (VLM_MAX_MEMORY or VLM_QUANTIZATION_4BIT or VLM_QUANTIZATION_8BIT):
421
- model.to(device_vlm)
422
 
423
  # Generate and stream text
424
  prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
417
  def run_vlm_evaluation(messages, loaded_images):
418
  """Run VLM model on GPU to evaluate images and stream output text."""
419
  # Ensure model is on CUDA/device for evaluation
420
+ # if not (VLM_MAX_MEMORY or VLM_QUANTIZATION_4BIT or VLM_QUANTIZATION_8BIT):
421
+ model.to(device_vlm)
422
 
423
  # Generate and stream text
424
  prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)