gopalagra committed on
Commit
f5fa8f7
·
verified ·
1 Parent(s): 269fb75

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -118,9 +118,9 @@ model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(
118
  # Function
119
  def vqa_answer(image, question):
120
  # image is already a PIL Image (no need to open again)
121
- inputs = vqa_processor(image, question, return_tensors="pt").to(vqa_model.device)
122
- out = vqa_model.generate(**inputs, max_new_tokens=50)
123
- answer = vqa_processor.decode(out[0], skip_special_tokens=True)
124
  return answer
125
 
126
 
 
118
  # Function
119
  def vqa_answer(image, question):
120
  # image is already a PIL Image (no need to open again)
121
+ inputs = processor(image, question, return_tensors="pt").to(model.device)
122
+ out = model.generate(**inputs, max_new_tokens=50)
123
+ answer = processor.decode(out[0], skip_special_tokens=True)
124
  return answer
125
 
126