Update app.py
app.py
CHANGED
@@ -2,8 +2,8 @@
 
 # gr.load("models/ManishThota/InstructBlip-VQA").launch()
 
-
 from PIL import Image
+import torch
 from transformers import BlipProcessor, BlipForQuestionAnswering
 
 # Initialize the model and processor
@@ -15,7 +15,7 @@ def predict_answer(image, question):
     image = image.convert("RGB")
 
     # Prepare inputs
-    encoding = processor(image, question, return_tensors="pt")
+    encoding = processor(image, question, return_tensors="pt")
 
     out = model.generate(**encoding)
     generated_text = processor.decode(out[0], skip_special_tokens=True)
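For context, a minimal sketch of the full app.py these hunks touch, pieced together from the lines visible in the diff. The checkpoint name (Salesforce/blip-vqa-base), the return statement, and the Gradio wiring are assumptions, since the diff only shows the imports and the inference lines.

import gradio as gr
import torch  # added in this commit; useful e.g. for device placement
from PIL import Image
from transformers import BlipProcessor, BlipForQuestionAnswering

# Initialize the model and processor.
# NOTE: the checkpoint name is an assumption; the diff does not show it.
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

def predict_answer(image, question):
    image = image.convert("RGB")

    # Prepare inputs: the processor tokenizes the question and converts
    # the image to pixel values in a single call
    encoding = processor(image, question, return_tensors="pt")

    out = model.generate(**encoding)
    generated_text = processor.decode(out[0], skip_special_tokens=True)
    return generated_text  # assumed: the diff cuts off before any return

# Assumed Gradio wiring; the original launch code is not visible in this diff.
gr.Interface(
    fn=predict_answer,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
).launch()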