Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -120,7 +120,6 @@ def generate_caption_llava(image_path, caption_bool):
|
|
| 120 |
|
| 121 |
inputs = processor(prompt, Image.open(image_path), return_tensors="pt").to(device)
|
| 122 |
|
| 123 |
-
# autoregressively complete prompt
|
| 124 |
output = model.generate(**inputs, max_new_tokens=100)
|
| 125 |
|
| 126 |
return processor.decode(output[0], skip_special_tokens=True)["generated_text"][len(text_prompt):]
|
|
@@ -128,7 +127,7 @@ def generate_caption_llava(image_path, caption_bool):
|
|
| 128 |
@spaces.GPU
|
| 129 |
def generate_answer_llava(image_path, question):
|
| 130 |
text_prompt =f"[INST] \n{question} [/INST]"
|
| 131 |
-
inputs = processor(
|
| 132 |
output = model.generate(**inputs, max_new_tokens=100)
|
| 133 |
return processor.decode(output[0], skip_special_tokens=True)["generated_text"][len(text_prompt):]
|
| 134 |
|
|
|
|
| 120 |
|
| 121 |
inputs = processor(prompt, Image.open(image_path), return_tensors="pt").to(device)
|
| 122 |
|
|
|
|
| 123 |
output = model.generate(**inputs, max_new_tokens=100)
|
| 124 |
|
| 125 |
return processor.decode(output[0], skip_special_tokens=True)["generated_text"][len(text_prompt):]
|
|
|
|
| 127 |
@spaces.GPU
def generate_answer_llava(image_path, question):
    """Answer a free-form question about an image with the LLaVA model.

    Args:
        image_path: Filesystem path to the input image.
        question: Natural-language question to ask about the image.

    Returns:
        The generated answer text, with the instruction-prompt prefix
        stripped from the decoded output.
    """
    # NOTE(review): LLaVA chat prompts normally include an <image> placeholder
    # token before the newline; it may have been lost when this file was
    # rendered as HTML — confirm against the model card's prompt template.
    text_prompt = f"[INST] \n{question} [/INST]"
    inputs = processor(text_prompt, Image.open(image_path), return_tensors="pt").to(device)
    output = model.generate(**inputs, max_new_tokens=100)
    # BUG FIX: processor.decode() returns a plain string, not a
    # pipeline-style [{"generated_text": ...}] payload, so indexing the
    # result with ["generated_text"] raised TypeError at runtime (the
    # Space's "Runtime error"). Slice the prompt off the string directly.
    return processor.decode(output[0], skip_special_tokens=True)[len(text_prompt):]
|
| 133 |
|