Update project_model.py
Browse files- project_model.py +10 -7
project_model.py
CHANGED
|
@@ -195,14 +195,17 @@ def process_inputs(
 195      # Add user's new question to the history
 196      session.add_question(question)
 197
 198      # Call to gemma_pipe
-199      gemma_output = gemma_pipe(
-200          text=[
-201              {"role": "system", "content": 'You are a helpful assistant for visually impaired users.'},
-202              {"role": "user", "content": "Question: " + question + " This is the shared visual context: " + session.visual_context}
-203          ],
-204          images=[session.current_image]
-205      )
 206
 207      # Handle the output from Gemma model safely
 208      if isinstance(gemma_output, list) and len(gemma_output) > 0:
|
|
|
|
 195      # Add user's new question to the history
 196      session.add_question(question)
 197
+198      # Gemma answer
+199      messages = [{
+200          "role": "user",
+201          "content": [
+202              {"type": "image", "image": pil_image},
+203              {"type": "text", "text": vqa_prompt}
+204          ]
+205      }]
+206
 207      # Call to gemma_pipe
+208      gemma_output = gemma_pipe(text=messages, max_new_tokens=500)
 209
 210      # Handle the output from Gemma model safely
 211      if isinstance(gemma_output, list) and len(gemma_output) > 0: