Update app.py
Browse files
app.py
CHANGED
|
@@ -109,13 +109,13 @@ def imageMode(image, question):
     print('-------question embedding from phi2 obtained-----------')
     inputs = torch.concat((imgToTextEmb, Qtoken_embeddings), axis=-2)

-    prediction =
+    prediction = tokenizer_text.batch_decode(
         phi2.generate(
             inputs_embeds=inputs,
             max_new_tokens=50,
-            bos_token_id=
-            eos_token_id=
-            pad_token_id=
+            bos_token_id=tokenizer_text.bos_token_id,
+            eos_token_id=tokenizer_text.eos_token_id,
+            pad_token_id=tokenizer_text.pad_token_id
         )
     )
     text_pred = prediction[0].rstrip('<|endoftext|>').rstrip("\n")