Spaces:
Runtime error
Runtime error
Commit
·
64dbe9b
1
Parent(s):
84e1996
Update app.py
Browse files
app.py
CHANGED
|
@@ -54,14 +54,14 @@ with open("bad_words.txt", "r") as f:
|
|
| 54 |
model, image_processor, tokenizer = create_model_and_transforms(
|
| 55 |
clip_vision_encoder_pretrained="openai",
|
| 56 |
clip_vision_encoder_path="ViT-L-14",
|
| 57 |
-
lang_encoder_path="anas-awadalla/mpt-1b-redpajama-200b",
|
| 58 |
-
tokenizer_path="anas-awadalla/mpt-1b-redpajama-200b",
|
| 59 |
cross_attn_every_n_layers=1,
|
| 60 |
)
|
| 61 |
|
| 62 |
-
checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-3B-vitl-mpt1b", "checkpoint.pt")
|
| 63 |
model.load_state_dict(torch.load(checkpoint_path), strict=False)
|
| 64 |
-
|
| 65 |
model.eval()
|
| 66 |
|
| 67 |
def generate(
|
|
@@ -153,9 +153,9 @@ def generate(
|
|
| 153 |
|
| 154 |
# with torch.cuda.amp.autocast(dtype=torch.bfloat16):
|
| 155 |
output = model.generate(
|
| 156 |
-
vision_x=vision_x,
|
| 157 |
-
lang_x=input_ids,
|
| 158 |
-
attention_mask=attention_mask,
|
| 159 |
max_new_tokens=30,
|
| 160 |
num_beams=3,
|
| 161 |
# do_sample=True,
|
|
|
|
| 54 |
model, image_processor, tokenizer = create_model_and_transforms(
|
| 55 |
clip_vision_encoder_pretrained="openai",
|
| 56 |
clip_vision_encoder_path="ViT-L-14",
|
| 57 |
+
lang_encoder_path="anas-awadalla/mpt-7b",
|
| 58 |
+
tokenizer_path="anas-awadalla/mpt-7b",
|
| 59 |
cross_attn_every_n_layers=1,
|
| 60 |
)
|
| 61 |
|
| 62 |
+
checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-9B-vitl-mpt7b", "checkpoint.pt")
|
| 63 |
model.load_state_dict(torch.load(checkpoint_path), strict=False)
|
| 64 |
+
model = model.to("cuda")
|
| 65 |
model.eval()
|
| 66 |
|
| 67 |
def generate(
|
|
|
|
| 153 |
|
| 154 |
# with torch.cuda.amp.autocast(dtype=torch.bfloat16):
|
| 155 |
output = model.generate(
|
| 156 |
+
vision_x=vision_x.to("cuda"),
|
| 157 |
+
lang_x=input_ids.to("cuda"),
|
| 158 |
+
attention_mask=attention_mask.to("cuda"),
|
| 159 |
max_new_tokens=30,
|
| 160 |
num_beams=3,
|
| 161 |
# do_sample=True,
|