Commit: Add print statements
File changed: modeling_cogvlm.py (+26 −0)

@@ -438,6 +438,32 @@ class CogVLMModel(CogVLMPreTrainedModel):
Before (old lines 438-443):

438          images_features = rearrange(images_features, 'b n d -> (b n) d')
439          images_features = images_features.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device)
440
441          print("First values of text embeddings:", inputs_embeds[0, :3, :3])
442          print("First values of images_features:", images_features[0, :3])
443
After (new lines 438-469; added lines marked "+"):

438          images_features = rearrange(images_features, 'b n d -> (b n) d')
439          images_features = images_features.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device)
440
441 +        from huggingface_hub import HfApi
442 +
443 +        torch.save(images_features, "images_features.pt")
444 +        torch.save(inputs_embeds, "inputs_embeds.pt")
445 +        torch.save(token_type_ids, "token_type_ids.pt")
446 +
447 +        api = HfApi()
448 +        api.upload_file(
449 +            path_or_fileobj="images_features.pt",
450 +            path_in_repo="images_features.pt",
451 +            repo_id="nielsr/test-cogvlm",
452 +            repo_type="dataset",
453 +        )
454 +        api.upload_file(
455 +            path_or_fileobj="inputs_embeds.pt",
456 +            path_in_repo="inputs_embeds.pt",
457 +            repo_id="nielsr/test-cogvlm",
458 +            repo_type="dataset",
459 +        )
460 +        api.upload_file(
461 +            path_or_fileobj="token_type_ids.pt",
462 +            path_in_repo="token_type_ids.pt",
463 +            repo_id="nielsr/test-cogvlm",
464 +            repo_type="dataset",
465 +        )
466 +
467          print("First values of text embeddings:", inputs_embeds[0, :3, :3])
468          print("First values of images_features:", images_features[0, :3])
469