Upload SigLIP2 NaViT model with Google checkpoint
Browse files

Changed files:
- configuration_siglip2_navit.py (+0 -2)
- model.safetensors (+2 -2)
- modeling_siglip2_navit.py (+1 -1)
configuration_siglip2_navit.py
CHANGED
|
@@ -34,7 +34,6 @@ class Siglip2NaViTVisionConfig(PretrainedConfig):
|
|
| 34 |
hidden_act="gelu_pytorch_tanh",
|
| 35 |
layer_norm_eps=1e-6,
|
| 36 |
attention_dropout=0.0,
|
| 37 |
-
out_hidden_size=896,
|
| 38 |
spatial_merge_size=2,
|
| 39 |
**kwargs,
|
| 40 |
):
|
|
@@ -49,5 +48,4 @@ class Siglip2NaViTVisionConfig(PretrainedConfig):
|
|
| 49 |
self.attention_dropout = attention_dropout
|
| 50 |
self.layer_norm_eps = layer_norm_eps
|
| 51 |
self.hidden_act = hidden_act
|
| 52 |
-
self.out_hidden_size = out_hidden_size
|
| 53 |
self.spatial_merge_size = spatial_merge_size
|
|
|
|
| 34 |
hidden_act="gelu_pytorch_tanh",
|
| 35 |
layer_norm_eps=1e-6,
|
| 36 |
attention_dropout=0.0,
|
|
|
|
| 37 |
spatial_merge_size=2,
|
| 38 |
**kwargs,
|
| 39 |
):
|
|
|
|
| 48 |
self.attention_dropout = attention_dropout
|
| 49 |
self.layer_norm_eps = layer_norm_eps
|
| 50 |
self.hidden_act = hidden_act
|
|
|
|
| 51 |
self.spatial_merge_size = spatial_merge_size
|
model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9da4424fe862f8e718637ea8605e5161b4e13ee01f4ddc1d255eebde3b2422a6
|
| 3 |
+
size 194918288
|
modeling_siglip2_navit.py
CHANGED
|
@@ -401,7 +401,7 @@ class Siglip2NaViTVisionMerger(nn.Module):
|
|
| 401 |
)
|
| 402 |
self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
|
| 403 |
self.act_fn = nn.GELU()
|
| 404 |
-
self.linear_fc2 = nn.Linear(self.hidden_size, config.
|
| 405 |
|
| 406 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 407 |
x = self.norm(
|
|
|
|
| 401 |
)
|
| 402 |
self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
|
| 403 |
self.act_fn = nn.GELU()
|
| 404 |
+
self.linear_fc2 = nn.Linear(self.hidden_size, config.hidden_size)
|
| 405 |
|
| 406 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 407 |
x = self.norm(
|