Update README.md
Browse files
README.md
CHANGED
|
@@ -116,7 +116,7 @@ from transformers import SiglipModel, SiglipProcessor
|
|
| 116 |
from safetensors.torch import load_file
|
| 117 |
from huggingface_hub import hf_hub_download
|
| 118 |
|
| 119 |
-
class FaceRecognizer(nn.Module):
|
| 120 |
def __init__(self):
|
| 121 |
super().__init__()
|
| 122 |
ckpt = "google/siglip-base-patch16-224"
|
|
@@ -128,7 +128,7 @@ class FaceRecognizer(nn.Module):
|
|
| 128 |
clip_inputs = self.processor(images=images, return_tensors="pt").to(self.clip.device)
|
| 129 |
return self.clip.get_image_features(**clip_inputs)
|
| 130 |
|
| 131 |
-
model = FaceRecognizer()
|
| 132 |
|
| 133 |
weights_path = hf_hub_download(repo_id="AvitoTech/SigLIP-Base-for-animal-identification", filename="model.safetensors")
|
| 134 |
state_dict = load_file(weights_path)
|
|
|
|
| 116 |
from safetensors.torch import load_file
|
| 117 |
from huggingface_hub import hf_hub_download
|
| 118 |
|
| 119 |
+
class Model(nn.Module):
|
| 120 |
def __init__(self):
|
| 121 |
super().__init__()
|
| 122 |
ckpt = "google/siglip-base-patch16-224"
|
|
|
|
| 128 |
clip_inputs = self.processor(images=images, return_tensors="pt").to(self.clip.device)
|
| 129 |
return self.clip.get_image_features(**clip_inputs)
|
| 130 |
|
| 131 |
+
model = Model()
|
| 132 |
|
| 133 |
weights_path = hf_hub_download(repo_id="AvitoTech/SigLIP-Base-for-animal-identification", filename="model.safetensors")
|
| 134 |
state_dict = load_file(weights_path)
|