Commit ·
66b3dc1
1
Parent(s): d378855
image_encoder from other repo
Browse files — ip_adapter/ip_adapter.py (+17 −3)
ip_adapter/ip_adapter.py
CHANGED
|
@@ -7,6 +7,7 @@ from diffusers.pipelines.controlnet import MultiControlNetModel
|
|
| 7 |
from PIL import Image
|
| 8 |
from safetensors import safe_open
|
| 9 |
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
|
|
|
|
| 10 |
|
| 11 |
from .utils import is_torch2_available, get_generator
|
| 12 |
|
|
@@ -75,9 +76,22 @@ class IPAdapter:
|
|
| 75 |
self.set_ip_adapter()
|
| 76 |
|
| 77 |
# load image encoder
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
self.clip_image_processor = CLIPImageProcessor()
|
| 82 |
# image proj model
|
| 83 |
self.image_proj_model = self.init_proj()
|
|
|
|
| 7 |
from PIL import Image
|
| 8 |
from safetensors import safe_open
|
| 9 |
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
|
| 10 |
+
from huggingface_hub import snapshot_download
|
| 11 |
|
| 12 |
from .utils import is_torch2_available, get_generator
|
| 13 |
|
|
|
|
| 76 |
self.set_ip_adapter()
|
| 77 |
|
| 78 |
# load image encoder
|
| 79 |
+
|
| 80 |
+
repo_id = "h94/IP-Adapter"
|
| 81 |
+
|
| 82 |
+
# Descargar todo el contenido del directorio image_encoder
|
| 83 |
+
local_path = snapshot_download(repo_id=repo_id, subfolder="image_encoder")
|
| 84 |
+
|
| 85 |
+
# Usar el path local descargado para cargar el modelo
|
| 86 |
+
self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
|
| 87 |
+
local_path
|
| 88 |
+
).to(self.device, dtype=torch.float16)
|
| 89 |
+
|
| 90 |
+
# load image encoder
|
| 91 |
+
# self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
|
| 92 |
+
# self.device, dtype=torch.float16
|
| 93 |
+
# )
|
| 94 |
+
|
| 95 |
self.clip_image_processor = CLIPImageProcessor()
|
| 96 |
# image proj model
|
| 97 |
self.image_proj_model = self.init_proj()
|