yamildiego committed on
Commit
02da513
·
1 Parent(s): 73164e7

self.ip_ckpt

Browse files
Files changed (2) hide show
  1. handler.py +17 -9
  2. ip_adapter/ip_adapter.py +16 -16
handler.py CHANGED
@@ -9,6 +9,7 @@ from pathlib import Path
9
  from huggingface_hub import hf_hub_download, snapshot_download
10
  from ip_adapter.ip_adapter import IPAdapterXL
11
  from safetensors.torch import load_file
 
12
 
13
  from diffusers import (
14
  ControlNetModel,
@@ -24,8 +25,8 @@ dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
24
 
25
  # initialization
26
  base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
27
- image_encoder_path = "sdxl_models/image_encoder"
28
- ip_ckpt = "sdxl_models/ip-adapter_sdxl.bin"
29
  controlnet_path = "diffusers/controlnet-canny-sdxl-1.0"
30
 
31
 
@@ -33,6 +34,13 @@ controlnet_path = "diffusers/controlnet-canny-sdxl-1.0"
33
  class EndpointHandler():
34
  def __init__(self, model_dir):
35
 
 
 
 
 
 
 
 
36
 
37
 
38
  self.controlnet = ControlNetModel.from_pretrained(
@@ -63,8 +71,8 @@ class EndpointHandler():
63
 
64
  self.ip_model = IPAdapterXL(
65
  self.pipe,
66
- image_encoder_path,
67
- ip_ckpt,
68
  device,
69
  target_blocks=["up_blocks.0.attentions.1"],
70
  )
@@ -89,14 +97,14 @@ class EndpointHandler():
89
  if target == "Load original IP-Adapter":
90
  # target_blocks=["blocks"] for original IP-Adapter
91
  ip_model = IPAdapterXL(
92
- self.pipe, image_encoder_path, ip_ckpt, device, target_blocks=["blocks"]
93
  )
94
  elif target == "Load only style blocks":
95
  # target_blocks=["up_blocks.0.attentions.1"] for style blocks only
96
  ip_model = IPAdapterXL(
97
  self.pipe,
98
- image_encoder_path,
99
- ip_ckpt,
100
  device,
101
  target_blocks=["up_blocks.0.attentions.1"],
102
  )
@@ -104,8 +112,8 @@ class EndpointHandler():
104
  # target_blocks = ["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"] # for style+layout blocks
105
  ip_model = IPAdapterXL(
106
  self.pipe,
107
- image_encoder_path,
108
- ip_ckpt,
109
  device,
110
  target_blocks=["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"],
111
  )
 
9
  from huggingface_hub import hf_hub_download, snapshot_download
10
  from ip_adapter.ip_adapter import IPAdapterXL
11
  from safetensors.torch import load_file
12
+ import os
13
 
14
  from diffusers import (
15
  ControlNetModel,
 
25
 
26
  # initialization
27
  base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
28
+ # image_encoder_path = "sdxl_models/image_encoder"
29
+ # ip_ckpt = "sdxl_models/ip-adapter_sdxl.bin"
30
  controlnet_path = "diffusers/controlnet-canny-sdxl-1.0"
31
 
32
 
 
34
  class EndpointHandler():
35
  def __init__(self, model_dir):
36
 
37
+ repo_id = "h94/IP-Adapter"
38
+
39
+ # Descargar todo el contenido del directorio image_encoder
40
+ local_repo_path = snapshot_download(repo_id=repo_id)
41
+ # image_encoder_local_path = os.path.join(local_repo_path, "image_encoder")
42
+ self.image_encoder_local_path = os.path.join(local_repo_path, "sdxl_models", "image_encoder")
43
+ self.ip_ckpt = os.path.join(local_repo_path, "sdxl_models", "ip-adapter_sdxl.bin")
44
 
45
 
46
  self.controlnet = ControlNetModel.from_pretrained(
 
71
 
72
  self.ip_model = IPAdapterXL(
73
  self.pipe,
74
+ self.image_encoder_local_path,
75
+ self.ip_ckpt,
76
  device,
77
  target_blocks=["up_blocks.0.attentions.1"],
78
  )
 
97
  if target == "Load original IP-Adapter":
98
  # target_blocks=["blocks"] for original IP-Adapter
99
  ip_model = IPAdapterXL(
100
+ self.pipe, self.image_encoder_local_path, self.ip_ckpt, device, target_blocks=["blocks"]
101
  )
102
  elif target == "Load only style blocks":
103
  # target_blocks=["up_blocks.0.attentions.1"] for style blocks only
104
  ip_model = IPAdapterXL(
105
  self.pipe,
106
+ self.image_encoder_local_path,
107
+ self.ip_ckpt,
108
  device,
109
  target_blocks=["up_blocks.0.attentions.1"],
110
  )
 
112
  # target_blocks = ["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"] # for style+layout blocks
113
  ip_model = IPAdapterXL(
114
  self.pipe,
115
+ self.image_encoder_local_path,
116
+ self.ip_ckpt,
117
  device,
118
  target_blocks=["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"],
119
  )
ip_adapter/ip_adapter.py CHANGED
@@ -77,22 +77,22 @@ class IPAdapter:
77
 
78
  # load image encoder
79
 
80
- repo_id = "h94/IP-Adapter"
81
 
82
- # Descargar todo el contenido del directorio image_encoder
83
- local_repo_path = snapshot_download(repo_id=repo_id)
84
- # image_encoder_local_path = os.path.join(local_repo_path, "image_encoder")
85
- image_encoder_local_path = os.path.join(local_repo_path, "sdxl_models", "image_encoder")
86
 
87
- print("Contenido del directorio image_encoder:", os.listdir(image_encoder_local_path))
88
 
89
- if "config.json" in os.listdir(image_encoder_local_path) and "pytorch_model.bin" in os.listdir(image_encoder_local_path):
90
- # Usar el path local para cargar el modelo
91
- self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
92
- image_encoder_local_path
93
- ).to(self.device, dtype=torch.float16)
94
- else:
95
- raise Exception("Los archivos necesarios para cargar el modelo no están en el directorio.")
96
 
97
 
98
  # Usar el path local descargado para cargar el modelo
@@ -101,9 +101,9 @@ class IPAdapter:
101
  # ).to(self.device, dtype=torch.float16)
102
 
103
  # load image encoder
104
- # self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
105
- # self.device, dtype=torch.float16
106
- # )
107
 
108
  self.clip_image_processor = CLIPImageProcessor()
109
  # image proj model
 
77
 
78
  # load image encoder
79
 
80
+ # repo_id = "h94/IP-Adapter"
81
 
82
+ # # Descargar todo el contenido del directorio image_encoder
83
+ # local_repo_path = snapshot_download(repo_id=repo_id)
84
+ # # image_encoder_local_path = os.path.join(local_repo_path, "image_encoder")
85
+ # image_encoder_local_path = os.path.join(local_repo_path, "sdxl_models", "image_encoder")
86
 
87
+ # print("Contenido del directorio image_encoder:", os.listdir(image_encoder_local_path))
88
 
89
+ # if "config.json" in os.listdir(image_encoder_local_path) and "pytorch_model.bin" in os.listdir(image_encoder_local_path):
90
+ # # Usar el path local para cargar el modelo
91
+ # self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
92
+ # image_encoder_local_path
93
+ # ).to(self.device, dtype=torch.float16)
94
+ # else:
95
+ # raise Exception("Los archivos necesarios para cargar el modelo no están en el directorio.")
96
 
97
 
98
  # Usar el path local descargado para cargar el modelo
 
101
  # ).to(self.device, dtype=torch.float16)
102
 
103
  # load image encoder
104
+ self.image_encoder = CLIPVisionModelWithProjection.from_pretrained(self.image_encoder_path).to(
105
+ self.device, dtype=torch.float16
106
+ )
107
 
108
  self.clip_image_processor = CLIPImageProcessor()
109
  # image proj model