Inmental committed on
Commit
461c048
·
verified ·
1 Parent(s): 917f728

Upload folder using huggingface_hub

Browse files
gradio_sketch2image.py CHANGED
@@ -64,29 +64,54 @@ def pil_image_to_data_uri(img, format="PNG"):
64
  return f"data:image/{format.lower()};base64,{img_str}"
65
 
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  def run(image, prompt, prompt_template, style_name, seed, val_r):
68
  print(f"prompt: {prompt}")
69
  print("sketch updated")
 
70
  if image is None:
71
- ones = Image.new("L", (512, 512), 255)
72
  temp_uri = pil_image_to_data_uri(ones)
73
  return ones, gr.update(link=temp_uri), gr.update(link=temp_uri)
 
74
  prompt = prompt_template.replace("{prompt}", prompt)
75
  image = image.convert("RGB")
76
- image_t = F.to_tensor(image) > 0.5
77
- image_t = F.to_tensor(image) * 2 - 1 # Normalize to [-1, 1]
 
 
78
  print(f"r_val={val_r}, seed={seed}")
79
 
80
  with torch.no_grad():
81
  c_t = image_t.unsqueeze(0).cuda().float()
82
  torch.manual_seed(seed)
83
  B, C, H, W = c_t.shape
84
- noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device)
 
85
  print("Calling Pix2Pix model... ct: {}, prompt: {}, deterministic: False, r: {}, noise_map: {}".format(c_t.shape, prompt, val_r, noise.shape))
86
  output_image = model(c_t, prompt, deterministic=False, r=val_r, noise_map=noise)
87
  output_pil = F.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
88
  input_sketch_uri = pil_image_to_data_uri(Image.fromarray(255 - np.array(image)))
89
  output_image_uri = pil_image_to_data_uri(output_pil)
 
 
90
  return (
91
  output_pil,
92
  gr.update(link=input_sketch_uri),
 
64
  return f"data:image/{format.lower()};base64,{img_str}"
65
 
66
 
67
def normalize_image(image, range_from=(-1, 1)):
    """
    Convert a PIL image to a float tensor, optionally rescaled to [-1, 1].

    :param image: The PIL Image to convert.
    :param range_from: Target value range. Only ``(-1, 1)`` triggers
        rescaling; any other value returns the tensor in ``to_tensor``'s
        default [0, 1] range unchanged.
        NOTE(review): the name suggests a *source* range but it is used as
        the *target* range — kept as-is for caller compatibility.
    :return: Image tensor in [-1, 1] when ``range_from == (-1, 1)``,
        otherwise in [0, 1].
    """
    # F.to_tensor yields a float tensor scaled to [0, 1]
    image_t = F.to_tensor(image)

    if range_from == (-1, 1):
        # Linear map [0, 1] -> [-1, 1]
        image_t = image_t * 2 - 1

    return image_t
83
+
84
+
85
  def run(image, prompt, prompt_template, style_name, seed, val_r):
86
  print(f"prompt: {prompt}")
87
  print("sketch updated")
88
+ print(image)
89
  if image is None:
90
+ ones = Image.new("L", (1024, 1024), 255)
91
  temp_uri = pil_image_to_data_uri(ones)
92
  return ones, gr.update(link=temp_uri), gr.update(link=temp_uri)
93
+ print(f"Input Image Size: {image.size}")
94
  prompt = prompt_template.replace("{prompt}", prompt)
95
  image = image.convert("RGB")
96
+
97
+ # Normalize image to [-1, 1]
98
+ image_t = normalize_image(image, range_from=(-1, 1))
99
+
100
  print(f"r_val={val_r}, seed={seed}")
101
 
102
  with torch.no_grad():
103
  c_t = image_t.unsqueeze(0).cuda().float()
104
  torch.manual_seed(seed)
105
  B, C, H, W = c_t.shape
106
+ #noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device)
107
+ noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device) * 0.8
108
  print("Calling Pix2Pix model... ct: {}, prompt: {}, deterministic: False, r: {}, noise_map: {}".format(c_t.shape, prompt, val_r, noise.shape))
109
  output_image = model(c_t, prompt, deterministic=False, r=val_r, noise_map=noise)
110
  output_pil = F.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
111
  input_sketch_uri = pil_image_to_data_uri(Image.fromarray(255 - np.array(image)))
112
  output_image_uri = pil_image_to_data_uri(output_pil)
113
+
114
+ print(f"Output Image Size: {output_pil.size}")
115
  return (
116
  output_pil,
117
  gr.update(link=input_sketch_uri),
gradio_sketch2imagehd.py CHANGED
@@ -95,6 +95,62 @@ def extract_main_words(item: str) -> str:
95
  nouns = [word.capitalize() for word, tag in tagged if tag in ('NN', 'NNP', 'NNPS', 'NNS')]
96
  return ' '.join(nouns)
97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  def run(image, prompt, prompt_template, style_name, seed, val_r):
99
  """Runs the main image processing pipeline."""
100
  logging.debug("Running model inference...")
 
95
  nouns = [word.capitalize() for word, tag in tagged if tag in ('NN', 'NNP', 'NNPS', 'NNS')]
96
  return ' '.join(nouns)
97
 
98
def normalize_image(image, range_from=(-1, 1)):
    """
    Normalize the input image to a specified range.

    :param image: The PIL Image to be normalized.
    :param range_from: The target range for normalization, typically (-1, 1) or (0, 1).
    :return: Normalized image tensor.
    """
    # to_tensor produces a float tensor in [0, 1]
    tensor = F.to_tensor(image)

    if range_from != (-1, 1):
        # Any other target range: keep the default [0, 1] scaling.
        return tensor

    # Rescale [0, 1] -> [-1, 1]
    return tensor * 2.0 - 1.0
114
+
115
+
116
def run(image, prompt, prompt_template, style_name, seed, val_r):
    """
    Run sketch-to-image inference with the pix2pix model.

    :param image: Input sketch as a PIL Image, or None when the canvas is empty.
    :param prompt: User prompt text.
    :param prompt_template: Template string containing a "{prompt}" placeholder.
    :param style_name: Selected style name (unused here; kept for the UI callback signature).
    :param seed: Random seed used for the noise map.
    :param val_r: The r value forwarded to the model.
    :return: Tuple of (output PIL image, gr.update for the input-sketch link,
             gr.update for the output-image link).
    """
    print(f"prompt: {prompt}")
    print("sketch updated")
    print(image)
    if image is None:
        # Empty canvas: return a blank white 1024x1024 image for both links.
        ones = Image.new("L", (1024, 1024), 255)
        temp_uri = pil_image_to_data_uri(ones)
        return ones, gr.update(link=temp_uri), gr.update(link=temp_uri)
    print(f"Input Image Size: {image.size}")
    prompt = prompt_template.replace("{prompt}", prompt)
    image = image.convert("RGB")
    image_t = normalize_image(image, range_from=(-1, 1))

    print(f"r_val={val_r}, seed={seed}")

    with torch.no_grad():
        c_t = image_t.unsqueeze(0).cuda().float()
        torch.manual_seed(seed)
        # Latent-space noise map (4 channels at 1/8 resolution), damped by
        # 0.4 to reduce output randomness. Batch/channel dims are unused.
        _, _, H, W = c_t.shape
        noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device) * 0.4
        print("Calling Pix2Pix model... ct: {}, prompt: {}, deterministic: False, r: {}, noise_map: {}".format(c_t.shape, prompt, val_r, noise.shape))
        output_image = pix2pix_model(c_t, prompt, deterministic=False, r=val_r, noise_map=noise)
        # Model output is in [-1, 1]; map back to [0, 1] for PIL conversion.
        output_pil = F.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
        # The input-sketch link shows the inverted sketch.
        input_sketch_uri = pil_image_to_data_uri(Image.fromarray(255 - np.array(image)))
        output_image_uri = pil_image_to_data_uri(output_pil)

        print(f"Output Image Size: {output_pil.size}")
        return (
            output_pil,
            gr.update(link=input_sketch_uri),
            gr.update(link=output_image_uri),
        )
153
+
154
  def run(image, prompt, prompt_template, style_name, seed, val_r):
155
  """Runs the main image processing pipeline."""
156
  logging.debug("Running model inference...")
output.png CHANGED
run - Copy (2).cmd ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
@echo off
REM Launch the sketch2image Gradio demo inside its Conda environment.

REM Activate the Conda environment
call conda activate img2img-turbo

REM Navigate to the project directory
cd /d D:\I+D\ia\img2img\img2img-turbo

REM Run the Python script
python gradio_sketch2image.py

REM Pause the script to keep the command prompt open in case of errors
pause