Alexander Bagus committed on
Commit
fe208e4
·
1 Parent(s): 6a1c863
Files changed (1) hide show
  1. app.py +19 -61
app.py CHANGED
@@ -48,69 +48,31 @@ def prepare(prompt, is_polish_prompt):
48
  def inference(
49
  prompt,
50
  negative_prompt,
51
- input_image,
52
- image_scale=1.0,
53
- control_mode='Canny',
54
- control_context_scale = 0.75,
55
  seed=42,
56
  randomize_seed=True,
57
  guidance_scale=1.5,
58
  num_inference_steps=8,
59
  progress=gr.Progress(track_tqdm=True),
60
  ):
61
- # timestamp = time.time()
62
- # print(f"timestamp: {timestamp}")
63
-
64
- # # process image
65
- # print("DEBUG: process image")
66
- # if input_image is None:
67
- # print("Error: input_image is empty.")
68
- # return None
69
-
70
- # # input_image, width, height = scale_image(input_image, image_scale)
71
- # # control_mode='HED'
72
- # processor_id = 'canny'
73
- # if control_mode == 'HED':
74
- # processor_id = 'softedge_hed'
75
- # if control_mode =='Depth':
76
- # processor_id = 'depth_midas'
77
- # if control_mode =='MLSD':
78
- # processor_id = 'mlsd'
79
- # if control_mode =='Pose':
80
- # processor_id = 'openpose_full'
81
-
82
- # print(f"DEBUG: processor_id={processor_id}")
83
- # processor = Processor(processor_id)
84
-
85
- # # Width must be divisible by 16
86
- # control_image, width, height = rescale_image(input_image, image_scale, 16)
87
- # control_image = control_image.resize((1024, 1024))
88
-
89
- # print("DEBUG: processor running")
90
- # control_image = processor(control_image, to_pil=True)
91
- # control_image = control_image.resize((width, height))
92
-
93
- # print("DEBUG: control_image_torch")
94
- # control_image_torch = get_image_latent(control_image, sample_size=[height, width])[:, :, 0]
95
-
96
- # # generation
97
- # if randomize_seed: seed = random.randint(0, MAX_SEED)
98
- # generator = torch.Generator().manual_seed(seed)
99
-
100
- # image = pipe(
101
- # prompt=prompt,
102
- # negative_prompt = negative_prompt,
103
- # height=height,
104
- # width=width,
105
- # generator=generator,
106
- # guidance_scale=guidance_scale,
107
- # control_image=control_image_torch,
108
- # num_inference_steps=num_inference_steps,
109
- # control_context_scale=control_context_scale,
110
- # ).images[0]
111
-
112
- # return image, seed, control_image
113
- return True
114
 
115
 
116
  def read_file(path: str) -> str:
@@ -229,10 +191,6 @@ with gr.Blocks() as demo:
229
  inputs=[
230
  polished_prompt,
231
  negative_prompt,
232
- input_image,
233
- image_scale,
234
- control_mode,
235
- control_context_scale,
236
  seed,
237
  randomize_seed,
238
  guidance_scale,
 
48
  def inference(
49
  prompt,
50
  negative_prompt,
 
 
 
 
51
  seed=42,
52
  randomize_seed=True,
53
  guidance_scale=1.5,
54
  num_inference_steps=8,
55
  progress=gr.Progress(track_tqdm=True),
56
  ):
57
+ timestamp = time.time()
58
+ print(f"timestamp: {timestamp}")
59
+
60
+
61
+ # generation
62
+ if randomize_seed: seed = random.randint(0, MAX_SEED)
63
+ generator = torch.Generator().manual_seed(seed)
64
+
65
+ image = pipe(
66
+ prompt=prompt,
67
+ negative_prompt = negative_prompt,
68
+ height=1024,
69
+ width=1024,
70
+ generator=generator,
71
+ guidance_scale=guidance_scale,
72
+ num_inference_steps=num_inference_steps
73
+ ).images[0]
74
+
75
+ return image, seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
 
78
  def read_file(path: str) -> str:
 
191
  inputs=[
192
  polished_prompt,
193
  negative_prompt,
 
 
 
 
194
  seed,
195
  randomize_seed,
196
  guidance_scale,