sam2ai committed on
Commit
49f83cc
·
verified ·
1 Parent(s): 03ddf17

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +282 -193
app.py CHANGED
@@ -1,25 +1,7 @@
1
  import gradio as gr
2
  import json
3
- import os
4
- import mimetypes
5
- from google import genai
6
- from google.genai import types
7
- from PIL import Image
8
- import time
9
 
10
- # --- Helper Function to Save Generated Image ---
11
- def save_binary_file(directory, file_name, data):
12
- """Saves binary data to a file, creating the directory if needed."""
13
- if not os.path.exists(directory):
14
- os.makedirs(directory)
15
- file_path = os.path.join(directory, file_name)
16
- with open(file_path, "wb") as f:
17
- f.write(data)
18
- print(f"File saved to: {file_path}")
19
- return file_path
20
-
21
- # --- Main Function to Generate Image ---
22
# --- Main Function to Generate Image ---
# NOTE(review): the middle of this parameter list was elided in the diff this
# block was reconstructed from; names and order follow the `all_inputs`
# wiring (Gradio passes inputs positionally) — confirm against the full file.
def generate_image(
    api_key, reference_image, scene, subject_type, age_range, hair, makeup,
    jewellery, top, bottom, footwear, wardrobe_notes, pose_angle, body_pose,
    hands_pose, framing, camera_device, flash, orientation, aspect_ratio,
    distance, focus, texture, sharpness, color, effects,
    background_environment, background_props, style_genre, authenticity,
    use_original_structure, face_description, ban_mirror, ban_phone,
    ban_selfie, ban_grainy, ban_harsh_flash, ban_logos, ban_nsfw,
    ban_cropped_feet, output_count, output_size, safety, variant_name,
    variant_angle,
):
    """Build the JSON prompt, call the Gemini API with the reference image,
    and save any generated images to disk.

    Returns:
        tuple: (list of saved file paths or None, the JSON prompt string,
        a human-readable status message).

    Raises:
        gr.Error: on missing inputs or any API/runtime failure.
    """
    # --- Input Validation ---
    if not api_key:
        raise gr.Error("API Key is missing. Please enter your Gemini API key.")
    if reference_image is None:
        raise gr.Error("Reference image is missing. Please upload an image.")

    # --- Build Banned List ---
    banned_items = []
    if ban_mirror:
        banned_items.append("mirror")
    if ban_phone:
        banned_items.append("phone")
    if ban_selfie:
        banned_items.append("selfie look")
    if ban_grainy:
        banned_items.append("grainy noise")
    if ban_harsh_flash:
        banned_items.append("harsh LED flash")
    if ban_logos:
        banned_items.append("logos/brand text")
    if ban_nsfw:
        banned_items.append("nsfw")
    if ban_cropped_feet:
        banned_items.append("cropped feet")

    # --- Construct JSON Payload ---
    output_json = {
        "scene": scene,
        "subject": {"type": subject_type, "age_range": age_range, "hair": hair, "makeup": makeup, "jewellery": jewellery},
        "wardrobe": {"top": top, "bottom": bottom, "footwear": footwear, "notes": wardrobe_notes},
        "pose": {"angle": pose_angle, "body": body_pose, "hands": hands_pose, "framing": framing},
        "camera": {"device": camera_device, "flash": flash, "orientation": orientation, "aspect_ratio": aspect_ratio, "distance": distance, "focus": focus},
        "look": {"texture": texture, "sharpness": sharpness, "color": color, "effects": effects},
        "background": {"environment": background_environment, "props": background_props},
        "style": {"genre": style_genre, "authenticity": authenticity},
        "reference_face": {"use_original_structure": use_original_structure, "description": face_description},
        "ban": banned_items,
        "output": {"count": int(output_count), "size": output_size, "safety": safety},
        "variants": [{"name": variant_name, "angle": variant_angle}],
    }
    final_json_string = json.dumps(output_json, indent=4)

    # --- Call Gemini API ---
    try:
        # Configure the client
        client = genai.Client(api_key=api_key)

        # Prepare the prompt parts (JSON instructions + reference image)
        prompt_text_part = types.Part.from_text(text=final_json_string)

        with open(reference_image, 'rb') as f:
            image_data = f.read()
        # FIX: guess_type() returns None for unrecognized suffixes; fall back
        # to a generic binary type rather than sending mime_type=None.
        image_mime_type = mimetypes.guess_type(reference_image)[0] or "application/octet-stream"
        image_part = types.Part.from_data(data=image_data, mime_type=image_mime_type)

        # Define the model and generation config
        model = "gemini-1.5-flash-latest"  # Using a standard available model name
        contents = [types.Content(role="user", parts=[prompt_text_part, image_part])]
        generate_content_config = types.GenerateContentConfig(
            response_modalities=["IMAGE", "TEXT"],
        )

        # --- Process Streaming Response ---
        output_files = []
        output_directory = "generated_images"
        timestamp = int(time.time())
        file_index = 0

        # Make the streaming API call
        response_stream = client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        )

        for chunk in response_stream:
            if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                part = chunk.candidates[0].content.parts[0]
                if part.inline_data and part.inline_data.data:
                    inline_data = part.inline_data
                    # FIX: guess_extension() may return None, which would
                    # produce a file name ending in the literal text "None".
                    file_extension = mimetypes.guess_extension(inline_data.mime_type) or ".png"
                    file_name = f"output_{timestamp}_{file_index}{file_extension}"

                    # Save the file and get its path
                    saved_file_path = save_binary_file(output_directory, file_name, inline_data.data)
                    output_files.append(saved_file_path)

                    file_index += 1
                elif part.text:
                    print(f"Received text chunk: {part.text}")

        if not output_files:
            return None, final_json_string, "No image was generated. Please check the model's response or your prompt."

        # Return file paths for the Gallery and the JSON for inspection
        return output_files, final_json_string, "Image generation complete."

    except Exception as e:
        # Surface any failure to the UI instead of crashing the callback.
        error_message = f"An error occurred: {str(e)}"
        print(error_message)
        raise gr.Error(error_message)
164
-
165
# --- Gradio Interface Definition ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Gemini Image Generation Studio")
    gr.Markdown("Use the tabs below to define your image, then click 'Generate Image' to call the API.")

    with gr.Row():
        with gr.Column(scale=1):
            # --- Left Column for Inputs ---
            with gr.Tabs():
                with gr.TabItem("🔑 API & Image"):
                    api_key_input = gr.Textbox(label="Gemini API Key", type="password", info="Your API key is required to generate images.")
                    # FIX: gr.Image is not a form component and does not accept
                    # the `info` kwarg (TypeError on current Gradio releases).
                    # The original hint text was:
                    # "Upload the base image for generation or editing."
                    reference_image_input = gr.Image(label="Reference Image", type="filepath")

                with gr.TabItem("🎨 Scene & Subject"):
                    scene_input = gr.Textbox(label="Scene", value="cinematic outdoor portrait; professional photography")
                    subject_type_input = gr.Textbox(label="Subject Type", value="adult woman (idol vibe)")
                    age_range_input = gr.Textbox(label="Age Range", value="20s")
                    hair_input = gr.Textbox(label="Hair", value="straight or styled natural open hair with natural shine")
                    makeup_input = gr.Textbox(label="Makeup", value="glossy lips, soft eyeliner, luminous skin")
                    jewellery_input = gr.Textbox(label="Jewellery", value="small hoops, thin chain, subtle bracelets")

                with gr.TabItem("👕 Wardrobe"):
                    top_input = gr.Textbox(label="Top", value="basic tee or camisole")
                    bottom_input = gr.Textbox(label="Bottom", value="denim shorts or mini skirt")
                    footwear_input = gr.Textbox(label="Footwear", value="sneakers or ankle boots")
                    wardrobe_notes_input = gr.Textbox(label="Wardrobe Notes", value="casual modern look, styled for natural setting")

                with gr.TabItem("🧍 Pose & Framing"):
                    pose_angle_input = gr.Dropdown(label="Pose Angle", choices=["three-quarter", "full body"], value="three-quarter")
                    body_pose_input = gr.Textbox(label="Body Pose", value="standing or walking casually, relaxed natural posture")
                    hands_pose_input = gr.Textbox(label="Hands Pose", value="one resting by side or touching hair, the other relaxed")
                    framing_input = gr.Dropdown(label="Framing", choices=["head-to-toe", "waist-up", "cinematic composition"], value="waist-up")

                with gr.TabItem("📷 Camera & Look"):
                    camera_device_input = gr.Textbox(label="Camera Device", value="professional cinema camera / DSLR with prime lens")
                    flash_input = gr.Textbox(label="Flash", value="none; natural golden hour light or soft reflectors")
                    orientation_input = gr.Dropdown(label="Orientation", choices=["vertical", "horizontal"], value="vertical")
                    aspect_ratio_input = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "3:2", "4:3", "1:1"], value="16:9")
                    distance_input = gr.Textbox(label="Distance", value="cinematic portrait distance with shallow depth")
                    focus_input = gr.Textbox(label="Focus", value="sharp on subject; soft bokeh background")
                    texture_input = gr.Textbox(label="Texture", value="smooth high-resolution detail")
                    sharpness_input = gr.Textbox(label="Sharpness", value="very high; crisp cinematic clarity")
                    color_input = gr.Textbox(label="Color", value="warm cinematic grading; golden tones and soft contrast")
                    effects_input = gr.Textbox(label="Effects", value="subtle film grain; natural light flares, depth of field")

                with gr.TabItem("🌳 Background & Style"):
                    background_environment_input = gr.Textbox(label="Background Environment", value="nature setting — forest, park, or meadow with soft light")
                    background_props_input = gr.Textbox(label="Background Props", value="none; focus on subject against natural backdrop")
                    style_genre_input = gr.Textbox(label="Style Genre", value="cinematic portrait photography")
                    authenticity_input = gr.Textbox(label="Authenticity", value="natural, elegant, polished")

                with gr.TabItem("👤 Face & Bans"):
                    use_original_structure_input = gr.Checkbox(label="Use Original Face Structure", value=True)
                    face_description_input = gr.Textbox(label="Face Description", value="maintain the same face shape, features, and proportions as in the provided reference image")
                    gr.Markdown("#### Banned Items")
                    with gr.Row():
                        ban_mirror_input = gr.Checkbox(label="Mirror")
                        ban_phone_input = gr.Checkbox(label="Phone")
                        ban_selfie_input = gr.Checkbox(label="Selfie Look")
                        ban_grainy_input = gr.Checkbox(label="Grainy Noise")
                    with gr.Row():
                        ban_harsh_flash_input = gr.Checkbox(label="Harsh Flash")
                        ban_logos_input = gr.Checkbox(label="Logos")
                        ban_nsfw_input = gr.Checkbox(label="NSFW")
                        ban_cropped_feet_input = gr.Checkbox(label="Cropped Feet")

                with gr.TabItem("⚙️ Output & Variants"):
                    output_count_input = gr.Slider(label="Output Count", minimum=1, maximum=4, step=1, value=1)
                    output_size_input = gr.Textbox(label="Output Size", value="1024x1024")
                    safety_input = gr.Dropdown(label="Safety", choices=["strict", "moderate", "none"], value="strict")
                    variant_name_input = gr.Textbox(label="Variant Name", value="cinematic_nature_fullbody")
                    variant_angle_input = gr.Textbox(label="Variant Angle", value="full-body shot in meadow or forest path, subject centered with depth of field")

        with gr.Column(scale=1):
            # --- Right Column for Outputs ---
            generate_button = gr.Button("Generate Image", variant="primary")
            status_text = gr.Textbox(label="Status", interactive=False)
            image_gallery = gr.Gallery(label="Generated Image(s)", show_label=True, elem_id="gallery", columns=[2], rows=[2], object_fit="contain", height="auto")
            json_output = gr.JSON(label="Generated JSON Input")

    # --- Button Click Action ---
    # Order must match generate_image's positional parameters exactly.
    all_inputs = [
        api_key_input, reference_image_input, scene_input, subject_type_input,
        age_range_input, hair_input, makeup_input, jewellery_input, top_input,
        bottom_input, footwear_input, wardrobe_notes_input, pose_angle_input,
        body_pose_input, hands_pose_input, framing_input, camera_device_input,
        flash_input, orientation_input, aspect_ratio_input, distance_input,
        focus_input, texture_input, sharpness_input, color_input, effects_input,
        background_environment_input, background_props_input, style_genre_input,
        authenticity_input, use_original_structure_input, face_description_input,
        ban_mirror_input, ban_phone_input, ban_selfie_input, ban_grainy_input,
        ban_harsh_flash_input, ban_logos_input, ban_nsfw_input,
        ban_cropped_feet_input, output_count_input, output_size_input,
        safety_input, variant_name_input, variant_angle_input
    ]

    generate_button.click(
        fn=generate_image,
        inputs=all_inputs,
        outputs=[image_gallery, json_output, status_text],
    )

if __name__ == "__main__":
    demo.launch(debug=True)
 
1
  import gradio as gr
2
  import json
 
 
 
 
 
 
3
 
4
def build_json(
    api_key,
    reference_image,
    scene,
    subject_type,
    age_range,
    hair,
    makeup,
    jewellery,
    top,
    bottom,
    footwear,
    wardrobe_notes,
    pose_angle,
    body_pose,
    hands_pose,
    framing,
    camera_device,
    flash,
    orientation,
    aspect_ratio,
    distance,
    focus,
    texture,
    sharpness,
    color,
    effects,
    background_environment,
    background_props,
    style_genre,
    authenticity,
    use_original_structure,
    face_description,
    ban_mirror,
    ban_phone,
    ban_selfie,
    ban_grainy,
    ban_harsh_flash,
    ban_logos,
    ban_nsfw,
    ban_cropped_feet,
    output_count,
    output_size,
    safety,
    variant_name,
    variant_angle,
):
    """Assemble the structured prompt for the image model as a JSON string.

    NOTE(review): the middle of this parameter list was elided in the diff
    this block was reconstructed from; names and order follow the
    ``inputs=[...]`` list wired to the Generate button — confirm against the
    deployed file.

    Args: one value per UI field, in the same positional order as the
        button's ``inputs`` list. ``api_key`` and ``reference_image`` are
        accepted but unused here; they are reserved for a future call to the
        gemini nano banana API with the generated payload.

    Returns:
        str: the payload pretty-printed with ``json.dumps(..., indent=4)``.
    """
    # Map each ban checkbox to the phrase the model must avoid; order matches
    # the checkbox layout in the UI.
    ban_flags = (
        (ban_mirror, "mirror"),
        (ban_phone, "phone"),
        (ban_selfie, "selfie look"),
        (ban_grainy, "grainy noise"),
        (ban_harsh_flash, "harsh LED flash"),
        (ban_logos, "logos/brand text"),
        (ban_nsfw, "nsfw"),
        (ban_cropped_feet, "cropped feet"),
    )
    banned_items = [phrase for enabled, phrase in ban_flags if enabled]

    output_json = {
        "scene": scene,
        "subject": {
            "type": subject_type,
            "age_range": age_range,
            "hair": hair,
            "makeup": makeup,
            "jewellery": jewellery,
        },
        "wardrobe": {
            "top": top,
            "bottom": bottom,
            "footwear": footwear,
            "notes": wardrobe_notes,
        },
        "pose": {
            "angle": pose_angle,
            "body": body_pose,
            "hands": hands_pose,
            "framing": framing,
        },
        "camera": {
            "device": camera_device,
            "flash": flash,
            "orientation": orientation,
            "aspect_ratio": aspect_ratio,
            "distance": distance,
            "focus": focus,
        },
        "look": {
            "texture": texture,
            "sharpness": sharpness,
            "color": color,
            "effects": effects,
        },
        "background": {
            "environment": background_environment,
            "props": background_props,
        },
        "style": {"genre": style_genre, "authenticity": authenticity},
        "reference_face": {
            "use_original_structure": use_original_structure,
            "description": face_description,
        },
        "ban": banned_items,
        "output": {
            # FIX: gr.Slider delivers a float (e.g. 1.0); cast so the JSON
            # carries an integer count, as the previous revision did.
            "count": int(output_count),
            "size": output_size,
            "safety": safety,
        },
        "variants": [{"name": variant_name, "angle": variant_angle}],
    }

    # The user's API key is available in the 'api_key' variable.
    # You can now use this key and the reference_image path to call the
    # gemini nano banana API with the generated 'output_json'.

    # For this example, we will just return the generated JSON.
    return json.dumps(output_json, indent=4)
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
 
130
# --- UI: collect every field build_json needs and wire the Generate button ---
with gr.Blocks() as demo:
    gr.Markdown("# Gemini Nano Banana Image Generator Interface")
    gr.Markdown("Fill in the details below to generate the JSON input for the image generation model.")

    with gr.Tabs():
        # Credentials plus the reference photo the prompt is built around.
        with gr.TabItem("Authentication and Image"):
            api_key_input = gr.Textbox(label="Gemini Nano Banana API Key", type="password")
            reference_image_input = gr.Image(label="Reference Image", type="filepath")

        # Overall scene and the subject's physical description.
        with gr.TabItem("Scene and Subject"):
            with gr.Row():
                scene_input = gr.Textbox(label="Scene", value="cinematic outdoor portrait; professional photography")
            with gr.Row():
                subject_type_input = gr.Textbox(label="Subject Type", value="adult woman (idol vibe)")
                age_range_input = gr.Textbox(label="Age Range", value="20s")
            with gr.Row():
                hair_input = gr.Textbox(label="Hair", value="straight or styled natural open hair with natural shine")
                makeup_input = gr.Textbox(label="Makeup", value="glossy lips, soft eyeliner, luminous skin")
                jewellery_input = gr.Textbox(label="Jewellery", value="small hoops, thin chain, subtle bracelets")

        # Clothing fields feeding the "wardrobe" section of the payload.
        with gr.TabItem("Wardrobe"):
            with gr.Row():
                top_input = gr.Textbox(label="Top", value="basic tee or camisole")
                bottom_input = gr.Textbox(label="Bottom", value="denim shorts or mini skirt")
                footwear_input = gr.Textbox(label="Footwear", value="sneakers or ankle boots")
            with gr.Row():
                wardrobe_notes_input = gr.Textbox(label="Wardrobe Notes", value="casual modern look, styled for natural setting")

        with gr.TabItem("Pose and Framing"):
            with gr.Row():
                pose_angle_input = gr.Dropdown(label="Pose Angle", choices=["three-quarter", "full body"], value="three-quarter")
                body_pose_input = gr.Textbox(label="Body Pose", value="standing or walking casually, relaxed natural posture")
            with gr.Row():
                hands_pose_input = gr.Textbox(label="Hands Pose", value="one resting by side or touching hair, the other relaxed")
                framing_input = gr.Dropdown(label="Framing", choices=["head-to-toe", "waist-up"], value="waist-up")

        # Capture device and the rendered "look" of the image.
        with gr.TabItem("Camera and Look"):
            with gr.Row():
                camera_device_input = gr.Textbox(label="Camera Device", value="professional cinema camera / DSLR with prime lens")
                flash_input = gr.Textbox(label="Flash", value="none; natural golden hour light or soft reflectors")
            with gr.Row():
                orientation_input = gr.Dropdown(label="Orientation", choices=["vertical", "horizontal"], value="vertical")
                aspect_ratio_input = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "3:2", "4:3", "1:1"], value="16:9")
            with gr.Row():
                distance_input = gr.Textbox(label="Distance", value="cinematic portrait distance with shallow depth")
                focus_input = gr.Textbox(label="Focus", value="sharp on subject; soft bokeh background")
            with gr.Row():
                texture_input = gr.Textbox(label="Texture", value="smooth high-resolution detail")
                sharpness_input = gr.Textbox(label="Sharpness", value="very high; crisp cinematic clarity")
            with gr.Row():
                color_input = gr.Textbox(label="Color", value="warm cinematic grading; golden tones and soft contrast")
                effects_input = gr.Textbox(label="Effects", value="subtle film grain; natural light flares, depth of field")

        with gr.TabItem("Background and Style"):
            with gr.Row():
                background_environment_input = gr.Textbox(label="Background Environment", value="nature setting — forest, park, or meadow with soft light")
                background_props_input = gr.Textbox(label="Background Props", value="none; focus on subject against natural backdrop")
            with gr.Row():
                style_genre_input = gr.Textbox(label="Style Genre", value="cinematic portrait photography")
                authenticity_input = gr.Textbox(label="Authenticity", value="natural, elegant, polished")

        # Face-preservation options and the ban checkboxes.
        with gr.TabItem("Reference Face and Bans"):
            with gr.Row():
                use_original_structure_input = gr.Checkbox(label="Use Original Face Structure", value=True)
            with gr.Row():
                face_description_input = gr.Textbox(label="Face Description", value="maintain the same face shape, features, and proportions as in the provided reference image")
            with gr.Row():
                gr.Markdown("#### Banned Items")
            with gr.Row():
                ban_mirror_input = gr.Checkbox(label="Mirror")
                ban_phone_input = gr.Checkbox(label="Phone")
                ban_selfie_input = gr.Checkbox(label="Selfie Look")
                ban_grainy_input = gr.Checkbox(label="Grainy Noise")
            with gr.Row():
                ban_harsh_flash_input = gr.Checkbox(label="Harsh LED Flash")
                ban_logos_input = gr.Checkbox(label="Logos/Brand Text")
                ban_nsfw_input = gr.Checkbox(label="NSFW")
                ban_cropped_feet_input = gr.Checkbox(label="Cropped Feet")

        with gr.TabItem("Output and Variants"):
            with gr.Row():
                output_count_input = gr.Slider(label="Output Count", minimum=1, maximum=10, step=1, value=1)
                output_size_input = gr.Textbox(label="Output Size", value="1920x1080")
                safety_input = gr.Dropdown(label="Safety", choices=["strict", "moderate", "none"], value="strict")
            with gr.Row():
                variant_name_input = gr.Textbox(label="Variant Name", value="cinematic_nature_fullbody")
                variant_angle_input = gr.Textbox(label="Variant Angle", value="full-body shot in meadow or forest path, subject centered with depth of field")

    generate_button = gr.Button("Generate JSON")
    json_output = gr.JSON(label="Generated JSON")

    # One component per build_json parameter, in exact signature order —
    # Gradio passes these positionally.
    generate_button.click(
        fn=build_json,
        inputs=[
            api_key_input, reference_image_input,
            scene_input, subject_type_input, age_range_input,
            hair_input, makeup_input, jewellery_input,
            top_input, bottom_input, footwear_input, wardrobe_notes_input,
            pose_angle_input, body_pose_input, hands_pose_input, framing_input,
            camera_device_input, flash_input, orientation_input, aspect_ratio_input,
            distance_input, focus_input, texture_input, sharpness_input,
            color_input, effects_input,
            background_environment_input, background_props_input,
            style_genre_input, authenticity_input,
            use_original_structure_input, face_description_input,
            ban_mirror_input, ban_phone_input, ban_selfie_input, ban_grainy_input,
            ban_harsh_flash_input, ban_logos_input, ban_nsfw_input, ban_cropped_feet_input,
            output_count_input, output_size_input, safety_input,
            variant_name_input, variant_angle_input,
        ],
        outputs=json_output,
    )

if __name__ == "__main__":
    demo.launch()