linoyts HF Staff committed on
Commit
8fd3953
·
verified ·
1 Parent(s): dd12221

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +183 -85
app.py CHANGED
@@ -14,7 +14,7 @@ import torch
14
  from pathlib import Path
15
  from typing import Optional
16
  from huggingface_hub import hf_hub_download
17
- from ltx_pipelines.ti2vid_two_stages import TI2VidTwoStagesPipeline
18
  from ltx_core.tiling import TilingConfig
19
  from ltx_pipelines.constants import (
20
  DEFAULT_SEED,
@@ -31,8 +31,8 @@ MAX_SEED = np.iinfo(np.int32).max
31
  # Custom negative prompt
32
  DEFAULT_NEGATIVE_PROMPT = "shaky, glitchy, low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly, transition, static"
33
 
34
- # Default prompt from docstring example
35
- DEFAULT_PROMPT = "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot."
36
 
37
  # HuggingFace Hub defaults
38
  DEFAULT_REPO_ID = "Lightricks/LTX-2"
@@ -42,6 +42,8 @@ DEFAULT_SPATIAL_UPSAMPLER_FILENAME = "ltx-2-spatial-upscaler-x2-1.0.safetensors"
42
 
43
  # Text encoder space URL
44
  TEXT_ENCODER_SPACE = "linoyts/gemma-text-encoder"
 
 
45
 
46
  def get_hub_or_local_checkpoint(repo_id: Optional[str] = None, filename: Optional[str] = None):
47
  """Download from HuggingFace Hub or use local checkpoint."""
@@ -62,7 +64,7 @@ def get_hub_or_local_checkpoint(repo_id: Optional[str] = None, filename: Optiona
62
 
63
  # Initialize pipeline at startup
64
  print("=" * 80)
65
- print("Loading LTX-2 2-stage pipeline...")
66
  print("=" * 80)
67
 
68
  checkpoint_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME)
@@ -77,7 +79,7 @@ print(f" text_encoder_space={TEXT_ENCODER_SPACE}")
77
 
78
  # Initialize pipeline WITHOUT text encoder (gemma_root=None)
79
  # Text encoding will be done by external space
80
- pipeline = TI2VidTwoStagesPipeline(
81
  checkpoint_path=checkpoint_path,
82
  distilled_lora_path=distilled_lora_path,
83
  distilled_lora_strength=DEFAULT_LORA_STRENGTH,
@@ -85,7 +87,6 @@ pipeline = TI2VidTwoStagesPipeline(
85
  gemma_root=None,
86
  loras=[],
87
  fp8transformer=False,
88
- local_files_only=False
89
  )
90
 
91
  # Initialize text encoder client
@@ -97,22 +98,72 @@ except Exception as e:
97
  print(f"⚠ Warning: Could not connect to text encoder space: {e}")
98
  text_encoder_client = None
99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  @spaces.GPU(duration=300)
101
  def generate_video(
102
- input_image,
 
 
103
  prompt: str,
104
  duration: float,
105
  enhance_prompt: bool = True,
106
  negative_prompt: str = DEFAULT_NEGATIVE_PROMPT,
107
  seed: int = 42,
108
  randomize_seed: bool = True,
109
- num_inference_steps: int = 25,
110
  cfg_guidance_scale: float = DEFAULT_CFG_GUIDANCE_SCALE,
111
  height: int = DEFAULT_HEIGHT,
112
  width: int = DEFAULT_WIDTH,
113
  progress=gr.Progress(track_tqdm=True)
114
  ):
115
- """Generate a video based on the given parameters."""
116
  try:
117
  # Randomize seed if checkbox is enabled
118
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
@@ -124,36 +175,52 @@ def generate_video(
124
  # Create output directory if it doesn't exist
125
  output_dir = Path("outputs")
126
  output_dir.mkdir(exist_ok=True)
127
- output_path = output_dir / f"video_{current_seed}.mp4"
128
 
129
- # Handle image input
130
  images = []
131
- temp_image_path = None # Initialize to None
132
- if input_image is not None:
133
- # Save uploaded image temporarily
134
- temp_image_path = output_dir / f"temp_input_{current_seed}.jpg"
135
- if hasattr(input_image, 'save'):
136
- input_image.save(temp_image_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  else:
138
- # If it's a file path already
139
- temp_image_path = Path(input_image)
140
- # Format: (image_path, frame_idx, strength)
141
- images = [(str(temp_image_path), 0, 1.0)]
142
  # Get embeddings from text encoder space
143
  print(f"Encoding prompt: {prompt}")
144
-
145
  if text_encoder_client is None:
146
  raise RuntimeError(
147
  f"Text encoder client not connected. Please ensure the text encoder space "
148
  f"({TEXT_ENCODER_SPACE}) is running and accessible."
149
  )
150
-
151
  try:
152
- # Prepare image for upload if it exists
153
- image_input = None
154
- if temp_image_path is not None:
155
- image_input = handle_file(str(temp_image_path))
156
-
157
  result = text_encoder_client.predict(
158
  prompt=prompt,
159
  enhance_prompt=enhance_prompt,
@@ -170,6 +237,9 @@ def generate_video(
170
  video_context_positive = embeddings['video_context']
171
  audio_context_positive = embeddings['audio_context']
172
 
 
 
 
173
  # Load negative contexts if available
174
  video_context_negative = embeddings.get('video_context_negative', None)
175
  audio_context_negative = embeddings.get('audio_context_negative', None)
@@ -203,50 +273,74 @@ def generate_video(
203
  audio_context_negative=audio_context_negative,
204
  )
205
 
206
- return str(output_path), current_seed
207
 
208
  except Exception as e:
209
  import traceback
210
  error_msg = f"Error: {str(e)}\n{traceback.format_exc()}"
211
  print(error_msg)
212
- return None
213
 
214
 
215
  # Create Gradio interface
216
- with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
217
- gr.Markdown("# LTX-2 🎥🔈: The First Open Source Audio-Video Model")
218
- gr.Markdown("State-of-the-art video & audio generation with Lightricks LTX-2 TI2V. Read more: [[model]](https://huggingface.co/Lightricks/LTX-2), [[code]](https://github.com/Lightricks/LTX-2)")
219
- with gr.Row():
220
- with gr.Column():
221
- input_image = gr.Image(
222
- label="Input Image (Optional)",
223
- type="pil",
224
- )
225
-
226
- prompt = gr.Textbox(
227
- label="Prompt",
228
- info="for best results - make it as elaborate as possible",
229
- value="Make this image come alive with cinematic motion, smooth animation",
230
- lines=3,
231
- placeholder="Describe the motion and animation you want..."
232
- )
233
 
234
- with gr.Row():
235
- duration = gr.Slider(
236
- label="Duration (seconds)",
237
- minimum=1.0,
238
- maximum=10.0,
239
- value=3.0,
240
- step=0.1
241
- )
242
- enhance_prompt = gr.Checkbox(
243
- label="Enhance Prompt",
244
- value=True
245
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
 
247
  generate_btn = gr.Button("Generate Video", variant="primary")
248
 
249
  with gr.Accordion("Advanced Settings", open=False):
 
 
 
 
 
 
 
 
 
 
 
 
 
250
  negative_prompt = gr.Textbox(
251
  label="Negative Prompt",
252
  value=DEFAULT_NEGATIVE_PROMPT,
@@ -270,7 +364,7 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
270
  label="Inference Steps",
271
  minimum=1,
272
  maximum=100,
273
- value=25,
274
  step=1
275
  )
276
 
@@ -296,11 +390,26 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
296
 
297
  with gr.Column():
298
  output_video = gr.Video(label="Generated Video", autoplay=True)
 
 
 
 
 
299
 
 
 
 
 
 
 
 
 
300
  generate_btn.click(
301
  fn=generate_video,
302
  inputs=[
303
- input_image,
 
 
304
  prompt,
305
  duration,
306
  enhance_prompt,
@@ -312,33 +421,22 @@ with gr.Blocks(title="LTX-2 Video 🎥🔈") as demo:
312
  height,
313
  width,
314
  ],
315
- outputs=[output_video,seed]
316
- )
317
-
318
- # Add example
319
- gr.Examples(
320
- examples=[
321
- [
322
- "astronaut.jpg",
323
- "An astronaut hatches from a fragile egg on the surface of the Moon, the shell cracking and peeling apart in gentle low-gravity motion. Fine lunar dust lifts and drifts outward with each movement, floating in slow arcs before settling back onto the ground. The astronaut pushes free in a deliberate, weightless motion, small fragments of the egg tumbling and spinning through the air. In the background, the deep darkness of space subtly shifts as stars glide with the camera's movement, emphasizing vast depth and scale. The camera performs a smooth, cinematic slow push-in, with natural parallax between the foreground dust, the astronaut, and the distant starfield. Ultra-realistic detail, physically accurate low-gravity motion, cinematic lighting, and a breath-taking, movie-like shot.",
324
- 5.0,
325
- ],
326
- [
327
- "kill_bill.jpeg",
328
- "A low, subsonic drone pulses as Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. A faint electrical hum fills the silence. Suddenly, accompanied by a deep metallic groan, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. Discordant strings swell as the blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen—each drip producing a wet, viscous stretching sound. The transformation starts subtly at first—a slight bend in the blade—then accelerates as the metal becomes increasingly fluid, the groaning intensifying. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her heartbeat rises in the mix—thump... thump-thump—as her breathing quickens slightly while she witnesses this impossible transformation. Sharp violin stabs punctuate each breath. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft, bell-like pings. Unintelligible whispers fade in and out as her expression shifts from calm readiness to bewilderment and concern, her heartbeat now pounding like a war drum, as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented. All sound cuts to silence—then a single devastating bass drop as the final droplet falls, leaving only her unsteady breathing in the dark.",
329
- 3.0,
330
- ]
331
- ],
332
- fn=generate_video,
333
- inputs=[input_image, prompt, duration],
334
- outputs = [output_video,seed],
335
- label="Example",
336
- cache_examples=True,
337
- cache_mode="lazy",
338
  )
339
 
340
  css = '''
341
- .gradio-container .contain{max-width: 1200px !important; margin: 0 auto !important}
 
 
 
 
 
 
 
 
 
 
342
  '''
 
343
  if __name__ == "__main__":
344
- demo.launch(theme=gr.themes.Citrus())
 
14
  from pathlib import Path
15
  from typing import Optional
16
  from huggingface_hub import hf_hub_download
17
+ from ltx_pipelines.keyframe_interpolation import KeyframeInterpolationPipeline
18
  from ltx_core.tiling import TilingConfig
19
  from ltx_pipelines.constants import (
20
  DEFAULT_SEED,
 
31
  # Custom negative prompt
32
  DEFAULT_NEGATIVE_PROMPT = "shaky, glitchy, low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly, transition, static"
33
 
34
+ # Default prompt for keyframe interpolation
35
+ DEFAULT_PROMPT = "Smooth cinematic transition between keyframes with natural motion and consistent lighting"
36
 
37
  # HuggingFace Hub defaults
38
  DEFAULT_REPO_ID = "Lightricks/LTX-2"
 
42
 
43
  # Text encoder space URL
44
  TEXT_ENCODER_SPACE = "linoyts/gemma-text-encoder"
45
+ # Image edit space URL
46
+ IMAGE_EDIT_SPACE = "linoyts/Qwen-Image-Edit-2509-Fast"
47
 
48
  def get_hub_or_local_checkpoint(repo_id: Optional[str] = None, filename: Optional[str] = None):
49
  """Download from HuggingFace Hub or use local checkpoint."""
 
64
 
65
  # Initialize pipeline at startup
66
  print("=" * 80)
67
+ print("Loading LTX-2 Keyframe Interpolation pipeline...")
68
  print("=" * 80)
69
 
70
  checkpoint_path = get_hub_or_local_checkpoint(DEFAULT_REPO_ID, DEFAULT_CHECKPOINT_FILENAME)
 
79
 
80
  # Initialize pipeline WITHOUT text encoder (gemma_root=None)
81
  # Text encoding will be done by external space
82
+ pipeline = KeyframeInterpolationPipeline(
83
  checkpoint_path=checkpoint_path,
84
  distilled_lora_path=distilled_lora_path,
85
  distilled_lora_strength=DEFAULT_LORA_STRENGTH,
 
87
  gemma_root=None,
88
  loras=[],
89
  fp8transformer=False,
 
90
  )
91
 
92
  # Initialize text encoder client
 
98
  print(f"⚠ Warning: Could not connect to text encoder space: {e}")
99
  text_encoder_client = None
100
 
101
+ # Initialize image edit client
102
+ print(f"Connecting to image edit space: {IMAGE_EDIT_SPACE}")
103
+ try:
104
+ image_edit_client = Client(IMAGE_EDIT_SPACE)
105
+ print("✓ Image edit client connected!")
106
+ except Exception as e:
107
+ print(f"⚠ Warning: Could not connect to image edit space: {e}")
108
+ image_edit_client = None
109
+
110
+ def generate_end_frame(start_frame, edit_prompt: str):
111
+ """Generate an end frame from the start frame using Qwen Image Edit."""
112
+ try:
113
+ if start_frame is None:
114
+ raise gr.Error("Please provide a start frame first")
115
+
116
+ if image_edit_client is None:
117
+ raise gr.Error(
118
+ f"Image edit client not connected. Please ensure the image edit space "
119
+ f"({IMAGE_EDIT_SPACE}) is running and accessible."
120
+ )
121
+
122
+ # Save start frame temporarily if needed
123
+ output_dir = Path("outputs")
124
+ output_dir.mkdir(exist_ok=True)
125
+ temp_path = output_dir / f"temp_start_for_edit.jpg"
126
+
127
+ if hasattr(start_frame, 'save'):
128
+ start_frame.save(temp_path)
129
+ image_input = handle_file(str(temp_path))
130
+ else:
131
+ image_input = handle_file(str(start_frame))
132
+
133
+ # Call Qwen Image Edit
134
+ result = image_edit_client.predict(
135
+ images=image_input,
136
+ prompt=edit_prompt,
137
+ api_name="/infer"
138
+ )
139
+
140
+ # Result should be the path to the edited image
141
+ return result
142
+
143
+ except Exception as e:
144
+ import traceback
145
+ error_msg = f"Error generating end frame: {str(e)}\n{traceback.format_exc()}"
146
+ print(error_msg)
147
+ raise gr.Error(error_msg)
148
+
149
  @spaces.GPU(duration=300)
150
  def generate_video(
151
+ start_frame,
152
+ end_frame_upload,
153
+ end_frame_generated,
154
  prompt: str,
155
  duration: float,
156
  enhance_prompt: bool = True,
157
  negative_prompt: str = DEFAULT_NEGATIVE_PROMPT,
158
  seed: int = 42,
159
  randomize_seed: bool = True,
160
+ num_inference_steps: int = DEFAULT_NUM_INFERENCE_STEPS,
161
  cfg_guidance_scale: float = DEFAULT_CFG_GUIDANCE_SCALE,
162
  height: int = DEFAULT_HEIGHT,
163
  width: int = DEFAULT_WIDTH,
164
  progress=gr.Progress(track_tqdm=True)
165
  ):
166
+ """Generate a video with keyframe interpolation between start and end frames."""
167
  try:
168
  # Randomize seed if checkbox is enabled
169
  current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
 
175
  # Create output directory if it doesn't exist
176
  output_dir = Path("outputs")
177
  output_dir.mkdir(exist_ok=True)
178
+ output_path = output_dir / f"keyframe_video_{current_seed}.mp4"
179
 
180
+ # Handle keyframe inputs - build list of (image_path, frame_idx, strength)
181
  images = []
182
+ temp_paths = []
183
+
184
+ # Determine which end frame to use (uploaded or generated)
185
+ end_frame = end_frame_generated if end_frame_generated is not None else end_frame_upload
186
+
187
+ if start_frame is None and end_frame is None:
188
+ raise ValueError("Please provide at least one keyframe (start or end frame)")
189
+
190
+ # Save start frame (frame index 0) if provided
191
+ if start_frame is not None:
192
+ temp_start_path = output_dir / f"temp_start_{current_seed}.jpg"
193
+ if hasattr(start_frame, 'save'):
194
+ start_frame.save(temp_start_path)
195
+ else:
196
+ temp_start_path = Path(start_frame)
197
+ temp_paths.append(temp_start_path)
198
+ images.append((str(temp_start_path), 0, 1.0))
199
+
200
+ # Save end frame (last frame index) if provided
201
+ if end_frame is not None:
202
+ temp_end_path = output_dir / f"temp_end_{current_seed}.jpg"
203
+ if hasattr(end_frame, 'save'):
204
+ end_frame.save(temp_end_path)
205
  else:
206
+ temp_end_path = Path(end_frame)
207
+ temp_paths.append(temp_end_path)
208
+ images.append((str(temp_end_path), num_frames - 1, 1.0))
209
+
210
  # Get embeddings from text encoder space
211
  print(f"Encoding prompt: {prompt}")
212
+
213
  if text_encoder_client is None:
214
  raise RuntimeError(
215
  f"Text encoder client not connected. Please ensure the text encoder space "
216
  f"({TEXT_ENCODER_SPACE}) is running and accessible."
217
  )
218
+
219
  try:
220
+ # Use first available frame for prompt enhancement
221
+ first_frame_path = temp_paths[0] if temp_paths else None
222
+ image_input = handle_file(str(first_frame_path)) if first_frame_path else None
223
+
 
224
  result = text_encoder_client.predict(
225
  prompt=prompt,
226
  enhance_prompt=enhance_prompt,
 
237
  video_context_positive = embeddings['video_context']
238
  audio_context_positive = embeddings['audio_context']
239
 
240
+ # Get the final prompt that was used (enhanced or original)
241
+ final_prompt = embeddings.get('prompt', prompt)
242
+
243
  # Load negative contexts if available
244
  video_context_negative = embeddings.get('video_context_negative', None)
245
  audio_context_negative = embeddings.get('audio_context_negative', None)
 
273
  audio_context_negative=audio_context_negative,
274
  )
275
 
276
+ return str(output_path), final_prompt, current_seed
277
 
278
  except Exception as e:
279
  import traceback
280
  error_msg = f"Error: {str(e)}\n{traceback.format_exc()}"
281
  print(error_msg)
282
+ return None, f"Error: {str(e)}", current_seed
283
 
284
 
285
  # Create Gradio interface
286
+ with gr.Blocks(title="LTX-2 Keyframe Interpolation 🎥🔈") as demo:
287
+ gr.Markdown("# LTX-2 Keyframe Interpolation 🎥🔈")
288
+ gr.Markdown("Generate smooth video transitions between keyframes with Lightricks LTX-2. Read more: [[model]](https://huggingface.co/Lightricks/LTX-2), [[code]](https://github.com/Lightricks/LTX-2)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
289
 
290
+ with gr.Row(elem_id="general_items"):
291
+ with gr.Column():
292
+ with gr.Group(elem_id="group_all"):
293
+ with gr.Row():
294
+ start_frame = gr.Image(
295
+ label="Start Frame (Optional)",
296
+ type="pil",
 
 
 
 
297
  )
298
+ with gr.Tabs():
299
+ with gr.Tab("Upload"):
300
+ end_frame_upload = gr.Image(
301
+ label="End Frame",
302
+ type="pil",
303
+ )
304
+
305
+ with gr.Tab("Generate"):
306
+ gr.Markdown("Generate an end frame from your start frame using AI")
307
+ edit_prompt = gr.Textbox(
308
+ label="Edit Prompt",
309
+ placeholder="Describe the transformation (e.g., '5 seconds later, sunset lighting')",
310
+ lines=2,
311
+ value="5 seconds in the future"
312
+ )
313
+ generate_end_btn = gr.Button("Generate End Frame", variant="secondary")
314
+ end_frame_generated = gr.Image(
315
+ label="Generated End Frame",
316
+ type="pil",
317
+ )
318
+
319
+ prompt = gr.Textbox(
320
+ label="Prompt",
321
+ info="Describe the motion/transition between frames",
322
+ value=DEFAULT_PROMPT,
323
+ lines=3,
324
+ placeholder="Describe the animation style and motion..."
325
+ )
326
+
327
 
328
  generate_btn = gr.Button("Generate Video", variant="primary")
329
 
330
  with gr.Accordion("Advanced Settings", open=False):
331
+ with gr.Row():
332
+ duration = gr.Slider(
333
+ label="Duration (seconds)",
334
+ minimum=1.0,
335
+ maximum=10.0,
336
+ value=3.0,
337
+ step=0.1
338
+ )
339
+ enhance_prompt = gr.Checkbox(
340
+ label="Enhance Prompt",
341
+ value=True
342
+ )
343
+
344
  negative_prompt = gr.Textbox(
345
  label="Negative Prompt",
346
  value=DEFAULT_NEGATIVE_PROMPT,
 
364
  label="Inference Steps",
365
  minimum=1,
366
  maximum=100,
367
+ value=DEFAULT_NUM_INFERENCE_STEPS,
368
  step=1
369
  )
370
 
 
390
 
391
  with gr.Column():
392
  output_video = gr.Video(label="Generated Video", autoplay=True)
393
+ final_prompt_output = gr.Textbox(
394
+ label="Final Prompt Used",
395
+ lines=5,
396
+ info="This is the prompt that was used for generation (enhanced if enabled)"
397
+ )
398
 
399
+ # Wire up generate end frame button
400
+ generate_end_btn.click(
401
+ fn=generate_end_frame,
402
+ inputs=[start_frame, edit_prompt],
403
+ outputs=[end_frame_generated]
404
+ )
405
+
406
+ # Wire up generate video button
407
  generate_btn.click(
408
  fn=generate_video,
409
  inputs=[
410
+ start_frame,
411
+ end_frame_upload,
412
+ end_frame_generated,
413
  prompt,
414
  duration,
415
  enhance_prompt,
 
421
  height,
422
  width,
423
  ],
424
+ outputs=[output_video, final_prompt_output, seed]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
  )
426
 
427
  css = '''
428
+ .fillable{max-width: 1100px !important}
429
+ .dark .progress-text {color: white}
430
+ #general_items{margin-top: 2em}
431
+ #group_all{overflow:visible}
432
+ #group_all .styler{overflow:visible}
433
+ #group_tabs .tabitem{padding: 0}
434
+ .tab-wrapper{margin-top: 0px;z-index: 999;position: absolute;width: 100%;background-color: var(--block-background-fill);padding: 0;}
435
+ #component-9-button{width: 50%;justify-content: center}
436
+ #component-11-button{width: 50%;justify-content: center}
437
+ #or_item{text-align: center; padding-top: 1em; padding-bottom: 1em; font-size: 1.1em;margin-left: .5em;margin-right: .5em;width: calc(100% - 1em)}
438
+ #fivesec{margin-top: 5em;margin-left: .5em;margin-right: .5em;width: calc(100% - 1em)}
439
  '''
440
+
441
  if __name__ == "__main__":
442
+ demo.launch(theme=gr.themes.Citrus(), css=css, share=True)