akhaliq HF Staff commited on
Commit
2aefcae
·
verified ·
1 Parent(s): 0a20908

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -26
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import os
3
  import tempfile
4
  import shutil
5
- from typing import Optional, Tuple, Union
6
  from huggingface_hub import InferenceClient
7
  from pathlib import Path
8
 
@@ -176,7 +176,7 @@ def generate_video(
176
  duration: int = 8,
177
  size: str = "1280x720",
178
  api_key: Optional[str] = None
179
- ) -> Tuple[Optional[str], str]:
180
  """Generate video using Sora-2 through Hugging Face Inference API with fal-ai provider."""
181
  cleanup_temp_files()
182
  try:
@@ -189,7 +189,7 @@ def generate_video(
189
  else:
190
  temp_client = client
191
  if not os.environ.get("HF_TOKEN") and not api_key:
192
- return None, "❌ Please set HF_TOKEN environment variable."
193
 
194
  video_bytes = temp_client.text_to_video(
195
  prompt,
@@ -204,19 +204,19 @@ def generate_video(
204
  finally:
205
  temp_file.close()
206
 
207
- return video_path, "✅ Video generated successfully!"
208
  except Exception as e:
209
- return None, f"❌ Error generating video: {str(e)}"
210
 
211
  def generate_video_from_image(
212
  image: Union[str, bytes],
213
  prompt: str,
214
  api_key: Optional[str] = None
215
- ) -> Tuple[Optional[str], str]:
216
  """Generate a video from a single input image + prompt using Sora-2 image-to-video."""
217
  cleanup_temp_files()
218
  if not prompt or prompt.strip() == "":
219
- return None, "❌ Please enter a prompt"
220
  try:
221
  if api_key:
222
  temp_client = InferenceClient(
@@ -227,7 +227,7 @@ def generate_video_from_image(
227
  else:
228
  temp_client = client
229
  if not os.environ.get("HF_TOKEN") and not api_key:
230
- return None, "❌ Please set HF_TOKEN environment variable."
231
 
232
  if isinstance(image, str):
233
  with open(image, "rb") as f:
@@ -235,7 +235,7 @@ def generate_video_from_image(
235
  elif isinstance(image, (bytes, bytearray)):
236
  input_image = image
237
  else:
238
- return None, "❌ Invalid image input. Please upload an image."
239
 
240
  video_bytes = temp_client.image_to_video(
241
  input_image,
@@ -251,20 +251,20 @@ def generate_video_from_image(
251
  finally:
252
  temp_file.close()
253
 
254
- return video_path, "✅ Video generated from image successfully!"
255
  except Exception as e:
256
- return None, f"❌ Error generating video from image: {str(e)}"
257
 
258
  def generate_with_auth(
259
  prompt: str,
260
  profile: gr.OAuthProfile | None
261
- ) -> Tuple[Optional[str], str]:
262
  """Wrapper function that checks if user is logged in before generating video."""
263
  if profile is None:
264
  raise gr.Error("Click Sign in with Hugging Face button to use this app for free")
265
 
266
  if not prompt or prompt.strip() == "":
267
- return None, "❌ Please enter a prompt"
268
 
269
  return generate_video(
270
  prompt,
@@ -277,12 +277,12 @@ def generate_with_auth_image(
277
  prompt: str,
278
  image_path: Optional[str],
279
  profile: gr.OAuthProfile | None
280
- ) -> Tuple[Optional[str], str]:
281
  """Checks login status then calls image->video generator."""
282
  if profile is None:
283
  raise gr.Error("Click Sign in with Hugging Face button to use this app for free")
284
  if not image_path:
285
- return None, "❌ Please upload an image"
286
  return generate_video_from_image(image=image_path, prompt=prompt, api_key=None)
287
 
288
  def create_ui():
@@ -304,14 +304,14 @@ def create_ui():
304
  }
305
  #share-btn-container, #share-btn-container-img {
306
  display: flex;
307
- padding-left: 0.5rem !important;
308
- padding-right: 0.5rem !important;
309
  background-color: #000000;
310
  justify-content: center;
311
  align-items: center;
312
  border-radius: 9999px !important;
313
- width: 13rem;
314
- margin-top: 10px;
315
  margin-left: auto;
316
  margin-right: auto;
317
  }
@@ -324,9 +324,10 @@ def create_ui():
324
  font-weight: 600;
325
  cursor:pointer;
326
  font-family: 'IBM Plex Sans', sans-serif;
327
- margin-left: 0.5rem !important;
328
- padding-top: 0.25rem !important;
329
- padding-bottom: 0.25rem !important;
 
330
  right:0;
331
  }
332
  #share-btn *, #share-btn-img * {
@@ -376,7 +377,6 @@ def create_ui():
376
  show_download_button=True,
377
  elem_id="text-to-video"
378
  )
379
- status_output = gr.Textbox(label="Status", interactive=False, visible=True)
380
 
381
  # Share button for text-to-video
382
  with gr.Group(elem_id="share-btn-container"):
@@ -387,7 +387,7 @@ def create_ui():
387
  generate_btn.click(
388
  fn=generate_with_auth,
389
  inputs=[prompt_input],
390
- outputs=[video_output, status_output],
391
  )
392
 
393
  share_button.click(
@@ -422,7 +422,6 @@ def create_ui():
422
  show_download_button=True,
423
  elem_id="image-to-video"
424
  )
425
- status_output_img = gr.Textbox(label="Status", interactive=False, visible=True)
426
 
427
  # Share button for image-to-video
428
  with gr.Group(elem_id="share-btn-container-img"):
@@ -433,7 +432,7 @@ def create_ui():
433
  generate_img_btn.click(
434
  fn=generate_with_auth_image,
435
  inputs=[img_prompt_input, image_input],
436
- outputs=[video_output_img, status_output_img],
437
  )
438
 
439
  share_button_img.click(
 
2
  import os
3
  import tempfile
4
  import shutil
5
+ from typing import Optional, Union
6
  from huggingface_hub import InferenceClient
7
  from pathlib import Path
8
 
 
176
  duration: int = 8,
177
  size: str = "1280x720",
178
  api_key: Optional[str] = None
179
+ ) -> Optional[str]:
180
  """Generate video using Sora-2 through Hugging Face Inference API with fal-ai provider."""
181
  cleanup_temp_files()
182
  try:
 
189
  else:
190
  temp_client = client
191
  if not os.environ.get("HF_TOKEN") and not api_key:
192
+ return None
193
 
194
  video_bytes = temp_client.text_to_video(
195
  prompt,
 
204
  finally:
205
  temp_file.close()
206
 
207
+ return video_path
208
  except Exception as e:
209
+ return None
210
 
211
  def generate_video_from_image(
212
  image: Union[str, bytes],
213
  prompt: str,
214
  api_key: Optional[str] = None
215
+ ) -> Optional[str]:
216
  """Generate a video from a single input image + prompt using Sora-2 image-to-video."""
217
  cleanup_temp_files()
218
  if not prompt or prompt.strip() == "":
219
+ return None
220
  try:
221
  if api_key:
222
  temp_client = InferenceClient(
 
227
  else:
228
  temp_client = client
229
  if not os.environ.get("HF_TOKEN") and not api_key:
230
+ return None
231
 
232
  if isinstance(image, str):
233
  with open(image, "rb") as f:
 
235
  elif isinstance(image, (bytes, bytearray)):
236
  input_image = image
237
  else:
238
+ return None
239
 
240
  video_bytes = temp_client.image_to_video(
241
  input_image,
 
251
  finally:
252
  temp_file.close()
253
 
254
+ return video_path
255
  except Exception as e:
256
+ return None
257
 
258
  def generate_with_auth(
259
  prompt: str,
260
  profile: gr.OAuthProfile | None
261
+ ) -> Optional[str]:
262
  """Wrapper function that checks if user is logged in before generating video."""
263
  if profile is None:
264
  raise gr.Error("Click Sign in with Hugging Face button to use this app for free")
265
 
266
  if not prompt or prompt.strip() == "":
267
+ return None
268
 
269
  return generate_video(
270
  prompt,
 
277
  prompt: str,
278
  image_path: Optional[str],
279
  profile: gr.OAuthProfile | None
280
+ ) -> Optional[str]:
281
  """Checks login status then calls image->video generator."""
282
  if profile is None:
283
  raise gr.Error("Click Sign in with Hugging Face button to use this app for free")
284
  if not image_path:
285
+ return None
286
  return generate_video_from_image(image=image_path, prompt=prompt, api_key=None)
287
 
288
  def create_ui():
 
304
  }
305
  #share-btn-container, #share-btn-container-img {
306
  display: flex;
307
+ padding-left: 0.4rem !important;
308
+ padding-right: 0.4rem !important;
309
  background-color: #000000;
310
  justify-content: center;
311
  align-items: center;
312
  border-radius: 9999px !important;
313
+ width: 10rem;
314
+ margin-top: 8px;
315
  margin-left: auto;
316
  margin-right: auto;
317
  }
 
324
  font-weight: 600;
325
  cursor:pointer;
326
  font-family: 'IBM Plex Sans', sans-serif;
327
+ margin-left: 0.4rem !important;
328
+ padding-top: 0.2rem !important;
329
+ padding-bottom: 0.2rem !important;
330
+ font-size: 0.85rem;
331
  right:0;
332
  }
333
  #share-btn *, #share-btn-img * {
 
377
  show_download_button=True,
378
  elem_id="text-to-video"
379
  )
 
380
 
381
  # Share button for text-to-video
382
  with gr.Group(elem_id="share-btn-container"):
 
387
  generate_btn.click(
388
  fn=generate_with_auth,
389
  inputs=[prompt_input],
390
+ outputs=[video_output],
391
  )
392
 
393
  share_button.click(
 
422
  show_download_button=True,
423
  elem_id="image-to-video"
424
  )
 
425
 
426
  # Share button for image-to-video
427
  with gr.Group(elem_id="share-btn-container-img"):
 
432
  generate_img_btn.click(
433
  fn=generate_with_auth_image,
434
  inputs=[img_prompt_input, image_input],
435
+ outputs=[video_output_img],
436
  )
437
 
438
  share_button_img.click(