root committed on
Commit
b040c7e
·
1 Parent(s): 3a724c5

adding directly to firebase

Browse files
__pycache__/handler.cpython-310.pyc CHANGED
Binary files a/__pycache__/handler.cpython-310.pyc and b/__pycache__/handler.cpython-310.pyc differ
 
dancing.mp4 DELETED
Binary file (520 kB)
 
handler.py CHANGED
@@ -31,6 +31,10 @@ from rembg import remove
31
  import onnxruntime as ort
32
  import shutil
33
 
 
 
 
 
34
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
35
 
36
  if device.type != 'cuda':
@@ -44,6 +48,19 @@ class EndpointHandler():
44
  if not os.path.exists(config_path):
45
  raise FileNotFoundError(f"The configuration file was not found at: {config_path}")
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  self.config = OmegaConf.load(config_path)
48
  self.weight_dtype = torch.float16
49
  self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -277,10 +294,10 @@ class EndpointHandler():
277
  num_inference_steps = inputs.get("num_inference_steps", 25)
278
  cfg = inputs.get("cfg", 3.5)
279
  seed = inputs.get("seed", 123)
 
280
 
281
  base_dir = os.path.dirname(os.path.abspath(__file__))
282
 
283
- # Create a unique temporary directory for this request
284
  with tempfile.TemporaryDirectory() as temp_dir:
285
  print(f"Temporary directory created at {temp_dir}") # Debug statement
286
  video_root = os.path.join(temp_dir, "dw_poses_videos")
@@ -288,14 +305,10 @@ class EndpointHandler():
288
  downloaded_video_path = os.path.join(video_root, "downloaded_video.mp4")
289
  downloaded_image_path = os.path.join(video_root, "downloaded_image.jpg")
290
 
291
- # Download the video from the URL
292
  self.download_file(video_url, downloaded_video_path)
293
-
294
- # Download the reference image from the URL
295
  self.download_file(ref_image_url, downloaded_image_path)
296
  ref_image = Image.open(downloaded_image_path)
297
 
298
- # Calculate new dimensions
299
  original_width, original_height = ref_image.size
300
  max_dimension = max(original_width, original_height)
301
  if max_dimension > 600:
@@ -306,41 +319,12 @@ class EndpointHandler():
306
  width = original_width
307
  height = original_height
308
 
309
- # Remove the background from the reference image
310
  ref_image_no_bg = remove(ref_image)
311
  ref_image_no_bg_path = os.path.join(video_root, "ref_image_no_bg.png")
312
  ref_image_no_bg.save(ref_image_no_bg_path)
313
 
314
- # pose_output_path = os.path.join(temp_dir, "pose_videos")
315
-
316
- # print("we are number 1")
317
- # # Run the extract_dwpose_from_vid.py script
318
- # extract_pose_path = os.path.join(base_dir, 'extract_dwpose_from_vid.py')
319
- # command = f'python3 {extract_pose_path} --video_root {video_root}'
320
-
321
- # # Run the command with shell=True
322
- # result = subprocess.run(command, shell=True, capture_output=True, text=True)
323
- # if result.returncode != 0:
324
- # raise RuntimeError(f"Error running extract_dwpose_from_vid.py: {result.stderr}")
325
- # print("we are number 2")
326
-
327
- # # Locate the extracted pose video
328
- # save_dir = video_root + "_dwpose"
329
- # print(f"Expected save directory: {save_dir}") # Debug statement
330
- # pose_video_path = os.path.join(save_dir, "downloaded_video.mp4")
331
-
332
- # if not os.path.exists(pose_video_path):
333
- # print("Contents of the temporary directory:")
334
- # self.print_directory_contents(temp_dir)
335
- # raise FileNotFoundError(f"The pose video was not found at: {pose_video_path}")
336
-
337
- # Speed up the pose video by 4x
338
- # sped_up_pose_video_path = os.path.join(temp_dir, "sped_up_pose_video.mp4")
339
- # self.speed_up_video(pose_video_path, sped_up_pose_video_path, factor=4)
340
-
341
  torch.manual_seed(seed)
342
 
343
- # temporary_pose_video_path = os.path.join(base_dir, 'dancing.mp4')
344
  pose_images = read_frames(downloaded_video_path)
345
  src_fps = get_fps(downloaded_video_path)
346
 
@@ -365,53 +349,36 @@ class EndpointHandler():
365
  animation_path = os.path.join(save_dir, "animation_output.mp4")
366
  save_videos_grid(video, animation_path, n_rows=1, fps=src_fps)
367
 
368
- # Crop the face from the reference image and save it
369
  cropped_face_path = os.path.join(save_dir, "cropped_face.jpg")
370
  cropped_face = self._crop_face(ref_image_no_bg, save_path=cropped_face_path)
371
 
372
  torch.cuda.empty_cache()
373
 
374
- # Perform face swapping
375
- # self.print_directory_contents()
376
  swapped_face_video_path = os.path.join(save_dir, "swapped_face_output.mp4")
377
-
378
- # Subprocess call to facefusion for face swapping
379
- # self.print_directory_contents()
380
  facefusion_script_path = os.path.join(base_dir, 'facefusion', 'core.py')
381
  swap_command = f'python3 {facefusion_script_path} --source {cropped_face_path} --target {animation_path} --output {swapped_face_video_path}'
382
  swap_result = subprocess.run(swap_command, shell=True, capture_output=True, text=True)
383
  if swap_result.returncode != 0:
384
  raise RuntimeError(f"Error running face swap: {swap_result.stderr}")
385
 
386
- # Slow down the produced video by 4x
387
- # self.print_directory_contents(temp_dir)
388
- # slowed_down_animation_path = os.path.join(save_dir, "slowed_down_animation_output.mp4")
389
- # self.slow_down_video(animation_path, slowed_down_animation_path, factor=4)
390
-
391
- # Clear CUDA cache before RIFE interpolation
392
  torch.cuda.empty_cache()
393
 
394
-
395
- #remove background
396
- # self.print_directory_contents()
397
- # removed_background_output_path = os.path.join(save_dir, "removed_background_result.mp4")
398
- # remove_background_script_path = os.path.join(base_dir, "rembg_video.py")
399
- # remove_background_command = f'python3 {remove_background_script_path} {swapped_face_video_path} {removed_background_output_path}'
400
- # print("Command is " + remove_background_command)
401
- # remove_background_result = subprocess.run(remove_background_command, shell=True, capture_output=True, text=True)
402
- # if remove_background_result.returncode != 0:
403
- # raise RuntimeError(f"Error running removing backgriund: {remove_background_result.stderr}")
404
-
405
-
406
- # Perform RIFE interpolation
407
- # self.print_directory_contents(temp_dir)
408
- # rife_output_path = os.path.join(save_dir, "completed_result.mp4")
409
- # self.run_rife_interpolation(swapped_face_video_path, rife_output_path, multi=2, scale=0.5)
410
-
411
- # Encode the final video in base64
412
  with open(swapped_face_video_path, "rb") as video_file:
413
  video_base64 = base64.b64encode(video_file.read()).decode("utf-8")
414
 
415
- torch.cuda.empty_cache()
 
 
 
 
 
 
 
 
 
 
 
 
 
416
 
417
  return {"video": video_base64}
 
31
  import onnxruntime as ort
32
  import shutil
33
 
34
+ import firebase_admin
35
+ from firebase_admin import credentials, storage, firestore
36
+ import json
37
+
38
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
39
 
40
  if device.type != 'cuda':
 
48
  if not os.path.exists(config_path):
49
  raise FileNotFoundError(f"The configuration file was not found at: {config_path}")
50
 
51
+ service_account_info = os.getenv("FIREBASE_ACCOUNT_INFO")
52
+
53
+ if not service_account_info:
54
+ raise ValueError("The FIREBASE_SERVICE_ACCOUNT environment variable is not set.")
55
+ service_account_info = service_account_info.replace('/\\n/g', '\n')
56
+
57
+ service_account_info_dict = json.loads(service_account_info)
58
+
59
+ cred = credentials.Certificate(service_account_info_dict)
60
+ firebase_admin.initialize_app(cred, {
61
+ 'storageBucket': 'quiz-app-edffe.appspot.com'
62
+ })
63
+
64
  self.config = OmegaConf.load(config_path)
65
  self.weight_dtype = torch.float16
66
  self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
294
  num_inference_steps = inputs.get("num_inference_steps", 25)
295
  cfg = inputs.get("cfg", 3.5)
296
  seed = inputs.get("seed", 123)
297
+ firebase_doc_id = inputs.get("firebase_doc_id", "")
298
 
299
  base_dir = os.path.dirname(os.path.abspath(__file__))
300
 
 
301
  with tempfile.TemporaryDirectory() as temp_dir:
302
  print(f"Temporary directory created at {temp_dir}") # Debug statement
303
  video_root = os.path.join(temp_dir, "dw_poses_videos")
 
305
  downloaded_video_path = os.path.join(video_root, "downloaded_video.mp4")
306
  downloaded_image_path = os.path.join(video_root, "downloaded_image.jpg")
307
 
 
308
  self.download_file(video_url, downloaded_video_path)
 
 
309
  self.download_file(ref_image_url, downloaded_image_path)
310
  ref_image = Image.open(downloaded_image_path)
311
 
 
312
  original_width, original_height = ref_image.size
313
  max_dimension = max(original_width, original_height)
314
  if max_dimension > 600:
 
319
  width = original_width
320
  height = original_height
321
 
 
322
  ref_image_no_bg = remove(ref_image)
323
  ref_image_no_bg_path = os.path.join(video_root, "ref_image_no_bg.png")
324
  ref_image_no_bg.save(ref_image_no_bg_path)
325
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
  torch.manual_seed(seed)
327
 
 
328
  pose_images = read_frames(downloaded_video_path)
329
  src_fps = get_fps(downloaded_video_path)
330
 
 
349
  animation_path = os.path.join(save_dir, "animation_output.mp4")
350
  save_videos_grid(video, animation_path, n_rows=1, fps=src_fps)
351
 
 
352
  cropped_face_path = os.path.join(save_dir, "cropped_face.jpg")
353
  cropped_face = self._crop_face(ref_image_no_bg, save_path=cropped_face_path)
354
 
355
  torch.cuda.empty_cache()
356
 
 
 
357
  swapped_face_video_path = os.path.join(save_dir, "swapped_face_output.mp4")
 
 
 
358
  facefusion_script_path = os.path.join(base_dir, 'facefusion', 'core.py')
359
  swap_command = f'python3 {facefusion_script_path} --source {cropped_face_path} --target {animation_path} --output {swapped_face_video_path}'
360
  swap_result = subprocess.run(swap_command, shell=True, capture_output=True, text=True)
361
  if swap_result.returncode != 0:
362
  raise RuntimeError(f"Error running face swap: {swap_result.stderr}")
363
 
 
 
 
 
 
 
364
  torch.cuda.empty_cache()
365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  with open(swapped_face_video_path, "rb") as video_file:
367
  video_base64 = base64.b64encode(video_file.read()).decode("utf-8")
368
 
369
+ # Upload video to Firebase Storage
370
+ bucket = storage.bucket()
371
+ blob = bucket.blob(f"videos/{firebase_doc_id}/swapped_face_output.mp4")
372
+ blob.upload_from_filename(swapped_face_video_path)
373
+
374
+ # Make the file publicly accessible
375
+ blob.make_public()
376
+
377
+ video_url = blob.public_url
378
+
379
+ # Update Firestore document
380
+ db = firestore.client()
381
+ doc_ref = db.collection('danceResults').document(firebase_doc_id)
382
+ doc_ref.update({"videoURL": video_url})
383
 
384
  return {"video": video_base64}
requirements.txt CHANGED
@@ -61,4 +61,7 @@ mediapipe==0.9.1.0
61
  filetype==1.2.0
62
 
63
 
64
- transparent-background
 
 
 
 
61
  filetype==1.2.0
62
 
63
 
64
+ transparent-background
65
+
66
+
67
+ firebase-admin==6.0.1
sampler.py CHANGED
@@ -24,10 +24,11 @@ inputs = {
24
  "inputs": {
25
  "ref_image_url": "https://media.discordapp.net/attachments/1237667074210267217/1247311431028703242/image.png?ex=665f90cb&is=665e3f4b&hm=89767149275a6ef40e39d4a73d77019ab30ab665512398816bc614cfecdddb34&=&format=webp&quality=lossless&width=590&height=1194",
26
  "video_url": "https://firebasestorage.googleapis.com/v0/b/quiz-app-edffe.appspot.com/o/creeper_shortneed_pose.mp4?alt=media&token=f87f6e8c-6ab2-445c-8b55-205f08f1f5d5",
27
- "length": 96,
28
  "num_inference_steps": 25,
29
  "cfg": 3.5,
30
- "seed": 123
 
31
  }
32
  }
33
 
 
24
  "inputs": {
25
  "ref_image_url": "https://media.discordapp.net/attachments/1237667074210267217/1247311431028703242/image.png?ex=665f90cb&is=665e3f4b&hm=89767149275a6ef40e39d4a73d77019ab30ab665512398816bc614cfecdddb34&=&format=webp&quality=lossless&width=590&height=1194",
26
  "video_url": "https://firebasestorage.googleapis.com/v0/b/quiz-app-edffe.appspot.com/o/creeper_shortneed_pose.mp4?alt=media&token=f87f6e8c-6ab2-445c-8b55-205f08f1f5d5",
27
+ "length": 6,
28
  "num_inference_steps": 25,
29
  "cfg": 3.5,
30
+ "seed": 123,
31
+ "firebase_doc_id" : "0gkm91Ao0GsmQ2WdLP9c"
32
  }
33
  }
34