roll-ai committed on
Commit
223dbfe
·
verified ·
2 Parent(s): 41c65d5 0c5a9b6

Merge branch 'roll-ai/vfx-3' into 'roll-ai/vfx-4'

Browse files
gradio_epic_only.py CHANGED
@@ -142,15 +142,19 @@ def anchor_generation(
142
  init_theta,
143
  init_phi,
144
  reverse_effect,
145
- extended_effect
 
146
  ):
147
  cross_frames = 3
148
  if extended_effect:
149
  camera = "extended"
150
  num_frames = 98 - cross_frames
 
 
151
  else:
152
  camera = "target"
153
 
 
154
  temp_input_path = f"/app/{Unique_identifier}/input.mp4"
155
  global RENDER_PATH, CROPS_PATH
156
 
@@ -250,7 +254,7 @@ def anchor_generation(
250
  "--init_dy", str(init_dy),
251
  "--init_dz", str(init_dz),
252
  "--init_theta", str(init_theta),
253
- "--init_phi", str(init_phi),
254
  ]
255
 
256
  print("🚀 Starting subprocess inference...")
 
142
  init_theta,
143
  init_phi,
144
  reverse_effect,
145
+ extended_effect,
146
+ centered_motion
147
  ):
148
  cross_frames = 3
149
  if extended_effect:
150
  camera = "extended"
151
  num_frames = 98 - cross_frames
152
+ elif centered_motion:
153
+ camera = "centered"
154
  else:
155
  camera = "target"
156
 
157
+
158
  temp_input_path = f"/app/{Unique_identifier}/input.mp4"
159
  global RENDER_PATH, CROPS_PATH
160
 
 
254
  "--init_dy", str(init_dy),
255
  "--init_dz", str(init_dz),
256
  "--init_theta", str(init_theta),
257
+ "--init_phi", str(init_phi)
258
  ]
259
 
260
  print("🚀 Starting subprocess inference...")
gradio_server.py CHANGED
@@ -87,7 +87,7 @@ def run_anchor_gen(
87
  depth_guidance_scale, window_size, overlap, max_res, load_size,
88
  sample_size, depth_size, seed_input, aspect_ratio_inputs,
89
  init_dx, init_dy, init_dz, init_theta, init_phi,
90
- reverse_effect, extended_effect
91
  ):
92
  if not unique_identifier:
93
  unique_identifier = str(uuid.uuid4())
@@ -136,7 +136,8 @@ def run_anchor_gen(
136
  init_theta=init_theta,
137
  init_phi=init_phi,
138
  reverse_effect=reverse_effect,
139
- extended_effect=extended_effect
 
140
  )
141
 
142
  # Save logs, captions, frame_shape
@@ -413,6 +414,7 @@ with gr.Blocks(title="Camera vfx Server") as demo:
413
  anchor_phi = gr.Number(label="Init Phi", value=0.0)
414
  anchor_reverse = gr.Checkbox(label="Reverse Effect", value=False)
415
  anchor_extended = gr.Checkbox(label="Extended Effect", value=False)
 
416
 
417
  anchor_btn = gr.Button("Generate Anchor")
418
  anchor_out = gr.File(label="Anchor ZIP Output")
@@ -427,7 +429,7 @@ with gr.Blocks(title="Camera vfx Server") as demo:
427
  anchor_depth_guidance, anchor_window, anchor_overlap, anchor_max_res, anchor_load_size,
428
  anchor_sample_size, anchor_depth_size, anchor_seed, anchor_aspect,
429
  anchor_dx, anchor_dy, anchor_dz, anchor_theta, anchor_phi,
430
- anchor_reverse, anchor_extended
431
  ],
432
  outputs=[anchor_out],
433
  api_name="anchor"
 
87
  depth_guidance_scale, window_size, overlap, max_res, load_size,
88
  sample_size, depth_size, seed_input, aspect_ratio_inputs,
89
  init_dx, init_dy, init_dz, init_theta, init_phi,
90
+ reverse_effect, extended_effect, centered_motion
91
  ):
92
  if not unique_identifier:
93
  unique_identifier = str(uuid.uuid4())
 
136
  init_theta=init_theta,
137
  init_phi=init_phi,
138
  reverse_effect=reverse_effect,
139
+ extended_effect=extended_effect,
140
+ centered_motion=centered_motion
141
  )
142
 
143
  # Save logs, captions, frame_shape
 
414
  anchor_phi = gr.Number(label="Init Phi", value=0.0)
415
  anchor_reverse = gr.Checkbox(label="Reverse Effect", value=False)
416
  anchor_extended = gr.Checkbox(label="Extended Effect", value=False)
417
+ centered_motion = gr.Checkbox(label="Centered Motion", value=False)
418
 
419
  anchor_btn = gr.Button("Generate Anchor")
420
  anchor_out = gr.File(label="Anchor ZIP Output")
 
429
  anchor_depth_guidance, anchor_window, anchor_overlap, anchor_max_res, anchor_load_size,
430
  anchor_sample_size, anchor_depth_size, anchor_seed, anchor_aspect,
431
  anchor_dx, anchor_dy, anchor_dz, anchor_theta, anchor_phi,
432
+ anchor_reverse, anchor_extended, centered_motion
433
  ],
434
  outputs=[anchor_out],
435
  api_name="anchor"
inference/v2v_data/demo.py CHANGED
@@ -684,7 +684,7 @@ class GetAnchorVideos:
684
  .unsqueeze(0)
685
  )
686
 
687
- if opts.camera == "extended":
688
  print("Using reverse camera trajectory")
689
 
690
  dtheta, dphi, dr, dx, dy = opts.target_pose
 
684
  .unsqueeze(0)
685
  )
686
 
687
+ if opts.camera == "extended" or opts.camera == "centered":
688
  print("Using reverse camera trajectory")
689
 
690
  dtheta, dphi, dr, dx, dy = opts.target_pose
inference/v2v_data/models/utils.py CHANGED
@@ -402,6 +402,7 @@ def generate_traj_specified_fast(c2ws_anchor, theta, phi, d_r, d_x, d_y, frame,
402
 
403
  c2ws_list = []
404
 
 
405
  for th, ph, r, x, y in zip(thetas, phis, rs, xs, ys):
406
  c2w_new = sphere2pose(
407
  c2ws_anchor,
 
402
 
403
  c2ws_list = []
404
 
405
+
406
  for th, ph, r, x, y in zip(thetas, phis, rs, xs, ys):
407
  c2w_new = sphere2pose(
408
  c2ws_anchor,