megalado committed
Commit dbc1b3f · 1 Parent(s): 0dd5200

Fix PYTHONPATH issue and add runtime deps

Files changed (2)
  1. app.py +33 -18
  2. requirements.txt +2 -2
app.py CHANGED
@@ -1,46 +1,61 @@
-import subprocess, uuid, os
+import subprocess
+import uuid
+import os
 from pathlib import Path
 import gradio as gr
 
-# --- CONFIG ---------------------------------------------------------------
-CKPT_PATH = "checkpoints/t2m_50step.pt"  # you already uploaded this
+# ---------------------------------------------------------------------
+# CONFIG
+# ---------------------------------------------------------------------
+CKPT_PATH = "checkpoints/t2m_50step.pt"  # make sure this file exists
 DEVICE = "cpu"  # free HF Spaces have no GPU
-# --------------------------------------------------------------------------
+# ---------------------------------------------------------------------
 
 
 def generate_motion(prompt: str) -> str:
     """
-    Calls the repo’s built-in inference script and returns the path to
-    the generated BVH file so Gradio can hand it to the user.
+    Runs the MDM sampling script in a subprocess and returns the BVH
+    file path so Gradio can hand it to the user.
     """
     out_file = Path("/tmp") / f"{uuid.uuid4().hex}.bvh"
 
     cmd = [
         "python",
         "-m",
-        "motion_diffusion_model.sample.generate",  # script in the repo
-        "--model_path", CKPT_PATH,
-        "--prompt", prompt,
-        "--output", str(out_file),
-        "--device", DEVICE,
-        "--num_steps", "50"  # matches the checkpoint
+        "motion_diffusion_model.sample.generate",
+        "--model_path", str(CKPT_PATH),
+        "--prompt", prompt,
+        "--output", str(out_file),
+        "--device", DEVICE,
+        "--num_steps", "50",  # matches the checkpoint
     ]
 
-    # Run the subprocess and surface errors cleanly in the UI
-    completed = subprocess.run(cmd, capture_output=True, text=True)
+    # --- make sure the local repo root is on PYTHONPATH so
+    #     'utils.*' imports inside the script can be resolved
+    env = os.environ.copy()
+    root = Path(__file__).parent
+    env["PYTHONPATH"] = f"{env.get('PYTHONPATH', '')}:{root}"
+
+    completed = subprocess.run(cmd, env=env, capture_output=True, text=True)
     if completed.returncode != 0:
         raise RuntimeError(f"Inference failed:\n{completed.stderr}")
 
     return str(out_file)
 
 
-# ----------------- GRADIO UI ------------------------------------------------
+# ----------------------- Gradio UI ----------------------------------
 iface = gr.Interface(
     fn=generate_motion,
-    inputs=gr.Textbox(lines=2, placeholder="e.g. a person walks and waves"),
+    inputs=gr.Textbox(
+        lines=2,
+        placeholder="e.g. a person walks forward and waves"
+    ),
     outputs=gr.File(label="Download BVH"),
-    title="Text-to-Motion (MDM – 50-step)",
-    description="Enter a natural-language description to generate a 3-D human-motion BVH file.",
+    title="Motion Diffusion Model – Text-to-Motion (50-step CPU demo)",
+    description=(
+        "Enter a natural-language prompt and receive a 3-D skeletal "
+        "animation in BVH format."
+    ),
 )
 
 if __name__ == "__main__":
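Why the fix works: "python -m motion_diffusion_model.sample.generate" starts a fresh interpreter, and a child process does not inherit the parent's sys.path, so repo-local packages like 'utils' only resolve if the repo root is named in PYTHONPATH. Appending to the variable (rather than overwriting it) keeps anything the Space image already set. Below is a minimal, self-contained sketch of the same pattern; the run_with_repo_on_path helper and the 'import utils' probe are illustrative, not part of this commit:

    # pythonpath_demo.py -- sketch of the pattern used in the fix (names illustrative)
    import os
    import subprocess
    from pathlib import Path

    def run_with_repo_on_path(cmd, repo_root):
        """Run cmd in a child process whose PYTHONPATH includes repo_root."""
        env = os.environ.copy()
        existing = env.get("PYTHONPATH", "")
        # os.pathsep is ':' on Linux (what Spaces containers run) and ';' on
        # Windows; the commit hard-codes ':', which is fine inside the Space.
        env["PYTHONPATH"] = f"{existing}{os.pathsep}{repo_root}" if existing else str(repo_root)
        return subprocess.run(cmd, env=env, capture_output=True, text=True)

    if __name__ == "__main__":
        # Probe: can the child interpreter import a repo-local 'utils' package?
        result = run_with_repo_on_path(
            ["python", "-c", "import utils; print('utils resolved')"],
            Path(__file__).parent,
        )
        print(result.stdout or result.stderr)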
requirements.txt CHANGED
@@ -2,6 +2,6 @@ torch>=2.0
 gradio
 numpy
 tqdm
-transformers
 scipy
-git+https://github.com/openai/CLIP.git  # CLIP is needed for the text encoder
+transformers
+git+https://github.com/openai/CLIP.git
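Both re-added packages are needed at inference time (the removed comment notes that CLIP backs the text encoder). A hypothetical smoke test, not part of this commit, can confirm the deps resolve once the Space rebuilds; note that openai/CLIP installs under the module name clip:

    # smoke_test.py -- hypothetical import check for the runtime deps
    import importlib

    for name in ("torch", "gradio", "numpy", "tqdm", "scipy", "transformers", "clip"):
        importlib.import_module(name)  # raises ImportError if a dep is missing
        print(f"{name}: ok")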