dn6 HF Staff committed on
Commit
8c4dc53
·
1 Parent(s): 29e941b
Files changed (2) hide show
  1. __pycache__/aoti.cpython-310.pyc +0 -0
  2. app.py +7 -3
__pycache__/aoti.cpython-310.pyc CHANGED
Binary files a/__pycache__/aoti.cpython-310.pyc and b/__pycache__/aoti.cpython-310.pyc differ
 
app.py CHANGED
@@ -52,8 +52,8 @@ torch._dynamo.config.capture_scalar_outputs = True
52
  # --- Configuration ---
53
  MODEL_ID = os.environ.get("MODEL_PATH", "diffusers-internal-dev/world-engine-modular")
54
  pipe = ModularPipeline.from_pretrained(MODEL_ID, trust_remote_code=True, revision="aot-compatible")
55
- pipe.load_components("transformer", trust_remote_code=True, revision="aot-compatible", torch_dtype=torch.bfloat16)
56
- pipe.load_components(["vae", "text_encoder", "tokenizer"], trust_remote_code=True, torch_dtype=torch.bfloat16)
57
 
58
  SEED_FRAME_URLS = [
59
  "https://gist.github.com/user-attachments/assets/5d91c49a-2ae9-418f-99c0-e93ae387e1de",
@@ -113,7 +113,11 @@ def create_gpu_game_loop(command_queue: Queue):
113
  """
114
  pipe.to("cuda")
115
  pipe.blocks.sub_blocks['before_denoise'].sub_blocks['setup_kv_cache']._setup_kv_cache(pipe.transformer, pipe.device, torch.bfloat16)
116
- aoti_load_(pipe.transformer, "diffusers-internal-dev/world-engine-aot", "transformer.pt2")
 
 
 
 
117
 
118
  n_frames = pipe.transformer.config.n_frames
119
  print(f"Model loaded! (n_frames={n_frames})")
 
52
  # --- Configuration ---
53
  MODEL_ID = os.environ.get("MODEL_PATH", "diffusers-internal-dev/world-engine-modular")
54
  pipe = ModularPipeline.from_pretrained(MODEL_ID, trust_remote_code=True, revision="aot-compatible")
55
+ pipe.load_components(["transformer", "vae"], trust_remote_code=True, revision="aot-compatible", torch_dtype=torch.bfloat16)
56
+ pipe.load_components(["text_encoder", "tokenizer"], trust_remote_code=True, torch_dtype=torch.bfloat16)
57
 
58
  SEED_FRAME_URLS = [
59
  "https://gist.github.com/user-attachments/assets/5d91c49a-2ae9-418f-99c0-e93ae387e1de",
 
113
  """
114
  pipe.to("cuda")
115
  pipe.blocks.sub_blocks['before_denoise'].sub_blocks['setup_kv_cache']._setup_kv_cache(pipe.transformer, pipe.device, torch.bfloat16)
116
+ aoti_load_(pipe.transformer, "diffusers-internal-dev/world-engine-aot", "transformer-inference-patch.pt2")
117
+
118
+ pipe.vae.bake_weight_norm()
119
+ aoti_load_(pipe.vae.encoder, "diffusers-internal-dev/world-engine-aot", "encoder.pt2")
120
+ aoti_load_(pipe.vae.decoder, "diffusers-internal-dev/world-engine-aot", "decoder.pt2")
121
 
122
  n_frames = pipe.transformer.config.n_frames
123
  print(f"Model loaded! (n_frames={n_frames})")