BestWishYsh committed on
Commit
c02899d
·
verified ·
1 Parent(s): 3621eec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -34,20 +34,20 @@ pipe.to("cuda")
34
  # pipe.transformer.set_attention_backend("flash_hub")
35
  pipe.transformer.set_attention_backend("_flash_3_hub")
36
 
37
- @spaces.GPU(duration=1500)
38
- def compile_transformer():
39
- with spaces.aoti_capture(pipe.transformer) as call:
40
- pipe("arbitrary example prompt")
41
 
42
- exported = torch.export.export(
43
- pipe.transformer,
44
- args=call.args,
45
- kwargs=call.kwargs,
46
- )
47
- return spaces.aoti_compile(exported)
48
 
49
- compiled_transformer = compile_transformer()
50
- spaces.aoti_apply(compiled_transformer, pipe.transformer)
51
 
52
  # ---------------------------------------------------------------------------
53
  # Generation
 
34
  # pipe.transformer.set_attention_backend("flash_hub")
35
  pipe.transformer.set_attention_backend("_flash_3_hub")
36
 
37
+ # @spaces.GPU(duration=1500)
38
+ # def compile_transformer():
39
+ # with spaces.aoti_capture(pipe.transformer) as call:
40
+ # pipe("arbitrary example prompt")
41
 
42
+ # exported = torch.export.export(
43
+ # pipe.transformer,
44
+ # args=call.args,
45
+ # kwargs=call.kwargs,
46
+ # )
47
+ # return spaces.aoti_compile(exported)
48
 
49
+ # compiled_transformer = compile_transformer()
50
+ # spaces.aoti_apply(compiled_transformer, pipe.transformer)
51
 
52
  # ---------------------------------------------------------------------------
53
  # Generation