anvilinteractiv committed on
Commit
da30fa1
·
verified ·
1 Parent(s): ccc279e

Update gradio_app.py

Browse files
Files changed (1) hide show
  1. gradio_app.py +24 -4
gradio_app.py CHANGED
@@ -694,15 +694,16 @@ def build_app(
694
 
695
  if __name__ == '__main__':
696
  import argparse
 
697
  parser = argparse.ArgumentParser()
698
  parser.add_argument("--model_path", type=str, default='tencent/Hunyuan3D-2')
699
  parser.add_argument("--subfolder", type=str, default='hunyuan3d-dit-v2-0')
700
  parser.add_argument("--texgen_model_path", type=str, default='tencent/Hunyuan3D-2')
701
- parser.add_argument('--port', type=int, default=80)
702
  parser.add_argument('--host', type=str, default='0.0.0.0')
703
  parser.add_argument('--device', type=str, default='cuda')
704
  parser.add_argument('--mc_algo', type=str, default='mc')
705
- parser.add_argument('--cache_path', type=str, default='gradio_cache') # Fixed typo from 'cache-path' to 'cache_path'
706
  parser.add_argument('--enable_t23d', action='store_true')
707
  parser.add_argument('--disable_tex', action='store_true')
708
  parser.add_argument('--enable_flashvdm', action='store_true')
@@ -746,6 +747,7 @@ if __name__ == '__main__':
746
  if not args.disable_tex:
747
  try:
748
  from hy3dgen.texgen import Hunyuan3DPaintPipeline
 
749
  texgen_worker = Hunyuan3DPaintPipeline.from_pretrained(args.texgen_model_path)
750
  if args.low_vram_mode:
751
  texgen_worker.enable_model_cpu_offload()
@@ -759,15 +761,28 @@ if __name__ == '__main__':
759
  HAS_T2I = True
760
  if args.enable_t23d:
761
  from hy3dgen.text2image import HunyuanDiTPipeline
 
762
  t2i_worker = HunyuanDiTPipeline('Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled')
763
  HAS_T2I = True
764
 
765
- from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
 
 
 
 
 
 
 
766
  from hy3dgen.shapegen.pipelines import export_to_trimesh
767
  from hy3dgen.rembg import BackgroundRemover
768
 
769
  rmbg_worker = BackgroundRemover()
770
- i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(args.model_path)
 
 
 
 
 
771
  if args.enable_flashvdm:
772
  mc_algo = 'mc' if args.device in ['cpu', 'mps'] else args.mc_algo
773
  i23d_worker.enable_flashvdm(mc_algo=mc_algo)
@@ -778,7 +793,10 @@ if __name__ == '__main__':
778
  degenerate_face_remove_worker = DegenerateFaceRemover()
779
  face_reduce_worker = FaceReducer()
780
 
 
 
781
  app = FastAPI()
 
782
  static_dir = Path(SAVE_DIR).absolute()
783
  static_dir.mkdir(parents=True, exist_ok=True)
784
  app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
@@ -788,4 +806,6 @@ if __name__ == '__main__':
788
  torch.cuda.empty_cache()
789
  demo = build_app()
790
  app = gr.mount_gradio_app(app, demo, path="/")
 
 
791
  uvicorn.run(app, host=args.host, port=args.port)
 
694
 
695
  if __name__ == '__main__':
696
  import argparse
697
+
698
  parser = argparse.ArgumentParser()
699
  parser.add_argument("--model_path", type=str, default='tencent/Hunyuan3D-2')
700
  parser.add_argument("--subfolder", type=str, default='hunyuan3d-dit-v2-0')
701
  parser.add_argument("--texgen_model_path", type=str, default='tencent/Hunyuan3D-2')
702
+ parser.add_argument('--port', type=int, default=7860)
703
  parser.add_argument('--host', type=str, default='0.0.0.0')
704
  parser.add_argument('--device', type=str, default='cuda')
705
  parser.add_argument('--mc_algo', type=str, default='mc')
706
+ parser.add_argument('--cache-path', type=str, default='gradio_cache')
707
  parser.add_argument('--enable_t23d', action='store_true')
708
  parser.add_argument('--disable_tex', action='store_true')
709
  parser.add_argument('--enable_flashvdm', action='store_true')
 
747
  if not args.disable_tex:
748
  try:
749
  from hy3dgen.texgen import Hunyuan3DPaintPipeline
750
+
751
  texgen_worker = Hunyuan3DPaintPipeline.from_pretrained(args.texgen_model_path)
752
  if args.low_vram_mode:
753
  texgen_worker.enable_model_cpu_offload()
 
761
  HAS_T2I = True
762
  if args.enable_t23d:
763
  from hy3dgen.text2image import HunyuanDiTPipeline
764
+
765
  t2i_worker = HunyuanDiTPipeline('Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled')
766
  HAS_T2I = True
767
 
768
+ # Try importing with explicit module path to ensure availability
769
+ try:
770
+ from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
771
+ from hy3dgen.shapegen.postprocessing import FloaterRemover, DegenerateFaceRemover, FaceReducer
772
+ except ImportError as e:
773
+ print(f"Import error: {e}. Ensure hy3dgen.shapegen.postprocessing is installed.")
774
+ raise
775
+
776
  from hy3dgen.shapegen.pipelines import export_to_trimesh
777
  from hy3dgen.rembg import BackgroundRemover
778
 
779
  rmbg_worker = BackgroundRemover()
780
+ i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
781
+ args.model_path,
782
+ subfolder=args.subfolder,
783
+ use_safetensors=True,
784
+ device=args.device,
785
+ )
786
  if args.enable_flashvdm:
787
  mc_algo = 'mc' if args.device in ['cpu', 'mps'] else args.mc_algo
788
  i23d_worker.enable_flashvdm(mc_algo=mc_algo)
 
793
  degenerate_face_remove_worker = DegenerateFaceRemover()
794
  face_reduce_worker = FaceReducer()
795
 
796
+ # https://discuss.huggingface.co/t/how-to-serve-an-html-file/33921/2
797
+ # create a FastAPI app
798
  app = FastAPI()
799
+ # create a static directory to store the static files
800
  static_dir = Path(SAVE_DIR).absolute()
801
  static_dir.mkdir(parents=True, exist_ok=True)
802
  app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
 
806
  torch.cuda.empty_cache()
807
  demo = build_app()
808
  app = gr.mount_gradio_app(app, demo, path="/")
809
+ from spaces import zero
810
+ zero.startup()
811
  uvicorn.run(app, host=args.host, port=args.port)