tuan2308 committed on
Commit
24dca9f
·
verified ·
1 Parent(s): 039eed5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -9
app.py CHANGED
@@ -22,15 +22,7 @@ EXEC_DEVICE = "cpu"
22
  FORCE_CPU = bool(int(os.getenv("FORCE_CPU", "0")))
23
 
24
 
25
- @spaces.GPU # bắt buộc cho ZeroGPU
26
- def load_pipeline():
27
- global EXEC_DEVICE
28
-
29
- # Ưu tiên GPU của ZeroGPU; cho phép ép CPU để tránh abort/OOM.
30
- device = "cuda" if torch.cuda.is_available() and not FORCE_CPU else "cpu"
31
- dtype = torch.float16 if device == "cuda" else torch.float32
32
- EXEC_DEVICE = device
33
-
34
  pipe = QwenImageEditPlusPipeline.from_pretrained(
35
  HF_BASE_MODEL,
36
  torch_dtype=dtype,
@@ -53,6 +45,30 @@ def load_pipeline():
53
  return pipe
54
 
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  pipe = load_pipeline()
57
 
58
 
 
22
  FORCE_CPU = bool(int(os.getenv("FORCE_CPU", "0")))
23
 
24
 
25
+ def _build_pipeline(device: str, dtype: torch.dtype):
 
 
 
 
 
 
 
 
26
  pipe = QwenImageEditPlusPipeline.from_pretrained(
27
  HF_BASE_MODEL,
28
  torch_dtype=dtype,
 
45
  return pipe
46
 
47
 
48
@spaces.GPU  # required for ZeroGPU: the decorated call is scheduled on a GPU worker
def load_pipeline():
    """Build the image-edit pipeline, preferring CUDA with a CPU fallback.

    Prefers the ZeroGPU CUDA device when available; the FORCE_CPU env flag
    forces CPU to avoid GPU-worker aborts/OOM. If building on CUDA fails for
    any reason, the build is retried once on CPU in float32.

    Returns:
        The pipeline produced by `_build_pipeline`.

    Side effects:
        Updates the module-level EXEC_DEVICE to the device the pipeline was
        actually built on.

    Raises:
        Whatever `_build_pipeline` raises when the CPU build (initial or
        fallback) also fails.
    """
    global EXEC_DEVICE

    # Prefer ZeroGPU's CUDA; allow forcing CPU to avoid worker abort/OOM.
    prefer_cuda = torch.cuda.is_available() and not FORCE_CPU
    device = "cuda" if prefer_cuda else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    try:
        pipe = _build_pipeline(device, dtype)
        EXEC_DEVICE = device
        return pipe
    except Exception:
        # NOTE(review): broad catch is deliberate here — GPU workers often
        # abort for reasons other than torch.cuda.OutOfMemoryError, so we
        # fall back to CPU on any build failure rather than crashing the app.
        if device == "cuda":
            device = "cpu"
            dtype = torch.float32
            pipe = _build_pipeline(device, dtype)
            EXEC_DEVICE = device
            return pipe
        # Bare raise preserves the original traceback exactly (unlike
        # `raise exc`, which appends an extra re-raise frame).
        raise
70
+
71
+
72
  pipe = load_pipeline()
73
 
74