MogensR committed on
Commit
e544268
·
1 Parent(s): 9226b16

Update core/app.py

Browse files
Files changed (1) hide show
  1. core/app.py +22 -2
core/app.py CHANGED
@@ -33,6 +33,8 @@ def _map_env(src: str, dst: str):
33
  _map_env("HFHOME", "HF_HOME")
34
  _map_env("TRANSFORMERSCACHE", "TRANSFORMERS_CACHE")
35
  _map_env("TORCHHOME", "TORCH_HOME")
 
 
36
 
37
  # Critical defaults (safe even if already set elsewhere)
38
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
@@ -40,6 +42,25 @@ def _map_env(src: str, dst: str):
40
  os.environ.setdefault("APP_ENV", "production")
41
  os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  # If you use early_env in your project, keep this import (harmless if absent)
44
  try:
45
  import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
@@ -51,7 +72,6 @@ def _map_env(src: str, dst: str):
51
  import traceback
52
  import sys
53
  import time
54
- from pathlib import Path
55
  from typing import Optional, Tuple, Dict, Any, Callable
56
 
57
  # Mitigate CUDA fragmentation (must be set before importing torch)
@@ -177,7 +197,7 @@ def __init__(self):
177
  self.two_stage_processor: Optional[Any] = None
178
  self.models_loaded = False
179
  self.loading_lock = threading.Lock()
180
- self.cancel_event = threading.Event()
181
  self.progress_tracker: Optional[ProgressTracker] = None
182
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
183
 
 
33
  _map_env("HFHOME", "HF_HOME")
34
  _map_env("TRANSFORMERSCACHE", "TRANSFORMERS_CACHE")
35
  _map_env("TORCHHOME", "TORCH_HOME")
36
+ # Optional: GPU visibility alias
37
+ _map_env("CUDAVISIBLEDEVICES", "CUDA_VISIBLE_DEVICES")
38
 
39
  # Critical defaults (safe even if already set elsewhere)
40
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
 
42
  os.environ.setdefault("APP_ENV", "production")
43
  os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
44
 
45
# Ensure reasonable cache defaults if not set.
# Only fills in a value when the variable is absent, so explicit
# user/deployment configuration always wins (setdefault semantics).
from pathlib import Path
_base_cache = Path.home() / ".cache"
for _cache_var, _cache_path in (
    ("HF_HOME", _base_cache / "huggingface"),
    ("TRANSFORMERS_CACHE", _base_cache / "huggingface" / "hub"),
    ("TORCH_HOME", _base_cache / "torch"),
):
    os.environ.setdefault(_cache_var, str(_cache_path))
51
+
52
# Synthesize CLOUDINARY_URL from parts if missing
def _ensure_cloudinary_url():
    """Build CLOUDINARY_URL from its component env vars when unset.

    Does nothing if CLOUDINARY_URL is already present, and leaves the
    environment untouched unless all three parts (API key, API secret,
    cloud name) are set to non-empty values.
    """
    if not os.getenv("CLOUDINARY_URL"):
        parts = [
            os.getenv(v)
            for v in (
                "CLOUDINARY_API_KEY",
                "CLOUDINARY_API_SECRET",
                "CLOUDINARY_CLOUD_NAME",
            )
        ]
        if all(parts):
            api_key, api_secret, cloud_name = parts
            os.environ["CLOUDINARY_URL"] = f"cloudinary://{api_key}:{api_secret}@{cloud_name}"

_ensure_cloudinary_url()
63
+
64
  # If you use early_env in your project, keep this import (harmless if absent)
65
  try:
66
  import early_env # sets OMP/MKL/OPENBLAS + torch threads safely
 
72
  import traceback
73
  import sys
74
  import time
 
75
  from typing import Optional, Tuple, Dict, Any, Callable
76
 
77
  # Mitigate CUDA fragmentation (must be set before importing torch)
 
197
  self.two_stage_processor: Optional[Any] = None
198
  self.models_loaded = False
199
  self.loading_lock = threading.Lock()
200
+ self.cancel_event = threading.Event()
201
  self.progress_tracker: Optional[ProgressTracker] = None
202
  logger.info(f"VideoProcessor on device: {self.device_manager.get_optimal_device()}")
203