Spaces:
Runtime error
Runtime error
har1zarD committed on
Commit ·
9aa2932
1
Parent(s): 80f45eb
docker
Browse files:
- Dockerfile +1 -1
- app.py +28 -2
Dockerfile
CHANGED
|
@@ -26,7 +26,7 @@ RUN pip install --no-cache-dir -r requirements.txt
|
|
| 26 |
COPY --chown=user:user app.py .
|
| 27 |
|
| 28 |
# Create cache directory with correct permissions
|
| 29 |
-
RUN mkdir -p /home/user/.cache /tmp/transformers /tmp/huggingface /tmp/torch && chown -R user:user /home/user/.cache
|
| 30 |
|
| 31 |
# Switch to non-root user
|
| 32 |
USER user
|
|
|
|
| 26 |
COPY --chown=user:user app.py .
|
| 27 |
|
| 28 |
# Create cache directory with correct permissions
|
| 29 |
+
RUN mkdir -p /home/user/.cache /tmp/transformers /tmp/huggingface /tmp/torch && chown -R user:user /home/user/.cache /tmp/transformers /tmp/huggingface /tmp/torch
|
| 30 |
|
| 31 |
# Switch to non-root user
|
| 32 |
USER user
|
app.py
CHANGED
|
@@ -107,8 +107,29 @@ class ZeroShotFoodClassifier:
|
|
| 107 |
|
| 108 |
logger.info(f"🚀 Loading CLIP model: {self.model_name}")
|
| 109 |
|
| 110 |
-
# Centralizovan cache u /tmp
|
| 111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
load_kwargs: Dict[str, Any] = {"cache_dir": cache_dir}
|
| 114 |
if self.device in ("cuda", "mps"):
|
|
@@ -125,6 +146,11 @@ class ZeroShotFoodClassifier:
|
|
| 125 |
if torch.cuda.is_available():
|
| 126 |
torch.cuda.empty_cache()
|
| 127 |
self.model_name = FALLBACK_MODEL_NAME
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
self.processor = CLIPProcessor.from_pretrained(self.model_name, cache_dir=cache_dir)
|
| 129 |
fallback_kwargs = load_kwargs.copy()
|
| 130 |
self.model = CLIPModel.from_pretrained(self.model_name, **fallback_kwargs).to(self.device)
|
|
|
|
| 107 |
|
| 108 |
logger.info(f"🚀 Loading CLIP model: {self.model_name}")
|
| 109 |
|
| 110 |
+
# Centralizovan cache u /tmp; prefer HF_HOME, zatim TRANSFORMERS_CACHE
|
| 111 |
+
hf_home = os.environ.get("HF_HOME")
|
| 112 |
+
cache_dir = hf_home if hf_home else os.environ.get("TRANSFORMERS_CACHE", "/tmp/transformers")
|
| 113 |
+
|
| 114 |
+
# Ensure cache directories exist and are writable; clean stale locks
|
| 115 |
+
try:
|
| 116 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 117 |
+
# Transformers also uses a models subdir sometimes; ensure base is writable
|
| 118 |
+
for root_dir in {cache_dir, "/tmp/huggingface", "/tmp/torch"}:
|
| 119 |
+
try:
|
| 120 |
+
os.makedirs(root_dir, exist_ok=True)
|
| 121 |
+
except Exception:
|
| 122 |
+
pass
|
| 123 |
+
# Remove stale lock files that can block downloads
|
| 124 |
+
for dirpath, dirnames, filenames in os.walk(cache_dir):
|
| 125 |
+
for filename in filenames:
|
| 126 |
+
if filename.endswith(".lock") or filename.endswith("-partial"): # defensive
|
| 127 |
+
try:
|
| 128 |
+
os.remove(os.path.join(dirpath, filename))
|
| 129 |
+
except Exception:
|
| 130 |
+
pass
|
| 131 |
+
except Exception as e:
|
| 132 |
+
logger.warning(f"⚠️ Cache setup warning: {e}")
|
| 133 |
|
| 134 |
load_kwargs: Dict[str, Any] = {"cache_dir": cache_dir}
|
| 135 |
if self.device in ("cuda", "mps"):
|
|
|
|
| 146 |
if torch.cuda.is_available():
|
| 147 |
torch.cuda.empty_cache()
|
| 148 |
self.model_name = FALLBACK_MODEL_NAME
|
| 149 |
+
# On fallback, also retry ensuring cache writability and cleaning locks
|
| 150 |
+
try:
|
| 151 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 152 |
+
except Exception:
|
| 153 |
+
pass
|
| 154 |
self.processor = CLIPProcessor.from_pretrained(self.model_name, cache_dir=cache_dir)
|
| 155 |
fallback_kwargs = load_kwargs.copy()
|
| 156 |
self.model = CLIPModel.from_pretrained(self.model_name, **fallback_kwargs).to(self.device)
|