Spaces:
Sleeping
Sleeping
Fix permission issues: Change cache directory to /app/models
Browse files
- Dockerfile +6 -6
- app.py +7 -3
- configs/envs/.env.prod +4 -4
- scripts/download_models.py +10 -2
Dockerfile
CHANGED
|
@@ -31,17 +31,17 @@ RUN pip install --no-cache-dir -r requirements.txt

 31   COPY . .
 32
 33   # Create necessary directories with proper permissions
 34 - RUN mkdir -p /
 35 - chmod 777 /
 36
 37   # Set environment variables for Spaces
 38   ENV APP_ENV=prod \
 39   GRADIO_SERVER_NAME=0.0.0.0 \
 40   GRADIO_SERVER_PORT=7860 \
 41 - MODEL_CACHE_DIR=/
 42 - HF_HOME=/
 43 - TRANSFORMERS_CACHE=/
 44 - TORCH_HOME=/
 45
 46   # Expose port for Spaces
 47   EXPOSE 7860

 31   COPY . .
 32
 33   # Create necessary directories with proper permissions
 34 + RUN mkdir -p /app/models /tmp/logs /tmp/temp && \
 35 + chmod 777 /app/models /tmp/logs /tmp/temp
 36
 37   # Set environment variables for Spaces
 38   ENV APP_ENV=prod \
 39   GRADIO_SERVER_NAME=0.0.0.0 \
 40   GRADIO_SERVER_PORT=7860 \
 41 + MODEL_CACHE_DIR=/app/models \
 42 + HF_HOME=/app/models \
 43 + TRANSFORMERS_CACHE=/app/models \
 44 + TORCH_HOME=/app/models
 45
 46   # Expose port for Spaces
 47   EXPOSE 7860
app.py
CHANGED
|
@@ -13,9 +13,9 @@ from pathlib import Path

 13   os.environ['APP_ENV'] = 'prod'
 14   os.environ['GRADIO_SERVER_NAME'] = '0.0.0.0'
 15   os.environ['GRADIO_SERVER_PORT'] = '7860'
 16 - os.environ['MODEL_CACHE_DIR'] = '/
 17 - os.environ['HF_HOME'] = '/
 18 - os.environ['TRANSFORMERS_CACHE'] = '/
 19
 20   # Add src to Python path
 21   src_path = Path(__file__).parent / "src"

@@ -78,6 +78,10 @@ def download_essential_models():

 78
 79   return successful > 0
 80
 81   except Exception as e:
 82   logger.error(f"❌ Error downloading models: {e}")
 83   return False

 13   os.environ['APP_ENV'] = 'prod'
 14   os.environ['GRADIO_SERVER_NAME'] = '0.0.0.0'
 15   os.environ['GRADIO_SERVER_PORT'] = '7860'
 16 + os.environ['MODEL_CACHE_DIR'] = '/app/models'
 17 + os.environ['HF_HOME'] = '/app/models'
 18 + os.environ['TRANSFORMERS_CACHE'] = '/app/models'
 19
 20   # Add src to Python path
 21   src_path = Path(__file__).parent / "src"

 78
 79   return successful > 0
 80
 81 + except PermissionError as e:
 82 + logger.error(f"❌ Permission denied for model cache: {e}")
 83 + logger.info("🔄 Using fallback model loading strategy...")
 84 + return True  # Continue with app initialization
 85   except Exception as e:
 86   logger.error(f"❌ Error downloading models: {e}")
 87   return False
configs/envs/.env.prod
CHANGED
|
@@ -10,12 +10,12 @@ GRADIO_SHARE=False

 10   # Model Configuration
 11   DEFAULT_MODEL=distil-whisper
 12   DEFAULT_LANGUAGE=hindi
 13 - MODEL_CACHE_DIR=/
 14
 15   # HuggingFace Configuration
 16 - HF_HOME=/
 17 - TRANSFORMERS_CACHE=/
 18 - TORCH_HOME=/
 19
 20   # GPU Configuration
 21   ENABLE_GPU=False

 10   # Model Configuration
 11   DEFAULT_MODEL=distil-whisper
 12   DEFAULT_LANGUAGE=hindi
 13 + MODEL_CACHE_DIR=/app/models
 14
 15   # HuggingFace Configuration
 16 + HF_HOME=/app/models
 17 + TRANSFORMERS_CACHE=/app/models
 18 + TORCH_HOME=/app/models
 19
 20   # GPU Configuration
 21   ENABLE_GPU=False
scripts/download_models.py
CHANGED
|
@@ -26,9 +26,17 @@ warnings.filterwarnings("ignore")

 26   class ModelDownloader:
 27   """Downloads and manages speech-to-text models for Spaces deployment."""
 28
 29 - def __init__(self, cache_dir: str = "/
 30   self.cache_dir = Path(cache_dir)
 31 -
 32
 33   # Configure logging
 34   logging.basicConfig(

 26   class ModelDownloader:
 27   """Downloads and manages speech-to-text models for Spaces deployment."""
 28
 29 + def __init__(self, cache_dir: str = "/app/models", use_auth_token: Optional[str] = None):
 30   self.cache_dir = Path(cache_dir)
 31 + try:
 32 + self.cache_dir.mkdir(parents=True, exist_ok=True)
 33 + # Set permissions
 34 + os.chmod(str(self.cache_dir), 0o777)
 35 + except PermissionError:
 36 + # Fallback to a different directory if needed
 37 + self.cache_dir = Path.home() / ".cache" / "models"
 38 + self.cache_dir.mkdir(parents=True, exist_ok=True)
 39 + self.logger.warning(f"Using fallback cache directory: {self.cache_dir}")
 40
 41   # Configure logging
 42   logging.basicConfig(