# llm-inference-server / Dockerfile
# Source: Hugging Face Space by Andres37062
# Commit 088a71f (verified): "Create cache directory with proper permissions"
FROM python:3.10-slim

# All subsequent relative paths resolve under /app.
WORKDIR /app

# Hugging Face model downloads need a writable cache. HF_HOME is the current
# variable; TRANSFORMERS_CACHE is the legacy name, kept for older
# transformers releases that still read it.
ENV HF_HOME=/app/.cache \
    TRANSFORMERS_CACHE=/app/.cache

# World-writable because Hugging Face Spaces runs the container as an
# arbitrary non-root UID, which must be able to write model files here.
RUN mkdir -p /app/.cache && chmod 777 /app/.cache

# Install dependencies before copying application code so the pip layer
# stays cached when only app.py changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Application code last — it changes most often.
COPY app.py .

# Documentation only (does not publish the port); 7860 is the port
# Hugging Face Spaces expects the HTTP server on.
EXPOSE 7860

# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM on stop.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]