Update Dockerfile
Dockerfile  CHANGED  (+14 -12)
@@ -1,35 +1,37 @@
 FROM python:3.11-slim
 
-# Install
+# Install required system dependencies
 RUN apt-get update && apt-get install -y \
     git curl build-essential cmake \
     && rm -rf /var/lib/apt/lists/*
 
+# Set working directory
 WORKDIR /app
 
-# Create writable directories
+# Create writable directories for caches and the database
 RUN mkdir -p /app/.cache /app/vector_database && chmod -R 777 /app
 
-# Set environment variables
+# Set environment variables for caching
 ENV TRANSFORMERS_CACHE=/app/.cache \
     HF_HOME=/app/.cache \
     CHROMADB_DISABLE_TELEMETRY=true
 
-#
-RUN pip install --no-cache-dir llama-cpp-python==0.2.
+# Pre-install the specific, known-working version of llama-cpp-python
+RUN pip install --no-cache-dir llama-cpp-python==0.2.61
 
-# Install
+# Install other dependencies from requirements.txt
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Copy
+# Copy the application code into the container
 COPY . .
 
-# Download your
-RUN curl -
-    https://huggingface.co/Kalpokoch/
-    && echo "✅ Model downloaded."
+# Download your fine-tuned TinyLlama GGUF model
+RUN curl -L -o /app/tinyllama_dop_q4_k_m.gguf \
+    https://huggingface.co/Kalpokoch/FinetunedQuantizedTinyLama/resolve/main/tinyllama_dop_q4_k_m.gguf
 
+# Expose the application port
 EXPOSE 7860
 
-
+# Run the FastAPI application using uvicorn
+CMD ["uvicorn", "app.app:app", "--host", "0.0.0.0", "--port", "7860"]
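
A quick local check of the updated image before pushing it to the Space; this is a sketch that assumes Docker is available on the machine, an arbitrary image tag, and that the FastAPI app keeps its default /docs route:

# Build the image from the repo root (tag name is illustrative)
docker build -t tinyllama-space .

# Run it, publishing the port the Dockerfile exposes
docker run --rm -p 7860:7860 tinyllama-space

# In another terminal, FastAPI's auto-generated docs page should respond
# (assuming the app does not disable it)
curl http://localhost:7860/docs

Note that the build downloads the GGUF model from Hugging Face at image-build time, so the first build needs network access and may take a while.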