SarmaHighOnAI committed on
Commit
e440069
·
verified ·
1 Parent(s): 5f063dc

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +0 -21
Dockerfile CHANGED
@@ -1,32 +1,11 @@
1
- # Use a lightweight, standard Python image (Debian-based)
2
  FROM python:3.10-slim
3
 
4
- # Set working directory
5
  WORKDIR /app
6
 
7
- # Install system dependencies (required for some Python tools)
8
- RUN apt-get update && apt-get install -y \
9
- build-essential \
10
- curl \
11
- && rm -rf /var/lib/apt/lists/*
12
-
13
- # 1. Install the Pre-built AI Engine (Skips the compilation step that was failing)
14
- # We use the 'cpu' specific URL to get the binary directly.
15
- RUN pip install --no-cache-dir \
16
- llama-cpp-python \
17
- --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
18
-
19
- # 2. Install Web Server tools
20
  RUN pip install --no-cache-dir fastapi uvicorn huggingface_hub pydantic
21
 
22
- # Copy your app code
23
  COPY app.py .
24
 
25
- # Grant permissions (just in case)
26
- RUN chmod +x app.py
27
-
28
- # Expose the API port
29
  EXPOSE 7860
30
 
31
- # Start the server
32
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
 
1
  FROM python:3.10-slim
2
 
 
3
  WORKDIR /app
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  RUN pip install --no-cache-dir fastapi uvicorn huggingface_hub pydantic
6
 
 
7
  COPY app.py .
8
 
 
 
 
 
9
  EXPOSE 7860
10
 
 
11
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]