SarmaHighOnAI committed on
Commit
1f1d778
·
verified ·
1 Parent(s): 273d1bf

Create Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +32 -0
Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use a lightweight, standard Python image (Debian-based)
2
+ FROM python:3.10-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies (required for some Python tools)
8
+ RUN apt-get update && apt-get install -y \
9
+ build-essential \
10
+ curl \
11
+ && rm -rf /var/lib/apt/lists/*
12
+
13
+ # 1. Install the Pre-built AI Engine (Skips the compilation step that was failing)
14
+ # We use the 'cpu' specific URL to get the binary directly.
15
+ RUN pip install --no-cache-dir \
16
+ llama-cpp-python \
17
+ --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
18
+
19
+ # 2. Install Web Server tools
20
+ RUN pip install --no-cache-dir fastapi uvicorn huggingface_hub pydantic
21
+
22
+ # Copy your app code
23
+ COPY app.py .
24
+
25
+ # Grant permissions (just in case)
26
+ RUN chmod +x app.py
27
+
28
+ # Expose the API port
29
+ EXPOSE 7860
30
+
31
+ # Start the server
32
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]