Nguyen5 committed on
Commit
1b4bd91
·
1 Parent(s): ce9c77b
Files changed (3) hide show
  1. Dockerfile +23 -12
  2. requirements.txt +8 -6
  3. start.sh +5 -2
Dockerfile CHANGED
@@ -1,24 +1,35 @@
1
- # Dockerfile – HuggingFace Space (Docker + Ollama + Python)
 
2
 
3
- FROM ollama/ollama:latest
4
 
5
- # ==== System + Python ====
6
- RUN apt-get update && apt-get install -y \
7
- python3 python3-pip python3-venv \
8
- && rm -rf /var/lib/apt/lists/*
 
 
 
9
 
10
  WORKDIR /workspace
11
 
12
- # ==== Python deps ====
13
  COPY requirements.txt .
14
- RUN pip3 install --no-cache-dir -r requirements.txt
15
 
16
- # ==== App code ====
 
 
 
 
 
 
17
  COPY . .
18
 
19
- # Ollama server listen on all interfaces
20
- ENV OLLAMA_HOST=0.0.0.0
21
 
 
22
  EXPOSE 7860
23
 
24
- CMD ["bash", "start.sh"]
 
 
# syntax=docker/dockerfile:1
# HuggingFace Space image: Ubuntu + Ollama + Python venv, started via start.sh.

# Base image (explicitly tagged; bump the tag to pick up OS security updates)
FROM ubuntu:22.04

# Build-time only: suppress apt prompts without leaking the setting into the
# runtime environment (ENV DEBIAN_FRONTEND is a known anti-pattern).
ARG DEBIAN_FRONTEND=noninteractive

# Make pipelines fail on the first broken stage — without this, a failed
# `curl | sh` below would be masked by the shell's default pipe semantics.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Python toolchain + curl/ca-certificates (needed to fetch the Ollama installer
# over HTTPS). --no-install-recommends keeps the image small; the apt list
# cleanup happens in the same layer so it never bloats the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        python3 \
        python3-pip \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama.
# NOTE(review): this pipes an unpinned remote script into sh — consider
# downloading a pinned release and verifying its checksum for reproducibility.
RUN curl -fsSL https://ollama.com/install.sh | sh

WORKDIR /workspace

# Copy the dependency manifest alone first so the venv/pip layers stay cached
# until requirements.txt actually changes.
COPY requirements.txt .

# Create the virtual environment and install dependencies into it
# (one conceptual step; --no-cache-dir avoids baking the pip cache into a layer).
RUN python3 -m venv /workspace/venv && \
    /workspace/venv/bin/pip install --no-cache-dir -r requirements.txt

# Copy the rest of the project (app code + start.sh).
COPY . .

# start.sh is executed by bash below but must be readable/executable either way.
RUN chmod +x start.sh

# Ollama server must listen on all interfaces (restores the setting the
# previous image set explicitly; Ollama's default bind is 127.0.0.1 only).
ENV OLLAMA_HOST=0.0.0.0

# Gradio port — documentation only; HF Spaces routes traffic to it.
EXPOSE 7860

# Exec-form CMD so the script is PID 1's direct child and receives signals.
CMD ["/bin/bash", "start.sh"]
requirements.txt CHANGED
@@ -12,11 +12,11 @@ langchain-community
12
  langchain-text-splitters
13
 
14
  # Embeddings (local, free)
15
- sentence-transformers
16
- langchain-huggingface
17
 
18
  # VectorStore
19
- faiss-cpu
20
 
21
  # PDF + HTTP
22
  pypdf
@@ -24,10 +24,12 @@ requests
24
  beautifulsoup4
25
  python-dotenv
26
 
27
- # STT/TTS local (transformers)
28
  transformers
29
  accelerate
30
- torch
31
- torchaudio
 
 
32
  soundfile
33
  scipy
 
12
  langchain-text-splitters
13
 
14
  # Embeddings (local, free)
15
+ #sentence-transformers
16
+ #langchain-huggingface
17
 
18
  # VectorStore
19
+ #faiss-cpu
20
 
21
  # PDF + HTTP
22
  pypdf
 
24
  beautifulsoup4
25
  python-dotenv
26
 
27
+ # Transformers (CPU ok)
28
  transformers
29
  accelerate
30
+ #torch
31
+ #torchaudio
32
+
33
+ # Audio
34
  soundfile
35
  scipy
start.sh CHANGED
@@ -1,10 +1,13 @@
1
  #!/bin/bash
2
  set -e
3
 
 
 
 
4
  echo ">>> Starte Ollama Server ..."
5
  ollama serve &
6
 
7
- # Đợi Ollama server khởi động
8
  sleep 15
9
 
10
  MODEL_NAME="qwen2.5:1.5b-instruct"
@@ -13,4 +16,4 @@ echo ">>> Pull Model: $MODEL_NAME"
13
  ollama pull "$MODEL_NAME" || true
14
 
15
  echo ">>> Starte Gradio App ..."
16
- python3 -u app.py
 
1
  #!/bin/bash
2
  set -e
3
 
4
+ # Activate virtual environment
5
+ source /workspace/venv/bin/activate
6
+
7
  echo ">>> Starte Ollama Server ..."
8
  ollama serve &
9
 
10
+ # wait for Ollama
11
  sleep 15
12
 
13
  MODEL_NAME="qwen2.5:1.5b-instruct"
 
16
  ollama pull "$MODEL_NAME" || true
17
 
18
  echo ">>> Starte Gradio App ..."
19
+ python -u app.py