lenzcom committed on
Commit
f537fa7
·
verified ·
1 Parent(s): ff3015e

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. Dockerfile +22 -14
Dockerfile CHANGED
@@ -1,29 +1,37 @@
1
- FROM node:18-slim
2
-
3
- # Install dependencies for building node-llama-cpp
4
- # ADDED: cmake is required for compiling llama.cpp
5
- RUN apt-get update && apt-get install -y python3 make g++ curl cmake
 
 
 
 
 
 
 
6
 
7
  WORKDIR /app
8
 
9
- # Copy package files
10
  COPY package*.json ./
11
 
12
- # Install npm dependencies
13
- # Note: node-llama-cpp might try to build during install
14
- RUN npm install
15
 
16
- # Copy source code
17
  COPY . .
18
 
19
- # Create models directory
20
  RUN mkdir -p models
21
 
22
- # Download the model during build
23
  RUN npx --no node-llama-cpp pull --dir ./models hf:Qwen/Qwen3-1.7B-GGUF:Q8_0 --filename Qwen3-1.7B-Q8_0.gguf
24
 
25
- # Expose the port HF expects
 
 
26
  EXPOSE 7860
27
 
28
- # Start the server
29
  CMD ["node", "server.js"]
 
# syntax=docker/dockerfile:1
# Image for a node-llama-cpp server on Hugging Face Spaces (serves on 7860).
FROM node:18

# Full toolchain required to compile the llama.cpp native bindings.
# --no-install-recommends keeps the layer lean; the apt lists are removed
# in the same layer so the package cache never reaches the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      build-essential \
      cmake \
      curl \
      g++ \
      make \
      python3 \
      python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Build and run as the non-root `node` user that ships with the base image
# (uid 1000 — the Hugging Face Spaces convention). /app is created and
# chowned up front so WORKDIR and npm can write to it.
RUN mkdir -p /app && chown node:node /app
USER node
WORKDIR /app

# Dependency manifests first: the (slow) install layer stays cached until
# package*.json changes.
COPY --chown=node:node package*.json ./

# --ignore-scripts defers node-llama-cpp's native postinstall build, which
# would otherwise fail here; it is rebuilt explicitly below.
RUN npm install --ignore-scripts

# Download the model BEFORE copying source code, so the multi-GB download
# layer is cached across application-code changes.
RUN mkdir -p models \
    && npx --no node-llama-cpp pull --dir ./models hf:Qwen/Qwen3-1.7B-GGUF:Q8_0 --filename Qwen3-1.7B-Q8_0.gguf

# Rebuild node-llama-cpp explicitly so the native addon is compiled for
# this Linux environment (skipped earlier by --ignore-scripts).
RUN npm rebuild node-llama-cpp

# Application source last — changes here invalidate only this layer.
# NOTE(review): add a .dockerignore (node_modules, models, .git) so this
# COPY cannot clobber the node_modules installed above.
COPY --chown=node:node . .

# Hugging Face Spaces routes HTTP traffic to 7860 (EXPOSE is documentation
# only; it does not publish the port).
EXPOSE 7860

# Exec form: node runs as PID 1 and receives SIGTERM on container stop.
CMD ["node", "server.js"]