Create Dockerfile
Dockerfile (ADDED, +23 -0)
FROM ghcr.io/ggml-org/llama.cpp:full

WORKDIR /app

# Install packages
RUN apt-get update && apt-get install -y python3-pip
RUN pip install -U huggingface_hub

# Download the model from Hugging Face
RUN python3 -c 'from huggingface_hub import hf_hub_download; \
repo="mradermacher/Qwen3-Coder-30B-A3B-Instruct-480B-Distill-V2-Fp32-i1-GGUF"; \
hf_hub_download(repo_id=repo, filename="Qwen3-Coder-30B-A3B-Instruct-480B-Distill-V2-Fp32.i1-IQ3_M.gguf", local_dir="/app")'

# Server configuration
CMD ["--server", \
"-m", "/app/Qwen3-Coder-30B-A3B-Instruct-480B-Distill-V2-Fp32.i1-IQ3_M.gguf", \
"--host", "0.0.0.0", \
"--port", "7860", \
"-t", "2", \
"--cache-type-k", "q8_0", \
"--cache-type-v", "iq4_nl", \
"-c", "32000", \
"-n", "8000"]
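Once the image is built and run with port 7860 published (for example `docker build -t qwen3-space .` followed by `docker run -p 7860:7860 qwen3-space`; the tag is just a placeholder), the llama.cpp server can be queried over its OpenAI-compatible HTTP API. A minimal client sketch, assuming the server's standard /v1/chat/completions route and the host/port set in the CMD above:

# smoke_test.py: minimal client for the llama.cpp server configured above.
# Assumes the container's port 7860 is published to localhost; the route
# /v1/chat/completions is the server's OpenAI-compatible chat endpoint.
import json
import urllib.request

payload = {
    "messages": [
        {"role": "user", "content": "Write a Python function that reverses a string."}
    ],
    "max_tokens": 256,
}

req = urllib.request.Request(
    "http://localhost:7860/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)

with urllib.request.urlopen(req) as resp:
    reply = json.load(resp)

print(reply["choices"][0]["message"]["content"])

Two build-dependent caveats on the configuration above: quantized V-cache types such as iq4_nl have historically required flash attention (-fa) to be enabled in llama.cpp, so the server may refuse to start without it; and on newer Ubuntu-based images, system-wide pip installs can fail under PEP 668 unless --break-system-packages is added. Both depend on the exact llama.cpp :full image in use.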