code-slicer committed on
Commit
69a3ceb
·
verified ·
1 Parent(s): 8cfc181

Delete Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +0 -88
Dockerfile DELETED
@@ -1,88 +0,0 @@
1
- # 1. ํŒŒ์ด์ฌ ๋ฒ„์ „์„ 3.11์œผ๋กœ ์„ค์ •
2
- FROM python:3.11-slim
3
-
4
- # 2. ํ•„์š”ํ•œ ์‹œ์Šคํ…œ ํŒจํ‚ค์ง€ ๋ฐ git-lfs ์„ค์น˜
5
- RUN pip install --no-cache-dir hf_transfer>=0.1.6
6
- RUN apt-get update && apt-get install -y \
7
- build-essential \
8
- curl \
9
- git \
10
- git-lfs \
11
- && rm -rf /var/lib/apt/lists/*
12
-
13
- # Ollama ์„ค์น˜
14
- RUN curl -fsSL https://ollama.com/install.sh | sh
15
-
16
- # โœ… ํ•ต์‹ฌ ๋ณ€๊ฒฝ: /tmp ๋””๋ ‰ํ† ๋ฆฌ ์‚ฌ์šฉ
17
- ENV OLLAMA_MODEL=gemma2:9b
18
- ENV OLLAMA_HOME=/tmp/ollama-home
19
- ENV OLLAMA_MODELS=/tmp/ollama-home
20
-
21
- # ๋นŒ๋“œ ์ค‘ ๋ชจ๋ธ ๋‹ค์šด๋กœ๋“œ (/tmp ์‚ฌ์šฉ)
22
- RUN set -eux; \
23
- mkdir -p /tmp/ollama-home; \
24
- OLLAMA_HOME=/tmp/ollama-home OLLAMA_MODELS=/tmp/ollama-home ollama serve & pid=$!; \
25
- i=0; \
26
- until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
27
- i=$((i+1)); \
28
- [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
29
- sleep 1; \
30
- done; \
31
- OLLAMA_HOME=/tmp/ollama-home OLLAMA_MODELS=/tmp/ollama-home ollama pull gemma2:9b; \
32
- kill "$pid" || true
33
-
34
- # 3. ์ž‘์—… ๋””๋ ‰ํ„ฐ๋ฆฌ ์„ค์ •
35
- WORKDIR /app
36
-
37
- # 6. ํŒŒ์ด์ฌ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์„ค์น˜
38
- COPY requirements.txt .
39
- RUN pip install --no-cache-dir -r requirements.txt
40
-
41
- # 4. ์†Œ์Šค ์ฝ”๋“œ ๋ณต์‚ฌ
42
- COPY . /app
43
-
44
- # 5. Git LFS ์„ค์ • ๋ฐ ๋Œ€์šฉ๋Ÿ‰ ํŒŒ์ผ ๋‹ค์šด๋กœ๋“œ
45
- RUN git lfs install && git lfs pull || true
46
-
47
- # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์„ค์ •
48
- ENV HOME=/app
49
- ENV STREAMLIT_HOME=/app/.streamlit
50
- RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
51
-
52
- # ์บ์‹œ ๊ฒฝ๋กœ๋„ /tmp๋กœ ํ†ต์ผ
53
- ENV HF_HOME=/tmp/hf-home \
54
- TRANSFORMERS_CACHE=/tmp/hf-cache \
55
- HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
56
- TORCH_HOME=/tmp/torch-cache \
57
- XDG_CACHE_HOME=/tmp/xdg-cache
58
-
59
- ENV OLLAMA_HOST=http://127.0.0.1:11434
60
- EXPOSE 8501
61
- EXPOSE 11434
62
-
63
- # โœ… ๊ฐ„๋‹จํ•˜๊ณ  ์•ˆ์ „ํ•œ CMD
64
- CMD bash -lc '\
65
- export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
66
- export OLLAMA_HOME=/tmp/ollama-home; \
67
- export OLLAMA_MODELS=/tmp/ollama-home; \
68
- echo "Starting Ollama with model: $OLLAMA_MODEL"; \
69
- echo "Using Ollama home: $OLLAMA_HOME"; \
70
- mkdir -p /tmp/ollama-home; \
71
- ollama serve & \
72
- for i in {1..120}; do \
73
- if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
74
- echo "Ollama server is ready"; \
75
- break; \
76
- fi; \
77
- echo "Waiting for Ollama... ($i/120)"; \
78
- sleep 1; \
79
- done; \
80
- echo "Checking if model exists..."; \
81
- if ! ollama list | grep -q "$OLLAMA_MODEL"; then \
82
- echo "Model not found, pulling: $OLLAMA_MODEL"; \
83
- ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
84
- else \
85
- echo "Model already available: $OLLAMA_MODEL"; \
86
- fi; \
87
- echo "Starting Streamlit app"; \
88
- streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'