FROM ubuntu:22.04
# DEBIAN_FRONTEND is a build-time-only setting; declared as ARG so it does not
# leak into the runtime environment of containers started from this image.
ARG DEBIAN_FRONTEND=noninteractive
# 1. Install core utilities.
#    --no-install-recommends keeps the image smaller; packages are sorted
#    alphabetically for diffability, and the apt list cache is removed in the
#    same layer so it never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    git \
    htop \
    iputils-ping \
    nano \
    net-tools \
    openssh-server \
    pciutils \
    python3 \
    python3-pip \
    sudo \
    vim \
    wget \
    && rm -rf /var/lib/apt/lists/*
# 2. Install modern Node.js (v20) via the NodeSource setup script.
#    pipefail makes a failed download abort the build instead of piping an
#    empty/partial script into bash (hadolint DL4006). The setup script runs
#    apt-get update itself, so the lists are cleaned again in the same layer.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y --no-install-recommends nodejs && \
    rm -rf /var/lib/apt/lists/*
# 3. Install localtunnel globally; clear the npm cache in the same layer so it
#    does not bloat the image (hadolint DL3016).
RUN npm install -g localtunnel && \
    npm cache clean --force
# 4. Install Ollama manually from GitHub Releases (bypasses the install
#    script's bot protection). -f makes curl fail on HTTP errors (e.g. a 404
#    from a renamed release asset) instead of saving the error page as the
#    tarball; download, extract, and cleanup stay in one layer so the archive
#    never persists in the image.
RUN curl -fL https://github.com/ollama/ollama/releases/latest/download/ollama-linux-amd64.tgz -o ollama.tgz && \
    tar -C /usr -xzf ollama.tgz && \
    rm ollama.tgz
# 5. Pre-pull the Qwen model during the build so first container start is
#    instant. A temporary ollama server is started in the background; instead
#    of a fixed sleep (flaky on slow builders) we poll its version endpoint
#    until it answers, then pull. The server dies when the RUN layer ends.
RUN nohup bash -c "ollama serve &" && \
    for i in $(seq 1 30); do \
        curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1 && break; \
        sleep 1; \
    done && \
    ollama pull qwen2.5-coder:3b
# 6. Configure SSH for root access.
#    mkdir -p is idempotent if /var/run/sshd already exists in the base image.
#    NOTE(review): root login with a hard-coded password ("huggingface") is a
#    serious security risk if port 22 is ever reachable publicly — confirm
#    this image only runs in a trusted / sandboxed environment.
RUN mkdir -p /var/run/sshd && \
    echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config && \
    echo 'PasswordAuthentication yes' >> /etc/ssh/sshd_config && \
    echo "root:huggingface" | chpasswd
# All application files live under /app (WORKDIR creates it if missing).
WORKDIR /app
# 7. Copy and install Python requirements before the app source so the
#    dependency layer is cached until requirements.txt itself changes;
#    --no-cache-dir keeps pip's download cache out of the image.
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
# 8. Copy the Python application (copied last — it changes most often).
COPY app.py .
# 9. Expose ports (documentation only — publishing happens at `docker run`).
#    7860 = FastAPI app, 11434 = Ollama API, 22 = SSH.
EXPOSE 7860
EXPOSE 11434
EXPOSE 22
# sshd and the privileged SSH setup above require root at runtime.
# NOTE(review): prefer a dedicated non-root user once root SSH access is no
# longer needed.
USER root
# Probe the FastAPI port so orchestrators can detect a wedged container.
# Any HTTP response counts as healthy (no -f): app.py's routes are not
# visible here, so we only verify the server is answering on 7860.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
    CMD curl -s -o /dev/null http://localhost:7860/ || exit 1
# 10. Start Ollama, SSH, and the FastAPI app on port 7860.
#     Running multiple processes in one container is normally an anti-pattern;
#     kept here deliberately because this image is an all-in-one sandbox.
CMD ["bash", "-c", "ollama serve & /usr/sbin/sshd & sleep 5 && uvicorn app:app --host 0.0.0.0 --port 7860"]