# syntax=docker/dockerfile:1
FROM ubuntu:22.04

# Build-time only: suppress debconf prompts. Was ENV, which leaked the setting
# into every container started from this image; ARG keeps the runtime env clean.
ARG DEBIAN_FRONTEND=noninteractive

# 1. Install core utilities.
#    --no-install-recommends keeps the image lean; apt lists are removed in the
#    same layer so they never persist in the image. Packages sorted for diffability.
#    ca-certificates added explicitly so HTTPS downloads below work even without
#    recommended packages.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        curl \
        git \
        htop \
        iputils-ping \
        nano \
        net-tools \
        openssh-server \
        pciutils \
        python3 \
        python3-pip \
        sudo \
        vim \
        wget \
    && rm -rf /var/lib/apt/lists/*

# 2. Install modern Node.js (v20) from NodeSource.
#    NOTE(review): curl | bash executes an unverified remote script at build
#    time — consider downloading, checksumming, then running it.
#    The setup script runs its own `apt-get update`, so install works here;
#    clean the lists it created in the same layer.
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
    && apt-get install -y --no-install-recommends nodejs \
    && rm -rf /var/lib/apt/lists/*

# 3. Install localtunnel (used to expose the service publicly).
#    Clean the npm cache in the same layer so it doesn't bloat the image.
RUN npm install -g localtunnel \
    && npm cache clean --force

# 4. Install Ollama from the GitHub release tarball (bypasses bot protection on
#    the install script).
#    Fix: added -f so an HTTP error fails the build instead of saving an error
#    page as "ollama.tgz".
#    NOTE(review): "latest" is not reproducible — pin a release tag for
#    deterministic builds.
RUN curl -fL https://github.com/ollama/ollama/releases/latest/download/ollama-linux-amd64.tgz -o /tmp/ollama.tgz \
    && tar -C /usr -xzf /tmp/ollama.tgz \
    && rm /tmp/ollama.tgz

# 5. Pre-pull the Qwen model at build time so the first container start is fast.
#    Fix: poll the Ollama API until the server actually answers instead of a
#    fixed `sleep 5`, which races on slow builders and silently breaks the pull.
#    The background server dies when this layer's shell exits; the pulled model
#    persists under /root/.ollama in the layer.
RUN ollama serve & \
    for i in $(seq 1 30); do \
        curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1 && break; \
        sleep 1; \
    done \
    && ollama pull qwen2.5-coder:3b

# 6. Configure SSH for root access.
#    Fix: mkdir -p so the step is idempotent if the package already created the dir.
#    NOTE(review): root login with a hardcoded password ("huggingface") baked
#    into the image is a serious security hole if port 22 is ever reachable —
#    confirm this is intentional for the target deployment environment.
RUN mkdir -p /var/run/sshd \
    && echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config \
    && echo 'PasswordAuthentication yes' >> /etc/ssh/sshd_config \
    && echo "root:huggingface" | chpasswd

WORKDIR /app

# 7. Python deps in their own layer: changes to app.py won't reinstall them.
#    NOTE(review): requirements.txt should pin versions for reproducible builds.
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt

# 8. Copy the Python application (specific file, not `COPY . .`, to avoid
#    cache busts and accidental inclusion of local files).
COPY app.py .

# 9. Document the service contract (EXPOSE does not publish ports):
#    7860 = FastAPI/uvicorn, 11434 = Ollama API, 22 = sshd.
EXPOSE 7860 11434 22

# Root is required here for sshd; explicit even though it is the default.
USER root

# NOTE(review): consider a HEALTHCHECK against a known app endpoint on :7860
# once one exists — not added here because app.py's routes are not visible.

# 10. Start Ollama and sshd in the background, then exec uvicorn so it replaces
#     bash as PID 1 and receives SIGTERM from `docker stop` (the original left
#     bash as PID 1, swallowing shutdown signals). The 5s delay gives Ollama a
#     head start before the app comes up.
CMD ["bash", "-c", "ollama serve & /usr/sbin/sshd & sleep 5 && exec uvicorn app:app --host 0.0.0.0 --port 7860"]