# syntax=docker/dockerfile:1
# STOUT_V2 / Dockerfile — last update: commit 5bab9d4 ("Update Dockerfile", verified)
# The syntax directive pins the BuildKit frontend, which this file needs for
# the RUN here-document that writes app.py.
# 1. Base image — Debian-slim Python 3.9; tag is pinned (no :latest).
FROM python:3.9-slim
# 2. System dependencies, one per line and sorted for diffability:
#  - gcc / python3-dev: toolchain for pip packages that compile C extensions
#  - libstdc++6 + locales: added to stop character-conversion crashes
#  - libxrender1 / libxext6: X libs RDKit's native drawing code links against
#  - openjdk-21-jre-headless: JVM, presumably needed by STOUT's Java back-end
#    NOTE(review): confirm openjdk-21 is available in this base's Debian
#    release — Debian bookworm (current python:3.9-slim) ships openjdk-17,
#    and the install fails if the package does not exist.
# update + install + list cleanup stay in ONE layer so no stale apt cache or
# package lists are baked into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    libstdc++6 \
    libxext6 \
    libxrender1 \
    locales \
    openjdk-21-jre-headless \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*
# Generate locales to prevent character conversion crashes
# (uncomments the en_US.UTF-8 entry in /etc/locale.gen, then compiles it;
# both steps in one layer since locale-gen depends on the sed edit).
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
locale-gen
# Make the generated locale the default for every process in the container,
# so JVM and Python text conversion never fall back to the POSIX/C locale.
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
LC_ALL=en_US.UTF-8
# 3. Environment Variables (Critical Fixes for SIGSEGV)
# NOTE(review): LD_BIND_NOW/LD_PRELOAD are blunt, process-wide workarounds
# for a TensorFlow/libstdc++ symbol conflict — remove them once the
# underlying crash is fixed. JAVA_HOME path is amd64-specific; this image
# will not work as-is on arm64.
ENV JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64 \
PATH=$JAVA_HOME/bin:$PATH \
TF_ENABLE_ONEDNN_OPTS=1 \
TF_CPP_MIN_LOG_LEVEL=2 \
# Forces C++ libs to resolve early and avoid conflicts
LD_BIND_NOW=1 \
# Force the standard C++ lib to be pre-loaded
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6 \
HOME=/home/user
# 4. Non-root runtime user. UID 1000 is the Hugging Face Spaces convention;
# all package installs above ran as root, everything below runs unprivileged.
RUN useradd --create-home --uid 1000 user
USER user
# WORKDIR creates the directory if missing — no explicit mkdir needed.
WORKDIR /home/user/app
# pip installs done as this user land in ~/.local/bin; expose them on PATH.
ENV PATH=/home/user/.local/bin:$PATH
# 5. Python stack. wheel is installed first so the later installs can build
# any sdist-only packages; --no-cache-dir keeps pip's cache out of the layer.
# One package per line, sorted, for readable diffs.
# NOTE(review): application packages are unpinned — consider pinning exact
# versions for reproducible builds.
RUN pip install --no-cache-dir wheel && \
    pip install --no-cache-dir \
    "numpy<2.0" \
    STOUT-pypi \
    fastapi \
    gunicorn \
    intel-tensorflow \
    rdkit-pypi \
    uvicorn
# 6. Create app.py.
# Fixes vs. previous revision:
#  - the Python body had NO indentation, so app.py raised IndentationError on
#    import and every gunicorn worker crash-looped; bodies are now indented.
#  - heredoc delimiter is quoted ('EOF') so the shell performs no variable
#    expansion inside the Python source.
#  - bare "except:" narrowed to "except Exception:" so SystemExit /
#    KeyboardInterrupt are not swallowed.
RUN cat <<'EOF' > app.py
import os
import uvicorn
from fastapi import FastAPI, HTTPException, Body
from STOUT import translate_forward, translate_reverse
from rdkit import Chem
from typing import List

app = FastAPI(title="STOUT V2 SIGSEGV-Fixed API")

# Helper to ensure RDKit doesn't break: canonicalize the SMILES, or return
# None when RDKit cannot parse it.
def clean_smi(s):
    try:
        m = Chem.MolFromSmiles(s)
        return Chem.MolToSmiles(m, isomericSmiles=True) if m else None
    except Exception:
        # RDKit can raise on pathological input; treat that as invalid SMILES.
        return None

@app.get("/")
def root():
    return {"message": "STOUT V2 - Stable Build"}

# Accepts GET and POST; "smiles" is read as a query parameter in both cases.
@app.api_route("/smiles_to_iupac", methods=["GET", "POST"])
def s2i(smiles: str):
    s = clean_smi(smiles)
    if not s:
        raise HTTPException(400, "Invalid SMILES")
    return {"iupac": translate_forward(s)}

@app.get("/health")
def health():
    return {"status": "healthy"}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
EOF
# 7. Port contract and server start. EXPOSE is documentation only; the port
# is above 1024 so the non-root user can bind it.
EXPOSE 7860
# Probe the app's own /health endpoint so orchestrators can detect a wedged
# container. The slim image has no curl, so use the Python stdlib. Generous
# start period because STOUT loads large models at boot.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
    CMD ["python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:7860/health', timeout=4)"]
# Exec-form CMD: gunicorn is PID 1 and receives SIGTERM from docker stop.
# Reduce workers to 2 if memory crashes persist; 4 is the limit for 32GB —
# each worker loads its own copy of the models.
CMD ["gunicorn", "app:app", \
    "--workers", "3", \
    "--worker-class", "uvicorn.workers.UvicornWorker", \
    "--bind", "0.0.0.0:7860", \
    "--timeout", "300"]