# syntax=docker/dockerfile:1
FROM python:3.11-slim
# Build-time only: stop apt from prompting during installs. ARG (not
# ENV) so it doesn't leak into the runtime environment of containers.
ARG DEBIAN_FRONTEND=noninteractive
# TZ: deterministic timezone for test runs; PYTHONUNBUFFERED: flush
# stdout/stderr immediately so harness logs stream in real time;
# PYTHONDONTWRITEBYTECODE: keep .pyc files out of the image layers.
ENV TZ=Etc/UTC \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1
# Toolchain for compiling the project's C++ extension (build-essential,
# cmake, g++, ninja) plus git/patch for fetching and preparing sources.
# --no-install-recommends keeps the layer minimal; ca-certificates is
# listed explicitly because git needs it for the HTTPS clone below and
# it would otherwise only arrive as a recommended package. The apt list
# cache is removed in the same layer so it never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    cmake \
    g++ \
    git \
    libgomp1 \
    ninja-build \
    patch \
    && rm -rf /var/lib/apt/lists/*
# Upgrade the system pip. --no-cache-dir keeps the wheel cache out of
# this layer — the `rm -rf /root/.cache` further down runs in a later
# layer and cannot shrink this one.
RUN pip install --no-cache-dir --upgrade pip
WORKDIR /testbed
# Clone the full `bench` branch — neither shallow nor treeless. Every
# dataset base_commit lives on that branch, and only a complete clone
# guarantees all of their trees exist locally. A partial clone would
# make git lazy-fetch over the network during the harness's per-task
# `git reset --hard <base_commit>`, and that fetch is flaky enough in
# CI sandboxes to silently leave the working tree on the wrong commit.
RUN git clone -b bench https://github.com/bobbyyyan/scorch.git .
# Seed the venv with CPU-only PyTorch before setup.sh runs, so its
# `pip install -r requirements.txt` (which lists an unpinned `torch`)
# sees the requirement as already satisfied. The default Linux torch
# wheel drags in ~3.8GiB of CUDA payload (nvidia/, triton/,
# libtorch_cuda.so) that the 2-CPU Daytona sandboxes can never use;
# the CPU wheel from the pytorch.org CPU index avoids all of it.
RUN python3 -m venv venv \
    && venv/bin/pip install --no-cache-dir --upgrade pip \
    && venv/bin/pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu
# setup.sh installs the remaining requirements and compiles the C++
# extension. The pip cache (~2.7GiB) is only useful during that
# install, so it is dropped inside the same layer to keep the image
# small.
RUN bash setup.sh \
    && rm -rf /root/.cache
# "Activate" the venv for all subsequent commands and for the running
# container: putting its bin/ first on PATH makes `python`/`pip`
# resolve to /testbed/venv.
ENV VIRTUAL_ENV="/testbed/venv"
ENV PATH="/testbed/venv/bin:$PATH"
# --chmod sets the execute bit at copy time; a follow-up `RUN chmod`
# would duplicate the file into an extra layer. (Requires BuildKit,
# the default builder since Docker 23.)
COPY --chmod=0755 run_tests.sh /testbed/run_tests.sh
# NOTE(review): the image runs as root (no USER directive). The test
# harness sandbox may depend on that for builds/resets — confirm
# before adding a non-root user.
CMD ["/testbed/run_tests.sh"]