# syntax=docker/dockerfile:1

# Base: slim Python 3.10 keeps the image small while still providing
# glibc-compatible wheels for torch/transformers.
FROM python:3.10-slim


# Runtime configuration, grouped into a single ENV layer.
#   PYTHONUNBUFFERED   — stream logs immediately (no stdout buffering).
#   HF_HOME            — Hugging Face cache root (/data persists on Spaces).
#   TRANSFORMERS_CACHE — legacy alias of HF_HOME; kept for transformers
#                        versions that still read it (deprecated upstream).
#   TORCH_HOME         — torch hub / checkpoint cache.
#   XDG_CACHE_HOME     — catch-all cache dir for other tooling.
ENV PYTHONUNBUFFERED=1 \
    HF_HOME=/data/huggingface \
    TRANSFORMERS_CACHE=/data/huggingface \
    TORCH_HOME=/data/torch \
    XDG_CACHE_HOME=/data/cache


# OS-level deps: git/curl for fetching, ca-certificates for TLS.
# --no-install-recommends avoids pulling suggested packages (hadolint
# DL3015); the list cleanup happens in the same layer so the apt cache
# never persists into the image. Packages sorted alphabetically.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*


# Application home; WORKDIR creates the directory if missing.
WORKDIR /app


# Copy only the dependency manifest first so the (slow) pip layer stays
# cached until requirements.txt itself changes.
COPY requirements.txt .
# --no-cache-dir on BOTH pip invocations keeps pip's download cache out of
# the image (hadolint DL3042); the original upgrade step omitted it.
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt


# Pre-download all model weights at build time so the Space serves its
# first request without hitting the Hugging Face Hub. Downloads land under
# the HF_HOME / TORCH_HOME paths configured above. A non-zero Python exit
# (e.g. a failed download) fails the build, which is the desired behavior.
RUN python - <<EOF
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoImageProcessor, AutoModelForImageClassification
import open_clip


# Qwen2.5-1.5B-Instruct: cache tokenizer + causal-LM weights.
# trust_remote_code=True executes model code from the Hub repo — acceptable
# here only because the model id is pinned to a known publisher.
QWEN_MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
AutoTokenizer.from_pretrained(QWEN_MODEL_ID, trust_remote_code=True)
AutoModelForCausalLM.from_pretrained(
    QWEN_MODEL_ID,
    trust_remote_code=True,
    device_map="cpu"  # build machines are CPU-only; load weights on CPU
)


# Plant classification model: cache image processor + classifier weights.
PLANT_MODEL_ID = "drrobot9/BIONEXUS_PLANT_CLASSIFICATION"
AutoImageProcessor.from_pretrained(PLANT_MODEL_ID)
AutoModelForImageClassification.from_pretrained(PLANT_MODEL_ID)


# BioCLIP: cache via open_clip's hf-hub loader (model + transforms).
open_clip.create_model_and_transforms("hf-hub:imageomics/bioclip")

print("Models cached successfully for Hugging Face Spaces.")
EOF


# Application source copied last so code edits don't invalidate the
# dependency and model-cache layers above.
COPY app ./app


# Documentation only (does not publish the port); 7860 is the port
# Hugging Face Spaces expects a Docker Space to listen on.
EXPOSE 7860


# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM on stop.
# NOTE(review): no USER directive, so the container runs as root. A
# non-root user (with /data and /app chowned) would be safer — confirm
# /data stays writable at runtime before hardening.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]