guydffdsdsfd committed on
Commit
0bd5253
·
verified ·
1 Parent(s): 79787bf

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +85 -26
Dockerfile CHANGED
@@ -1,34 +1,93 @@
1
- # Use an official PyTorch image with CUDA support
2
- FROM pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime
3
-
4
- # 1. Fix the "Time Zone" hang: Set to non-interactive and default to UTC
5
- ENV DEBIAN_FRONTEND=noninteractive \
6
- TZ=Etc/UTC \
7
- PYTHONUNBUFFERED=1 \
8
- GRADIO_ALLOW_FLAGGING=never \
9
- GRADIO_SERVER_NAME="0.0.0.0" \
10
- HOME=/home/user
11
-
12
- # 2. Install system dependencies (tzdata is installed silently now)
13
  RUN apt-get update && apt-get install -y \
14
- tzdata \
15
  git \
16
- libgl1-mesa-glx \
17
  libglib2.0-0 \
18
  && rm -rf /var/lib/apt/lists/*
19
 
20
- # 3. Set up the Hugging Face User (Required for Spaces)
21
- RUN useradd -m -u 1000 user
22
- USER user
23
- WORKDIR $HOME/app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- # 4. Install Python dependencies
26
- COPY --chown=user requirements.txt .
27
- RUN pip install --no-cache-dir -r requirements.txt
28
 
29
- # 5. Copy the rest of your code
30
- COPY --chown=user . .
31
 
32
- # 6. Make start script executable and launch
33
- RUN chmod +x start.sh
34
- CMD ["./start.sh"]
 
1
# syntax=docker/dockerfile:1
FROM python:3.10-slim

# System libraries required at runtime by torch/diffusers image handling
# (libgl1, libglib2.0-0), plus git for any VCS-backed pip installs.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libgl1 \
        libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Python dependencies.
# NOTE(review): versions are unpinned, so rebuilds are not reproducible —
# torch/diffusers/transformers move fast; pin a known-good set once confirmed.
RUN pip install --no-cache-dir \
        accelerate \
        diffusers \
        flask \
        flask-cors \
        pillow \
        safetensors \
        torch \
        torchaudio \
        torchvision \
        transformers

# Dedicated non-root user (UID 1000 — the UID HF Spaces runs containers as).
# Replaces the previous root-owned, world-writable (chmod 777) /home/sd.
RUN useradd -m -u 1000 -d /home/sd sd

# Route all Hugging Face caches under the user's writable home.
# HF_HOME is the current umbrella variable; TRANSFORMERS_CACHE and
# DIFFUSERS_CACHE are deprecated aliases kept for older library versions.
ENV HOME=/home/sd \
    HF_HOME=/home/sd/.cache \
    TRANSFORMERS_CACHE=/home/sd/.cache \
    DIFFUSERS_CACHE=/home/sd/.cache \
    PYTHONUNBUFFERED=1

# -------- Flask Stable Diffusion API --------
COPY <<'EOF' /app.py
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from diffusers import StableDiffusionPipeline
import torch
from io import BytesIO
import os

app = Flask(__name__)
CORS(app)

# NOTE(review): the runwayml repo has been removed from the Hub; if the
# startup download fails, switch to "stable-diffusion-v1-5/stable-diffusion-v1-5".
MODEL_ID = "runwayml/stable-diffusion-v1-5"

# Load the pipeline once at startup: fp16 on GPU, fp32 on CPU.
pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

@app.route("/", methods=["GET"])
def health():
    # Cheap liveness endpoint, also used by the image HEALTHCHECK.
    return "Stable Diffusion API Running", 200

@app.route("/api/generate", methods=["POST"])
def generate():
    # silent=True: a missing/non-JSON body yields {} and a clean 400 below
    # instead of an AttributeError -> 500.
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt", "")
    try:
        steps = int(data.get("steps", 25))
        guidance = float(data.get("guidance", 7.5))
    except (TypeError, ValueError):
        return jsonify({"error": "steps/guidance must be numeric"}), 400

    if not prompt:
        return jsonify({"error": "No prompt provided"}), 400

    image = pipe(
        prompt=prompt,
        num_inference_steps=steps,
        guidance_scale=guidance,
    ).images[0]

    buf = BytesIO()
    image.save(buf, format="PNG")
    buf.seek(0)

    return send_file(buf, mimetype="image/png")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
EOF

# -------- Startup Script --------
# `exec` replaces the shell so python becomes PID 1 and receives SIGTERM
# from `docker stop` (the original script kept bash as PID 1).
COPY <<'EOF' /start.sh
#!/bin/bash
set -e
echo "Starting Stable Diffusion API..."
exec python3 /app.py
EOF

RUN chmod +x /start.sh

# Drop privileges for runtime; model cache lives in the user's home.
USER sd
WORKDIR /home/sd

EXPOSE 7860

# Liveness probe against the Flask health route. python-slim has no curl,
# so use the stdlib. Long start-period: first boot downloads the model.
HEALTHCHECK --interval=30s --timeout=5s --start-period=300s --retries=3 \
  CMD python3 -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:7860/', timeout=4)" || exit 1

ENTRYPOINT ["/bin/bash", "/start.sh"]