wolfofbackstreet committed on
Commit
7f222d2
·
verified ·
1 Parent(s): 3321200

Upload 4 files

Browse files
Files changed (4) hide show
  1. .gitignore +1 -0
  2. Dockerfile +32 -0
  3. app.py +56 -0
  4. requirements.txt +8 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /.venv/
Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use a slim Debian-based image with Python 3.10
# (python:3.10-slim is Debian, not Ubuntu — the original comment was wrong)
FROM python:3.10-slim

# Unbuffered stdout/stderr so container logs appear immediately
ENV PYTHONUNBUFFERED=1

# Set working directory
WORKDIR /app

# Install system dependencies (video/image codecs and X libs used by opencv).
# --no-install-recommends keeps the layer small; apt lists are removed in the
# same layer so they never bloat the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg \
    libsm6 \
    libxext6 \
    && rm -rf /var/lib/apt/lists/*

# Install UV for faster dependency installation (skip pip's download cache)
RUN pip install --no-cache-dir uv

# Copy requirements first so the dependency layer is cached across code changes
COPY requirements.txt .
RUN uv pip install --system -r requirements.txt

# Copy application code
COPY app.py .

# Download the pre-converted OpenVINO model at build time so the container
# starts without hitting the network. CACHE_DIR '' disables OpenVINO's
# compiled-model disk cache.
RUN python -c "from optimum.intel.openvino import OVStableDiffusionPipeline; \
    OVStableDiffusionPipeline.from_pretrained('rupeshs/hyper-sd-sdxl-1-step-openvino-int8', ov_config={'CACHE_DIR': ''})"

# Expose port 5000 (the Flask app binds to it in app.py)
EXPOSE 5000

# Command to run the Flask app
CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from flask import Flask, request, jsonify, send_file
from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
from PIL import Image
import io
# NOTE(review): torch is imported but never referenced in this module —
# confirm it is required indirectly (e.g. by optimum/diffusers) before removing.
import torch

app = Flask(__name__)

# Load the pre-converted OpenVINO SDXL model from the Hugging Face Hub.
# ov_config CACHE_DIR '' disables OpenVINO's compiled-model disk cache.
model_id = "rupeshs/hyper-sd-sdxl-1-step-openvino-int8"
# NOTE(review): the checkpoint name says "sdxl" but the non-XL
# OVStableDiffusionPipeline class is used here — optimum-intel ships a
# separate OVStableDiffusionXLPipeline for SDXL checkpoints; confirm this
# loads and runs correctly with the pinned optimum version.
pipeline = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    ov_config={"CACHE_DIR": ""},
    device="CPU"  # run inference on CPU via the OpenVINO runtime
)

# Ensure Tiny Auto Encoder is enabled to reduce memory usage
# NOTE(review): enable_tiny_auto_encoder() is not part of the documented
# optimum-intel pipeline API — verify this method exists on the installed
# version (it may come from a model-specific fork).
pipeline.enable_tiny_auto_encoder()
21
@app.route('/generate', methods=['POST'])
def generate_image():
    """Generate an image from a JSON prompt and return it as a PNG attachment.

    Expected JSON body (all fields optional, defaults applied):
        prompt (str), width (int), height (int),
        num_inference_steps (int), guidance_scale (float)

    Returns:
        200 with a PNG file on success; 500 with {'error': ...} on failure.
    """
    try:
        # Bug fix: request.get_json() returns None for a missing body and
        # aborts with an implicit 400 for malformed JSON, so data.get(...)
        # crashed with AttributeError. silent=True + `or {}` makes every
        # bad/absent body fall back to the defaults instead.
        data = request.get_json(silent=True) or {}
        prompt = data.get('prompt', 'A futuristic cityscape at sunset, cyberpunk style, 8k')
        # Coerce numeric parameters so string values (e.g. "512") still work;
        # a non-numeric value raises ValueError and is reported by the handler below.
        width = int(data.get('width', 512))
        height = int(data.get('height', 512))
        num_inference_steps = int(data.get('num_inference_steps', 1))
        guidance_scale = float(data.get('guidance_scale', 1.0))

        # Run the diffusion pipeline; .images[0] is the first generated PIL image.
        image = pipeline(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale
        ).images[0]

        # Serialize the PIL image into an in-memory PNG buffer (no temp files).
        img_io = io.BytesIO()
        image.save(img_io, 'PNG')
        img_io.seek(0)

        return send_file(
            img_io,
            mimetype='image/png',
            as_attachment=True,
            download_name='generated_image.png'
        )
    except Exception as e:
        # Route-boundary handler: report failures as JSON rather than
        # letting Flask render an HTML 500 page.
        return jsonify({'error': str(e)}), 500
54
+
55
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable from outside the
    # container. The port defaults to 5000 (matching the Dockerfile EXPOSE)
    # but can be overridden with the PORT environment variable.
    # NOTE(review): this is Flask's development server — front it with a
    # production WSGI server (gunicorn/uwsgi) for real deployments.
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ flask
2
+ optimum[openvino]
3
+ diffusers
4
+ transformers
5
+ accelerate
6
+ opencv-python-headless
7
+ torch
8
+ pillow