Matthias Bachfischer committed on
Commit
75c1e5a
·
1 Parent(s): ea10bb5

add deployment files for hf spaces

Browse files
Files changed (2) hide show
  1. app.py +115 -1
  2. requirements.txt +5 -3
app.py CHANGED
@@ -1,3 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- gr.load("models/MatthiasBachfischer/open-engineering-orcas").launch()
 
 
1
+ import spaces
2
+ import argparse
3
+ import os
4
+ import time
5
+ from os import path
6
+ from safetensors.torch import load_file
7
+ from huggingface_hub import hf_hub_download
8
+
9
+ cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
10
+ os.environ["TRANSFORMERS_CACHE"] = cache_path
11
+ os.environ["HF_HUB_CACHE"] = cache_path
12
+ os.environ["HF_HOME"] = cache_path
13
+
14
  import gradio as gr
15
+ import torch
16
+ from diffusers import FluxPipeline
17
+
18
+ torch.backends.cuda.matmul.allow_tf32 = True
19
+
20
class timer:
    """Context manager that logs the wall-clock duration of a block.

    Usage::

        with timer("inference"):
            ...  # prints "inference starts" / "inference took N.NNs"

    Parameters
    ----------
    method_name : str
        Label used in the start/end log lines.
    """

    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")
        # Return self so callers can write ``with timer(...) as t``.
        # (Original returned None implicitly, which made the ``as`` form useless.)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        # Round to 2 decimals for readable log output.
        print(f"{self.method} took {str(round(end - self.start, 2))}s")
29
+
30
+ if not path.exists(cache_path):
31
+ os.makedirs(cache_path, exist_ok=True)
32
+
33
+ pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
34
+ pipe.load_lora_weights(hf_hub_download("MatthiasBachfischer/open-engineering-orcas", "open-engineering-orcas.safetensors"))
35
+ pipe.fuse_lora(lora_scale=1.0)
36
+ pipe.to(device="cuda", dtype=torch.bfloat16)
37
+
38
+ theme = gr.themes.Base(
39
+ primary_hue=gr.themes.Color(c100="#f4e5dc", c200="#f6c1b0", c300="#f59a86", c400="#f05b48", c50="#fef2f2", c500="#ea1b0a", c600="#c41708", c700="#9d1207", c800="#991b1b", c900="#7f1d1d", c950="#6c1e1e"),
40
+ font=[gr.themes.GoogleFont('Arial'), 'ui-sans-serif', 'system-ui', 'sans-serif'],
41
+ ).set(
42
+ button_primary_background_fill='*primary_500',
43
+ button_primary_text_color='*neutral_50'
44
+ )
45
+
46
+ with gr.Blocks(theme=theme) as demo:
47
+ gr.Markdown(
48
+ """
49
+ <div style="text-align: center; max-width: 900px; margin: 0 auto;">
50
+ <h1 style="font-size: 2.5rem; font-weight: 700; margin-bottom: 1rem; display: contents;">E.ON Open Engineering Orcas</h1>
51
+ <p style="font-size: 1rem; margin-bottom: 1.5rem;">This space hosts a fine-tuned <a href="https://huggingface.co/black-forest-labs/FLUX.1-dev">FLUX.1 dev</a> LoRA model to create <a href="https://github.com/jansche/open-engineering-orcas">Open Engineering Orca mascots</a>.</p>
52
+ </div>
53
+ """
54
+ )
55
+
56
+ with gr.Row():
57
+ with gr.Column(scale=3):
58
+ with gr.Group():
59
+ prompt = gr.Textbox(
60
+ label="Your orca description",
61
+ placeholder="E.g., orca with a backpack",
62
+ lines=3
63
+ )
64
+
65
+ with gr.Accordion("Advanced Settings", open=False):
66
+ with gr.Group():
67
+ with gr.Row():
68
+ height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024)
69
+ width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024)
70
+
71
+ with gr.Row():
72
+ steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
73
+ scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5)
74
+
75
+ seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0)
76
+
77
+ generate_btn = gr.Button("Generate Orca", variant="primary", scale=1)
78
+
79
+ with gr.Column(scale=4):
80
+ output = gr.Image(label="Your Generated Image")
81
+
82
+ gr.Markdown(
83
+ """
84
+ <div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px; background-color: #f0f0f0;">
85
+ <h2 style="font-size: 1.5rem; margin-bottom: 1rem;">How to Use</h2>
86
+ <ol style="padding-left: 1.5rem;">
87
+ <li>Enter a detailed description of the orca you want to create.</li>
88
+ <li>Adjust advanced settings if desired (tap to expand).</li>
89
+ <li>Tap "Generate Image" and wait for your creation!</li>
90
+ </ol>
91
+ <p style="margin-top: 1rem; font-style: italic;">Tip: Be specific in your description for best results!</p>
92
+ </div>
93
+ """
94
+ )
95
+
96
@spaces.GPU
def process_image(height, width, steps, scales, prompt, seed):
    """Run one text-to-image generation on the fused-LoRA FLUX pipeline.

    All numeric inputs arrive from Gradio widgets and are coerced to the
    types the pipeline expects; returns the first generated PIL image.
    """
    global pipe
    # Deterministic sampling: seed a fresh CPU generator per request.
    rng = torch.Generator().manual_seed(int(seed))
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
        result = pipe(
            prompt=[prompt],
            generator=rng,
            num_inference_steps=int(steps),
            guidance_scale=float(scales),
            height=int(height),
            width=int(width),
            max_sequence_length=256,
        )
        return result.images[0]
109
+
110
+ generate_btn.click(
111
+ process_image,
112
+ inputs=[height, width, steps, scales, prompt, seed],
113
+ outputs=output
114
+ )
115
 
116
+ if __name__ == "__main__":
117
+ demo.launch()
requirements.txt CHANGED
@@ -1,6 +1,8 @@
1
  accelerate
2
- diffusers
3
  invisible_watermark
4
  torch
5
- transformers
6
- xformers
 
 
 
1
  accelerate
2
+ diffusers==0.30.0
3
  invisible_watermark
4
  torch
5
+ transformers==4.43.3
6
+ xformers
7
+ sentencepiece
8
+ peft