Ngene787 committed on
Commit
da03eea
·
1 Parent(s): 1394939

feat: add new app

Browse files
Files changed (6) hide show
  1. README.md +1 -1
  2. app.py +140 -12
  3. app_bak.py +28 -0
  4. requirements.txt +4 -2
  5. stable_diffusion_inference.py +50 -14
  6. utils.py +22 -0
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: yellow
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 5.31.0
8
- app_file: app.py
9
  pinned: false
10
  license: mit
11
  short_description: 'EEEM068 Spring 2025 Applied Machine Learning Project'
 
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 5.31.0
8
+ app_file: app_bak.py
9
  pinned: false
10
  license: mit
11
  short_description: 'EEEM068 Spring 2025 Applied Machine Learning Project'
app.py CHANGED
@@ -1,26 +1,154 @@
1
  # -*- coding: UTF-8 -*-
2
  """
3
- @Time : 28/05/2025 10:06
4
  @Author : xiaoguangliang
5
  @File : app.py
6
  @Project : Faice_text2face
7
  """
8
  import gradio as gr
9
- from stable_diffusion_inference import inference
10
 
 
 
11
 
12
- # def greet(name):
13
- # return "Hello " + name + "!!"
14
- #
15
- #
16
- # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 
 
 
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- def text2face(prompt):
20
- image = inference(prompt)
21
- return image
 
 
 
 
 
22
 
 
 
 
 
 
 
 
23
 
24
- demo = gr.Interface(fn=text2face, inputs="text", outputs="image")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- demo.launch()
 
 
 
1
  # -*- coding: UTF-8 -*-
2
  """
3
+ @Time : 28/05/2025 16:29
4
  @Author : xiaoguangliang
5
  @File : app.py
6
  @Project : Faice_text2face
7
  """
8
  import gradio as gr
 
9
 
10
+ from stable_diffusion_inference import inference, MAX_SEED, MAX_IMAGE_SIZE
11
+ from utils import timer
12
 
13
+ examples = [
14
+ "A capybara wearing a suit holding a sign that reads Hello World",
15
+ "A serene mountain lake at sunset with cherry blossoms floating on the water",
16
+ "A magical crystal dragon with iridescent scales in a glowing forest",
17
+ "A Victorian steampunk teapot with intricate brass gears and rose gold accents",
18
+ "A futuristic neon cityscape with flying cars and holographic billboards",
19
+ "A red panda painter creating a masterpiece with tiny paws in an art studio",
20
+ ]
21
 
22
+ css = """
23
+ body {
24
+ background: linear-gradient(135deg, #f9e2e6 0%, #e8f3fc 50%, #e2f9f2 100%);
25
+ background-attachment: fixed;
26
+ min-height: 100vh;
27
+ }
28
+ #col-container {
29
+ margin: 0 auto;
30
+ max-width: 640px;
31
+ background-color: rgba(255, 255, 255, 0.85);
32
+ border-radius: 16px;
33
+ box-shadow: 0 8px 16px rgba(0, 0, 0, 0.1);
34
+ padding: 24px;
35
+ backdrop-filter: blur(10px);
36
+ }
37
+ .gradio-container {
38
+ background: transparent !important;
39
+ }
40
+ .gr-button-primary {
41
+ background: linear-gradient(90deg, #6b9dfc, #8c6bfc) !important;
42
+ border: none !important;
43
+ transition: all 0.3s ease;
44
+ }
45
+ .gr-button-primary:hover {
46
+ transform: translateY(-2px);
47
+ box-shadow: 0 5px 15px rgba(108, 99, 255, 0.3);
48
+ }
49
+ .gr-form {
50
+ border-radius: 12px;
51
+ background-color: rgba(255, 255, 255, 0.7);
52
+ }
53
+ .gr-accordion {
54
+ border-radius: 12px;
55
+ overflow: hidden;
56
+ }
57
+ h1 {
58
+ background: linear-gradient(90deg, #6b9dfc, #8c6bfc);
59
+ -webkit-background-clip: text;
60
+ -webkit-text-fill-color: transparent;
61
+ font-weight: 800;
62
+ }
63
+ """
64
+
65
# Gradio UI: prompt box + run button, result image, and an accordion of
# advanced sampler settings (seed, size, guidance, steps).
# NOTE(review): the title references "TensorArt Stable Diffusion 3.5 Large
# TurboX" but stable_diffusion_inference.py loads 'Ngene787/Faice_text2face' —
# confirm the heading/link are intended.
with gr.Blocks(theme="apriel", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(" # TensorArt Stable Diffusion 3.5 Large TurboX")
        gr.Markdown(
            "[8-step distilled turbo model](https://huggingface.co/tensorart/stable-diffusion-3.5-large-TurboX)")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        # Advanced sampler controls, hidden by default.
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=7.5,
                    step=0.1,
                    value=1.5,
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=8,
                )

        # Clickable example prompts; results are generated lazily and cached.
        # NOTE(review): only [prompt] is passed as inputs, so example runs use
        # the inference() defaults for all other parameters — confirm intended.
        gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=inference,
                    cache_examples=True, cache_mode="lazy")
    # Run inference on button click or when the prompt box is submitted.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    # timer() reports total wall-clock time once the server shuts down;
    # mcp_server=True additionally exposes the app as an MCP server.
    with timer("All tasks"):
        demo.launch(mcp_server=True)
app_bak.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: UTF-8 -*-
2
+ """
3
+ @Time : 28/05/2025 10:06
4
+ @Author : xiaoguangliang
5
+ @File : app_bak.py
6
+ @Project : Faice_text2face
7
+ """
8
+ import gradio as gr
9
+ from stable_diffusion_inference import inference
10
+ from utils import timer
11
+
12
+
13
+ # def greet(name):
14
+ # return "Hello " + name + "!!"
15
+ #
16
+ #
17
+ # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
18
+
19
+
20
def text2face(prompt):
    """Generate a face image for *prompt* by delegating to the diffusion pipeline."""
    return inference(prompt)
23
+
24
+
25
+ with timer("All tasks"):
26
+ demo = gr.Interface(fn=text2face, inputs="text", outputs="image")
27
+
28
+ demo.launch()
requirements.txt CHANGED
@@ -21,6 +21,7 @@ huggingface-hub==0.32.2
21
  idna==3.10
22
  importlib_metadata==8.7.0
23
  Jinja2==3.1.6
 
24
  markdown-it-py==3.0.0
25
  MarkupSafe==3.0.2
26
  mdurl==0.1.2
@@ -31,7 +32,7 @@ orjson==3.10.18
31
  packaging==25.0
32
  pandas==2.2.3
33
  pillow==11.2.1
34
- psutil==7.0.0
35
  pydantic==2.11.5
36
  pydantic_core==2.33.2
37
  pydub==0.25.1
@@ -50,6 +51,7 @@ semantic-version==2.10.0
50
  shellingham==1.5.4
51
  six==1.17.0
52
  sniffio==1.3.1
 
53
  starlette==0.46.2
54
  sympy==1.14.0
55
  tokenizers==0.21.1
@@ -66,4 +68,4 @@ tzdata==2025.2
66
  urllib3==2.4.0
67
  uvicorn==0.34.2
68
  websockets==15.0.1
69
- zipp==3.22.0
 
21
  idna==3.10
22
  importlib_metadata==8.7.0
23
  Jinja2==3.1.6
24
+ loguru==0.7.3
25
  markdown-it-py==3.0.0
26
  MarkupSafe==3.0.2
27
  mdurl==0.1.2
 
32
  packaging==25.0
33
  pandas==2.2.3
34
  pillow==11.2.1
35
+ psutil==5.9.8
36
  pydantic==2.11.5
37
  pydantic_core==2.33.2
38
  pydub==0.25.1
 
51
  shellingham==1.5.4
52
  six==1.17.0
53
  sniffio==1.3.1
54
+ spaces==0.36.0
55
  starlette==0.46.2
56
  sympy==1.14.0
57
  tokenizers==0.21.1
 
68
  urllib3==2.4.0
69
  uvicorn==0.34.2
70
  websockets==15.0.1
71
+ zipp==3.22.0
stable_diffusion_inference.py CHANGED
@@ -6,27 +6,63 @@
6
  @Project : Faice_text2face
7
  """
8
  import torch
 
 
9
  from diffusers import StableDiffusionPipeline
10
  from accelerate import Accelerator
 
 
11
 
12
  model_path = 'Ngene787/Faice_text2face'
13
 
 
 
 
14
 
15
- def inference(prompt):
16
- accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
17
- pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16,
18
- low_cpu_mem_usage=True)
19
- # pipe = pipe.to("cuda")
20
- pipe = accelerator.prepare(pipe)
21
 
22
- # Enable memory-efficient attention
23
- # pipe.enable_xformers_memory_efficient_attention()
 
24
 
25
- # Enable attention slicing
26
- pipe.enable_attention_slicing()
27
 
28
- # Enable VAE slicing
29
- pipe.enable_vae_slicing()
30
 
31
- image = pipe(prompt).images[0]
32
- return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  @Project : Faice_text2face
7
  """
8
  import torch
9
+ import random
10
+ import numpy as np
11
  from diffusers import StableDiffusionPipeline
12
  from accelerate import Accelerator
13
+ import gradio as gr
14
+ import spaces
15
 
16
  model_path = 'Ngene787/Faice_text2face'
17
 
18
# Load the diffusion pipeline once at import time and wrap it with Accelerate
# for fp16 mixed-precision execution.
accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16,
                                               low_cpu_mem_usage=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
# NOTE(review): torch_dtype is computed here but never applied — the pipeline
# was already loaded with torch.float16 above. A float16 pipeline on a
# CPU-only host may fail or be very slow; confirm whether the dtype should be
# passed to from_pretrained instead.
if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32
pipe = pipe.to(device)

pipe = accelerator.prepare(pipe)
# Enable memory-efficient attention
# pipe.enable_xformers_memory_efficient_attention()

# Enable attention slicing (computes attention in chunks to cut peak memory)
pipe.enable_attention_slicing()

# Enable VAE slicing (decodes latents in slices to cut peak memory)
pipe.enable_vae_slicing()

# Bounds exposed to the UI sliders in app.py.
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 256
41
+
42
+
43
@spaces.GPU(duration=65)
def inference(prompt,
              negative_prompt="",
              seed=42,
              randomize_seed=False,
              width=MAX_IMAGE_SIZE,
              height=MAX_IMAGE_SIZE,
              guidance_scale=1.5,
              num_inference_steps=8,
              progress=gr.Progress(track_tqdm=True)):
    """Run the text-to-image pipeline and return the image plus the seed used.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Content to steer the sampler away from.
        seed: RNG seed; ignored when ``randomize_seed`` is True.
        randomize_seed: When True, replace ``seed`` with a random value in
            [0, MAX_SEED] so repeated runs differ.
        width: Output image width in pixels.
        height: Output image height in pixels.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        progress: Gradio progress tracker; ``track_tqdm=True`` mirrors the
            pipeline's tqdm bars into the UI (default evaluated at def time —
            intentional Gradio convention).

    Returns:
        tuple: (generated image, seed actually used) — the seed is returned so
        the UI can display it after randomization.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # CPU generator keeps seeding behavior consistent across devices.
    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
utils.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: UTF-8 -*-
2
+ """
3
+ @Time : 28/05/2025 16:17
4
+ @Author : xiaoguangliang
5
+ @File : utils.py
6
+ @Project : Faice_text2face
7
+ """
8
+ import time
9
+ from contextlib import contextmanager
10
+
11
+
12
@contextmanager
def timer(msg="all tasks"):
    """Context manager that prints the wall-clock time spent in its body.

    Args:
        msg: Label included in the printed report.

    The elapsed time is reported in minutes. The report is printed in a
    ``finally`` clause so it appears even when the timed body raises.
    """
    start_time = time.time()
    try:
        yield
    finally:
        end_time = time.time()
        print(f"The time cost for {msg}:", round((end_time - start_time) / 60, 2), "minutes")