Ngene787 committed on
Commit
be0b220
·
1 Parent(s): 562f182

feat: add new app

Browse files
Files changed (2) hide show
  1. app.py +8 -15
  2. stable_diffusion_inference.py +2 -0
app.py CHANGED
@@ -6,18 +6,10 @@
6
  @Project : Faice_text2face
7
  """
8
  import gradio as gr
9
- import spaces
10
 
11
  from stable_diffusion_inference import inference, MAX_SEED, MAX_IMAGE_SIZE
12
  from utils import timer
13
 
14
-
15
- @spaces.GPU(duration=65)
16
- def text2face(prompt):
17
- image, seed = inference(prompt)
18
- return image, seed
19
-
20
-
21
  examples = [
22
  "Portrait of a young woman with long wavy hair, soft studio lighting, high contrast, 4k resolution, professional headshot",
23
  "Close-up of a smiling man with sharp jawline, cinematic lighting, shallow depth of field, bokeh background",
@@ -69,10 +61,11 @@ h1 {
69
 
70
  with gr.Blocks(theme="apriel", css=css) as demo:
71
  with gr.Column(elem_id="col-container"):
72
- gr.Markdown(" # TensorArt Stable Diffusion 3.5 Large TurboX")
73
- gr.Markdown(
74
- "[8-step distilled turbo model](https://huggingface.co/tensorart/stable-diffusion-3.5-large"
75
- "-TurboX)")
 
76
  with gr.Row():
77
  prompt = gr.Text(
78
  label="Prompt",
@@ -137,11 +130,11 @@ with gr.Blocks(theme="apriel", css=css) as demo:
137
  value=8,
138
  )
139
 
140
- gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=text2face,
141
  cache_examples=True, cache_mode="lazy")
142
  gr.on(
143
  triggers=[run_button.click, prompt.submit],
144
- fn=text2face,
145
  inputs=[
146
  prompt,
147
  negative_prompt,
@@ -158,4 +151,4 @@ with gr.Blocks(theme="apriel", css=css) as demo:
158
  if __name__ == "__main__":
159
  with timer("All tasks"):
160
  # demo.launch(mcp_server=True)
161
- demo.launch()
 
6
  @Project : Faice_text2face
7
  """
8
  import gradio as gr
 
9
 
10
  from stable_diffusion_inference import inference, MAX_SEED, MAX_IMAGE_SIZE
11
  from utils import timer
12
 
 
 
 
 
 
 
 
13
  examples = [
14
  "Portrait of a young woman with long wavy hair, soft studio lighting, high contrast, 4k resolution, professional headshot",
15
  "Close-up of a smiling man with sharp jawline, cinematic lighting, shallow depth of field, bokeh background",
 
61
 
62
  with gr.Blocks(theme="apriel", css=css) as demo:
63
  with gr.Column(elem_id="col-container"):
64
+ gr.Markdown(" # Fiace")
65
+ gr.Markdown(" ## Text2Face: Human Faces Generation with Diffusion Models")
66
+ # gr.Markdown(
67
+ # "[8-step distilled turbo model](https://huggingface.co/tensorart/stable-diffusion-3.5-large"
68
+ # "-TurboX)")
69
  with gr.Row():
70
  prompt = gr.Text(
71
  label="Prompt",
 
130
  value=8,
131
  )
132
 
133
+ gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=inference,
134
  cache_examples=True, cache_mode="lazy")
135
  gr.on(
136
  triggers=[run_button.click, prompt.submit],
137
+ fn=inference,
138
  inputs=[
139
  prompt,
140
  negative_prompt,
 
151
  if __name__ == "__main__":
152
  with timer("All tasks"):
153
  # demo.launch(mcp_server=True)
154
+ demo.launch(share=True)
stable_diffusion_inference.py CHANGED
@@ -11,6 +11,7 @@ import numpy as np
11
  from diffusers import StableDiffusionPipeline
12
  from accelerate import Accelerator
13
  import gradio as gr
 
14
 
15
  from loguru import logger
16
 
@@ -42,6 +43,7 @@ MAX_SEED = np.iinfo(np.int32).max
42
  MAX_IMAGE_SIZE = 256
43
 
44
 
 
45
  def inference(prompt,
46
  negative_prompt="",
47
  seed=42,
 
11
  from diffusers import StableDiffusionPipeline
12
  from accelerate import Accelerator
13
  import gradio as gr
14
+ import spaces
15
 
16
  from loguru import logger
17
 
 
43
  MAX_IMAGE_SIZE = 256
44
 
45
 
46
+ @spaces.GPU(duration=65)
47
  def inference(prompt,
48
  negative_prompt="",
49
  seed=42,