iresidentevil committed on
Commit
8c1c279
·
verified ·
1 Parent(s): 553b145

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -2,13 +2,12 @@ import gradio as gr
2
  import numpy as np
3
  import random
4
 
5
- # import spaces #[uncomment to use ZeroGPU]
6
  from diffusers import DiffusionPipeline
7
  import torch
8
 
9
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
- torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
12
 
13
  MODEL_OPTIONS = [
14
  ("stabilityai/sdxl-turbo", "SDXL Turbo (Быстро)"),
@@ -31,13 +30,13 @@ def load_pipelines():
31
  # SDXL Turbo
32
  mid = "stabilityai/sdxl-turbo"
33
  pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
34
- pipe = pipe.to(device)
35
  PIPELINES[mid] = pipe
36
 
37
  # SD v1-4
38
  mid = "CompVis/stable-diffusion-v1-4"
39
  pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
40
- pipe = pipe.to(device)
41
  PIPELINES[mid] = pipe
42
 
43
  # # Qwen-Image
@@ -51,9 +50,9 @@ load_pipelines()
51
 
52
 
53
 
54
- # @spaces.GPU #[uncomment to use ZeroGPU]
55
  def infer(
56
- model_id, # Добавил
57
  prompt,
58
  negative_prompt,
59
  seed,
@@ -69,7 +68,8 @@ def infer(
69
 
70
  generator = torch.Generator().manual_seed(seed)
71
 
72
- pipe = PIPELINES[model_id]
 
73
 
74
  image = pipe(
75
  prompt=prompt,
 
2
  import numpy as np
3
  import random
4
 
5
+ import spaces #[uncomment to use ZeroGPU]
6
  from diffusers import DiffusionPipeline
7
  import torch
8
 
9
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
11
 
12
  MODEL_OPTIONS = [
13
  ("stabilityai/sdxl-turbo", "SDXL Turbo (Быстро)"),
 
30
  # SDXL Turbo
31
  mid = "stabilityai/sdxl-turbo"
32
  pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
33
+ # pipe = pipe.to(device)
34
  PIPELINES[mid] = pipe
35
 
36
  # SD v1-4
37
  mid = "CompVis/stable-diffusion-v1-4"
38
  pipe = DiffusionPipeline.from_pretrained(mid, torch_dtype=torch_dtype)
39
+ # pipe = pipe.to(device)
40
  PIPELINES[mid] = pipe
41
 
42
  # # Qwen-Image
 
50
 
51
 
52
 
53
+ @spaces.GPU #[uncomment to use ZeroGPU]
54
  def infer(
55
+ model_id,
56
  prompt,
57
  negative_prompt,
58
  seed,
 
68
 
69
  generator = torch.Generator().manual_seed(seed)
70
 
71
+ pipe = PIPELINES[model_id]
72
+ pipe = pipe.to(device)
73
 
74
  image = pipe(
75
  prompt=prompt,