miittnnss committed on
Commit
f2baa67
·
1 Parent(s): 6d8f5ba

Update app.py

Browse files
Files changed (1)
  1. app.py +5 -31
app.py CHANGED
import gradio as gr
from PIL import Image
import torch
import torch.cuda.amp as amp
from diffusers import DiffusionPipeline

# Select the best available device; fall back to CPU when no GPU is present.
device = torch.device("cpu")
if torch.cuda.is_available():
    device = torch.device("cuda")

# float16 weights fail on CPU ("addmm not implemented for 'Half'"),
# so only use half precision when CUDA is available.
dtype = torch.float16 if device.type == "cuda" else torch.float32
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
)
pipeline.load_lora_weights("miittnnss/miittnnss-lora")
pipeline.to(device)


def generate(prompt):
    """Run the diffusion pipeline on *prompt*, save and return the PIL image.

    The image is also written to ``generatedimage.png`` in the working
    directory as a side effect.
    """
    # autocast only makes sense on CUDA; comparing device types is more
    # robust than comparing torch.device objects (which also carry an index).
    with torch.no_grad(), amp.autocast(enabled=device.type == "cuda"):
        # Modern diffusers pipelines return a StableDiffusionPipelineOutput;
        # the generated images live under `.images`, not the removed
        # "sample" key (which raises a KeyError on current releases).
        image = pipeline(prompt, guidance_scale=8.5).images[0]

    image.save('generatedimage.png')
    return image


def predict(prompt):
    """Gradio-facing wrapper around :func:`generate`."""
    image = generate(prompt)
    return image


# NOTE(review): `capture_session=True` was removed in Gradio 3.x and raises
# TypeError on current versions, so it is dropped here.
iface = gr.Interface(
    fn=predict,
    title="Miittnnss LoRA Diffusion",
    inputs="text",
    outputs="image",
)
iface.launch()
 
import gradio as gr

# Build the demo directly from the Hugging Face Hub: `gr.load` wraps the
# hosted inference endpoint for the model in a ready-made text-to-image UI,
# and `.launch()` starts the local web server.
gr.load(
    "models/miittnnss/miittnnss-lora",
    title="Miittnnss LoRA Diffusion",
    description="Type any text prompt to get started.",
    theme="soft",
).launch()