Update app.py
app.py CHANGED

@@ -2,12 +2,12 @@ import gradio as gr
 import numpy as np
 import random
 
-# import spaces #[uncomment to use ZeroGPU]
+import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "
+model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"  # Replace with the model you would like to use
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
@@ -21,7 +21,7 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
-# @spaces.GPU #[uncomment to use ZeroGPU]
+@spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
     negative_prompt,
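In context, the commit enables ZeroGPU for the Space and points it at Alpha-VLLM/Lumina-Image-2.0. A minimal sketch of how the changed lines fit together, assuming the standard Gradio text-to-image Space template this file is based on; the pipeline loading and the body of infer below are assumptions reconstructed from that template, not part of the diff:

import random

import numpy as np
import spaces  # ZeroGPU helper, available inside Hugging Face Spaces
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Loaded once at startup; on ZeroGPU hardware a GPU is only attached
# while a @spaces.GPU-decorated function is executing.
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)

MAX_SEED = np.iinfo(np.int32).max

@spaces.GPU  # request a GPU for the duration of each call
def infer(prompt, negative_prompt="", seed=0, randomize_seed=True):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        generator=generator,
    ).images[0]
    return image, seed

On regular CPU or GPU Spaces the decorator is a no-op in effect; it matters on ZeroGPU hardware, where it tells the scheduler which calls need a GPU slice, which is why the template ships it commented out with the "[uncomment to use ZeroGPU]" note.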