Update app.py
app.py CHANGED
@@ -11,16 +11,16 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 device = "cuda"
 dtype = torch.float16
 
-repo = "stabilityai/stable-diffusion-3-
+repo = "stabilityai/stable-diffusion-3.5-large"
 t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
 
 model = AutoModelForCausalLM.from_pretrained(
-    "microsoft/Phi-3-mini-
+    "microsoft/Phi-3.5-mini-instruct",
     device_map="cuda",
     torch_dtype=torch.bfloat16,
     trust_remote_code=True,
 )
-tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-
+tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
 upsampler = pipeline(
     "text-generation",
     model=model,
@@ -88,7 +88,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
-        # 日本語が入力できる SD3
+        # 日本語が入力できる SD3.5 Large
         """)
 
         with gr.Row():
@@ -131,7 +131,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=64,
-                value=1024,
+                value=1024+512,
             )
 
             height = gr.Slider(
@@ -139,7 +139,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=64,
-                value=1024,
+                value=1024+512,
             )
 
             with gr.Row():
@@ -149,7 +149,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=0.0,
                 maximum=10.0,
                 step=0.1,
-                value=5
+                value=3.5,
             )
 
             num_inference_steps = gr.Slider(
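
For reference, the model-loading section of app.py after this commit would read roughly as the sketch below. Only lines 11-26 appear in the hunk, so the import block and the tokenizer argument of the text-generation pipeline are assumptions; the truncated repo ids on the removed lines are left as the diff shows them. The page title (日本語が入力できる, "accepts Japanese input") suggests the Phi-3.5 model upsamples or translates Japanese prompts before image generation, but that code is not part of this diff.

# Sketch of the updated loading block; imports and the tokenizer kwarg are assumed.
import os
import torch
from diffusers import StableDiffusion3Pipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

device = "cuda"
dtype = torch.float16

repo = "stabilityai/stable-diffusion-3.5-large"
t2i = StableDiffusion3Pipeline.from_pretrained(
    repo,
    torch_dtype=torch.float16,
    revision="refs/pr/26",      # carried over unchanged from the diff
    token=os.environ["TOKEN"],  # gated repo: requires an HF access token
).to(device)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-mini-instruct",
    device_map="cuda",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
upsampler = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,  # assumed; the diff cuts off after model=model,
)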
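
The new slider defaults (1024+512 = 1536 px for width and height, guidance scale 3.5) presumably feed a generation call along the lines of the hypothetical sketch below; the actual infer function is outside the hunks shown here, so its name, argument list, and the step count are assumptions.

# Hypothetical generation call using the new defaults; not part of the diff.
def infer(prompt, width=1024 + 512, height=1024 + 512,
          guidance_scale=3.5, num_inference_steps=28):
    image = t2i(
        prompt=prompt,
        width=width,                    # default raised from 1024 to 1536
        height=height,
        guidance_scale=guidance_scale,  # default lowered from 5 to 3.5
        num_inference_steps=num_inference_steps,  # assumed value
    ).images[0]
    return image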