Spaces:
Runtime error
Commit
·
c15729e
1
Parent(s):
b8acc16
Fix Gradio compatibility issues
Browse files

- Remove deprecated 'info' parameter from gr.Image() and other components
- Update Gradio version to 4.x for better compatibility
- Use gr.Markdown for component descriptions instead of info parameter
- Ensure compatibility with latest Hugging Face Spaces environment
- .DS_Store +0 -0
- README.md +1 -1
- __pycache__/app.cpython-312.pyc +0 -0
- app.py +5 -8
- requirements.txt +1 -1
.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🎨
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 4.44.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
__pycache__/app.cpython-312.pyc
ADDED
|
Binary file (16.4 kB). View file
|
|
|
app.py
CHANGED
|
@@ -301,8 +301,7 @@ def create_interface():
|
|
| 301 |
model_dropdown = gr.Dropdown(
|
| 302 |
choices=list(MODELS.keys()),
|
| 303 |
value="Stable Diffusion 1.5",
|
| 304 |
-
label="Model"
|
| 305 |
-
info="Choose the base model"
|
| 306 |
)
|
| 307 |
|
| 308 |
# Text prompt
|
|
@@ -313,10 +312,10 @@ def create_interface():
|
|
| 313 |
)
|
| 314 |
|
| 315 |
# Reference image
|
|
|
|
| 316 |
reference_input = gr.Image(
|
| 317 |
label="Reference Image",
|
| 318 |
-
type="pil"
|
| 319 |
-
info="Upload a face or style reference image"
|
| 320 |
)
|
| 321 |
|
| 322 |
with gr.Row():
|
|
@@ -375,8 +374,7 @@ def create_interface():
|
|
| 375 |
|
| 376 |
lora_path = gr.Textbox(
|
| 377 |
label="LoRA Model Path (optional)",
|
| 378 |
-
placeholder="/path/to/lora/model.safetensors"
|
| 379 |
-
info="Local path to LoRA weights"
|
| 380 |
)
|
| 381 |
|
| 382 |
lora_scale = gr.Slider(
|
|
@@ -400,8 +398,7 @@ def create_interface():
|
|
| 400 |
|
| 401 |
output_image = gr.Image(
|
| 402 |
label="Reference | Generated",
|
| 403 |
-
type="pil"
|
| 404 |
-
info="Side-by-side comparison"
|
| 405 |
)
|
| 406 |
|
| 407 |
# Event handlers
|
|
|
|
| 301 |
model_dropdown = gr.Dropdown(
|
| 302 |
choices=list(MODELS.keys()),
|
| 303 |
value="Stable Diffusion 1.5",
|
| 304 |
+
label="Model"
|
|
|
|
| 305 |
)
|
| 306 |
|
| 307 |
# Text prompt
|
|
|
|
| 312 |
)
|
| 313 |
|
| 314 |
# Reference image
|
| 315 |
+
gr.Markdown("**Reference Image** - Upload a face or style reference image")
|
| 316 |
reference_input = gr.Image(
|
| 317 |
label="Reference Image",
|
| 318 |
+
type="pil"
|
|
|
|
| 319 |
)
|
| 320 |
|
| 321 |
with gr.Row():
|
|
|
|
| 374 |
|
| 375 |
lora_path = gr.Textbox(
|
| 376 |
label="LoRA Model Path (optional)",
|
| 377 |
+
placeholder="/path/to/lora/model.safetensors"
|
|
|
|
| 378 |
)
|
| 379 |
|
| 380 |
lora_scale = gr.Slider(
|
|
|
|
| 398 |
|
| 399 |
output_image = gr.Image(
|
| 400 |
label="Reference | Generated",
|
| 401 |
+
type="pil"
|
|
|
|
| 402 |
)
|
| 403 |
|
| 404 |
# Event handlers
|
requirements.txt
CHANGED
|
@@ -2,7 +2,7 @@ torch>=2.0.0
|
|
| 2 |
torchvision>=0.15.0
|
| 3 |
transformers>=4.30.0
|
| 4 |
diffusers>=0.21.0
|
| 5 |
-
gradio>=
|
| 6 |
Pillow>=9.5.0
|
| 7 |
numpy>=1.24.0
|
| 8 |
opencv-python>=4.8.0
|
|
|
|
| 2 |
torchvision>=0.15.0
|
| 3 |
transformers>=4.30.0
|
| 4 |
diffusers>=0.21.0
|
| 5 |
+
gradio>=4.0.0,<5.0.0
|
| 6 |
Pillow>=9.5.0
|
| 7 |
numpy>=1.24.0
|
| 8 |
opencv-python>=4.8.0
|