Hugging Face Spaces — status: Running
Pull Request #13: "Update app.py" — opened by Duskfallcrew
File changed: app.py
|
@@ -419,7 +419,7 @@ def cleanup_temp_files(directory=None):
|
|
| 419 |
except Exception as e:
|
| 420 |
print(f"Warning: Error during cleanup: {e}")
|
| 421 |
|
| 422 |
-
def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16,
|
| 423 |
"""Convert the model between different formats."""
|
| 424 |
temp_dir = None
|
| 425 |
history = ConversionHistory()
|
|
@@ -453,6 +453,7 @@ def convert_model(model_to_load, save_precision_as, epoch, global_step, referenc
|
|
| 453 |
args.global_step = global_step
|
| 454 |
args.reference_model = reference_model
|
| 455 |
args.fp16 = fp16
|
|
|
|
| 456 |
|
| 457 |
update_progress(output_widget, "π Validating input model...", 10)
|
| 458 |
args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
|
|
@@ -618,7 +619,7 @@ def get_auto_optimization_suggestions(model_path: str, precision: str, available
|
|
| 618 |
|
| 619 |
return suggestions
|
| 620 |
|
| 621 |
-
def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private
|
| 622 |
"""Uploads a model to the Hugging Face Hub."""
|
| 623 |
try:
|
| 624 |
# Login to Hugging Face
|
|
@@ -658,15 +659,15 @@ def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_priv
|
|
| 658 |
|
| 659 |
# ---------------------- GRADIO INTERFACE ----------------------
|
| 660 |
|
| 661 |
-
def main(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, hf_token, orgs_name, model_name, make_private):
|
| 662 |
"""Main function orchestrating the entire process."""
|
| 663 |
output = gr.Markdown()
|
| 664 |
|
| 665 |
# Create tempdir, will only be there for the function
|
| 666 |
with tempfile.TemporaryDirectory() as output_path:
|
| 667 |
-
conversion_output = convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output)
|
| 668 |
|
| 669 |
-
upload_output = upload_to_huggingface(output_path, hf_token, orgs_name, model_name, make_private
|
| 670 |
|
| 671 |
# Return a combined output
|
| 672 |
return f"{conversion_output}\n\n{upload_output}"
|
|
@@ -750,6 +751,12 @@ with gr.Blocks(css="#main-container { display: flex; flex-direction: column; hei
|
|
| 750 |
info="Load model in half precision (recommended for CPU usage)"
|
| 751 |
)
|
| 752 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 753 |
# Hugging Face Upload Section
|
| 754 |
gr.Markdown("### Upload to Hugging Face (Optional)")
|
| 755 |
|
|
@@ -790,6 +797,7 @@ with gr.Blocks(css="#main-container { display: flex; flex-direction: column; hei
|
|
| 790 |
global_step,
|
| 791 |
reference_model,
|
| 792 |
fp16,
|
|
|
|
| 793 |
hf_token,
|
| 794 |
orgs_name,
|
| 795 |
model_name,
|
|
|
|
| 419 |
except Exception as e:
|
| 420 |
print(f"Warning: Error during cleanup: {e}")
|
| 421 |
|
| 422 |
+
def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, use_xformers, hf_token, orgs_name, model_name, make_private):
|
| 423 |
"""Convert the model between different formats."""
|
| 424 |
temp_dir = None
|
| 425 |
history = ConversionHistory()
|
|
|
|
| 453 |
args.global_step = global_step
|
| 454 |
args.reference_model = reference_model
|
| 455 |
args.fp16 = fp16
|
| 456 |
+
args.use_xformers = use_xformers
|
| 457 |
|
| 458 |
update_progress(output_widget, "π Validating input model...", 10)
|
| 459 |
args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
|
|
|
|
| 619 |
|
| 620 |
return suggestions
|
| 621 |
|
| 622 |
+
def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private):
|
| 623 |
"""Uploads a model to the Hugging Face Hub."""
|
| 624 |
try:
|
| 625 |
# Login to Hugging Face
|
|
|
|
| 659 |
|
| 660 |
# ---------------------- GRADIO INTERFACE ----------------------
|
| 661 |
|
| 662 |
+
def main(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, use_xformers, hf_token, orgs_name, model_name, make_private):
|
| 663 |
"""Main function orchestrating the entire process."""
|
| 664 |
output = gr.Markdown()
|
| 665 |
|
| 666 |
# Create tempdir, will only be there for the function
|
| 667 |
with tempfile.TemporaryDirectory() as output_path:
|
| 668 |
+
conversion_output = convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, use_xformers, output)
|
| 669 |
|
| 670 |
+
upload_output = upload_to_huggingface(output_path, hf_token, orgs_name, model_name, make_private)
|
| 671 |
|
| 672 |
# Return a combined output
|
| 673 |
return f"{conversion_output}\n\n{upload_output}"
|
|
|
|
| 751 |
info="Load model in half precision (recommended for CPU usage)"
|
| 752 |
)
|
| 753 |
|
| 754 |
+
use_xformers = gr.Checkbox(
|
| 755 |
+
label="Enable Memory-Efficient Attention",
|
| 756 |
+
value=False,
|
| 757 |
+
info="Enable xFormers for reduced memory usage during conversion"
|
| 758 |
+
)
|
| 759 |
+
|
| 760 |
# Hugging Face Upload Section
|
| 761 |
gr.Markdown("### Upload to Hugging Face (Optional)")
|
| 762 |
|
|
|
|
| 797 |
global_step,
|
| 798 |
reference_model,
|
| 799 |
fp16,
|
| 800 |
+
use_xformers,
|
| 801 |
hf_token,
|
| 802 |
orgs_name,
|
| 803 |
model_name,
|