Update app.py

app.py CHANGED
@@ -6,7 +6,7 @@ import re
 import json
 import datetime
 from pathlib import Path
-from huggingface_hub import HfApi, hf_hub_download
+from huggingface_hub import HfApi, hf_hub_download
 from safetensors.torch import load_file
 import torch
 import subprocess
@@ -14,8 +14,7 @@ import subprocess
 # --- Conversion Function: Safetensors (UNet) → GGUF ---
 def convert_unet_to_gguf(safetensors_path, output_dir, progress=gr.Progress()):
     """
-    Converts a UNet safetensors file to GGUF using gguf-connector's CLI (t2).
-    Assumes the file is named 'unet.safetensors'.
+    Converts a UNet safetensors file to GGUF using gguf-connector's CLI (t2).
     """
     progress(0.1, desc="Starting UNet to GGUF conversion...")

@@ -23,59 +22,53 @@ def convert_unet_to_gguf(safetensors_path, output_dir, progress=gr.Progress()):
     # Ensure gguf-connector is available
     import gguf_connector  # noqa

-    #
+    # Prepare working directory
     work_dir = tempfile.mkdtemp()
-
+    input_name = os.path.basename(safetensors_path)
+    input_path = os.path.join(work_dir, input_name)
     shutil.copy(safetensors_path, input_path)

-    # GGUF output
-
+    # Expected GGUF output name: same basename + .gguf
+    base_name = os.path.splitext(input_name)[0]
+    gguf_output_path = os.path.join(work_dir, f"{base_name}.gguf")

     progress(0.3, desc="Running gguf-connector (t2: safetensors → GGUF)...")

-    #
-    # This is interactive, so we must simulate input via echo or expect
-    # But since ggc t2 may be interactive, we try non-interactive fallback:
-    # Unfortunately, ggc does not support non-interactive mode robustly.
-    # So we simulate by running in dir and hoping it picks the only file.
-
-    # Change working dir so ggc sees the file
+    # Run ggc t2 in the work directory
     original_cwd = os.getcwd()
     os.chdir(work_dir)

     try:
-        # Launch ggc t2 and auto-select first file via input redirection
-        # This is fragile but best-effort
         result = subprocess.run(
             ["ggc", "t2"],
-            input="1\n",  # select first
+            input="1\n",  # auto-select first file
             text=True,
             capture_output=True,
-            timeout=
+            timeout=600
         )
         if result.returncode != 0:
             raise RuntimeError(f"ggc t2 failed: {result.stderr}")
     finally:
         os.chdir(original_cwd)

+    # Check if GGUF was created
     if not os.path.exists(gguf_output_path):
-        # Try alternative naming
         candidates = [f for f in os.listdir(work_dir) if f.endswith(".gguf")]
         if not candidates:
             raise FileNotFoundError("No GGUF file generated by ggc t2")
         gguf_output_path = os.path.join(work_dir, candidates[0])

-    # Move to output
-    final_gguf_path = os.path.join(output_dir,
+    # Move to final output
+    final_gguf_path = os.path.join(output_dir, os.path.basename(gguf_output_path))
     shutil.move(gguf_output_path, final_gguf_path)

-    #
+    # Save minimal config
     config_path = os.path.join(output_dir, "config.json")
     with open(config_path, "w") as f:
         json.dump({
             "model_type": "unet",
             "format": "gguf",
-            "
+            "source_file": input_name
         }, f)

     progress(1.0, desc="Conversion to GGUF complete!")
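Note: app.py imports load_file from safetensors.torch, but none of the visible hunks call it. As an illustration only, not part of this commit, a hypothetical helper (sanity_check_safetensors) could use it to confirm the downloaded file actually parses before ggc t2 is run; load_file returns a plain dict mapping tensor names to torch tensors.

    from safetensors.torch import load_file

    def sanity_check_safetensors(path):
        # load_file returns {tensor_name: torch.Tensor}; an empty dict means
        # there is nothing to convert
        tensors = load_file(path)
        if not tensors:
            raise ValueError(f"{path} contains no tensors")
        n_params = sum(t.numel() for t in tensors.values())
        print(f"{len(tensors)} tensors, ~{n_params / 1e6:.1f}M parameters")
        return tensors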
@@ -88,8 +81,8 @@ def convert_unet_to_gguf(safetensors_path, output_dir, progress=gr.Progress()):
         shutil.rmtree(work_dir, ignore_errors=True)

 # --- Main Processing Function ---
-def process_and_upload_unet_to_gguf(repo_url, hf_token, new_repo_id, private_repo, progress=gr.Progress()):
-    if not all([repo_url, hf_token, new_repo_id]):
+def process_and_upload_unet_to_gguf(repo_url, safetensors_filename, hf_token, new_repo_id, private_repo, progress=gr.Progress()):
+    if not all([repo_url, safetensors_filename, hf_token, new_repo_id]):
         return None, "❌ Error: Please fill in all fields.", ""

     if not re.match(r"^[a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+$", new_repo_id):
@@ -112,11 +105,11 @@ def process_and_upload_unet_to_gguf(repo_url, hf_token, new_repo_id, private_repo, progress=gr.Progress()):
         return None, "❌ Source must be a Hugging Face model repo.", ""
     src_repo_id = clean_url.replace("https://huggingface.co/", "")

-    # Download
-    progress(0.15, desc="Downloading
+    # Download specified safetensors file
+    progress(0.15, desc=f"Downloading {safetensors_filename}...")
     safetensors_path = hf_hub_download(
         repo_id=src_repo_id,
-        filename=
+        filename=safetensors_filename,
         cache_dir=temp_dir,
         token=hf_token
     )
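Note: the new filename= argument passes the user-typed safetensors_filename straight to hf_hub_download, so a typo only surfaces as a download error. As a sketch (hypothetical helper, not in the commit), the filename could first be checked against the repo's file list with HfApi.list_repo_files from huggingface_hub:

    from huggingface_hub import HfApi

    def find_safetensors_files(repo_id, token=None):
        # Return every .safetensors path in the source repo so the handler
        # can validate the user-supplied filename before downloading
        files = HfApi().list_repo_files(repo_id=repo_id, token=token)
        return [f for f in files if f.endswith(".safetensors")]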
@@ -146,15 +139,13 @@ tags:
 - converted-by-gradio
 ---

-# GGUF
+# GGUF Model

 Converted from: [`{src_repo_id}`](https://huggingface.co/{src_repo_id})
-File: `
+File: `{safetensors_filename}` → `{os.path.splitext(safetensors_filename)[0]}.gguf`

 Converted by: {user_name}
 Date: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
-
-> ⚠️ This is a GGUF-quantized UNet for storage efficiency. Use with compatible GGUF-aware inference engines.
 """
     with open(os.path.join(output_dir, "README.md"), "w") as f:
         f.write(readme)
@@ -166,13 +157,13 @@ Date: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
         folder_path=output_dir,
         repo_type="model",
         token=hf_token,
-        commit_message="Upload
+        commit_message="Upload converted GGUF model"
     )

     progress(1.0, desc="✅ Done!")
     result_html = f"""
     ✅ Success!
-    Your GGUF
+    Your GGUF model is uploaded to: [{new_repo_id}](https://huggingface.co/{new_repo_id})
     Visibility: {'Private' if private_repo else 'Public'}
     """
     return gr.HTML(result_html), "✅ Conversion and upload successful!", ""
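Note: this hunk shows only part of the upload call; the repo_id argument and the repo-creation step live in context lines outside the diff. A self-contained sketch of the flow the surrounding code appears to implement, assuming an HfApi client and that the target repo may not exist yet (create_repo with exist_ok=True is idempotent); the helper name push_gguf_folder is hypothetical:

    from huggingface_hub import HfApi

    def push_gguf_folder(output_dir, new_repo_id, hf_token, private_repo):
        api = HfApi(token=hf_token)
        # Create the destination repo if needed, then upload the whole folder
        api.create_repo(repo_id=new_repo_id, repo_type="model",
                        private=private_repo, exist_ok=True)
        api.upload_folder(
            repo_id=new_repo_id,
            folder_path=output_dir,
            repo_type="model",
            commit_message="Upload converted GGUF model",
        )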
@@ -184,16 +175,21 @@ Visibility: {'Private' if private_repo else 'Public'}
         shutil.rmtree(output_dir, ignore_errors=True)

 # --- Gradio UI ---
-with gr.Blocks(title="UNet Safetensors → GGUF Converter") as demo:
-    gr.Markdown("#
-    gr.Markdown("Converts
+with gr.Blocks(title="Safetensors → GGUF Converter") as demo:
+    gr.Markdown("# Safetensors to GGUF Converter")
+    gr.Markdown("Converts any `.safetensors` file from a Hugging Face model repo to GGUF format.")

     with gr.Row():
         with gr.Column():
             repo_url = gr.Textbox(
                 label="Source Model Repository URL",
                 placeholder="https://huggingface.co/Yabo/FramePainter",
-                info="
+                info="Hugging Face model repo containing your safetensors file"
+            )
+            safetensors_filename = gr.Textbox(
+                label="Safetensors Filename",
+                placeholder="unet.safetensors",
+                info="Name of the .safetensors file in the repo (e.g., unet.safetensors)"
             )
             hf_token = gr.Textbox(
                 label="Hugging Face Token",
@@ -203,7 +199,7 @@ with gr.Blocks(title="UNet Safetensors → GGUF Converter") as demo:
         with gr.Column():
             new_repo_id = gr.Textbox(
                 label="New Repository ID",
-                placeholder="your-username/
+                placeholder="your-username/my-model-gguf",
                 info="Format: username/model-name"
             )
             private_repo = gr.Checkbox(label="Make Private", value=False)
@@ -216,16 +212,16 @@ with gr.Blocks(title="UNet Safetensors → GGUF Converter") as demo:

     convert_btn.click(
         fn=process_and_upload_unet_to_gguf,
-        inputs=[repo_url, hf_token, new_repo_id, private_repo],
+        inputs=[repo_url, safetensors_filename, hf_token, new_repo_id, private_repo],
         outputs=[repo_link_output, status_output],
         show_progress=True
     )

     gr.Examples(
         examples=[
-            ["https://huggingface.co/Yabo/FramePainter"]
+            ["https://huggingface.co/Yabo/FramePainter", "unet.safetensors"]
         ],
-        inputs=[repo_url]
+        inputs=[repo_url, safetensors_filename]
     )

 demo.launch()
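Note: in the hunk above, process_and_upload_unet_to_gguf returns three values on every path while outputs= lists only two components; Gradio expects one returned value per output component. A minimal, self-contained sketch of matched wiring, using a hypothetical third component named details_output and a placeholder handler:

    import gradio as gr

    with gr.Blocks() as demo:
        repo_url = gr.Textbox(label="Source Model Repository URL")
        convert_btn = gr.Button("Convert")
        repo_link_output = gr.HTML()
        status_output = gr.Textbox(label="Status")
        details_output = gr.Textbox(label="Details")  # hypothetical third output

        def handler(url):
            # One return value per component listed in outputs=
            return "<b>ok</b>", "Done", ""

        convert_btn.click(fn=handler, inputs=[repo_url],
                          outputs=[repo_link_output, status_output, details_output])

    demo.launch()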