Spaces:
Running
Running
use WeReCooking encoder, remove HF_TOKEN
Browse files
app.py
CHANGED
|
@@ -54,10 +54,10 @@ vae_path = find_model(VAE_FILE)
|
|
| 54 |
try:
|
| 55 |
llm_path = find_model(LLM_FILE)
|
| 56 |
except FileNotFoundError:
|
| 57 |
-
print("[init] Downloading
|
| 58 |
llm_path = hf_hub_download(
|
| 59 |
-
repo_id="
|
| 60 |
-
filename=LLM_FILE,
|
| 61 |
)
|
| 62 |
|
| 63 |
print(f"[init] Diffusion: {diffusion_path}")
|
|
@@ -96,8 +96,7 @@ def download_lora(repo_id: str) -> tuple[str, str]:
|
|
| 96 |
if not repo_id or repo_id.startswith("("):
|
| 97 |
return "", "Select a LoRA first"
|
| 98 |
try:
|
| 99 |
-
|
| 100 |
-
files = list_repo_files(repo_id, token=token)
|
| 101 |
sf_files = [f for f in files if f.endswith(".safetensors")]
|
| 102 |
if not sf_files:
|
| 103 |
return "", f"No .safetensors found in {repo_id}"
|
|
@@ -114,7 +113,7 @@ def download_lora(repo_id: str) -> tuple[str, str]:
|
|
| 114 |
size_mb = os.path.getsize(lora_dst) / 1024**2
|
| 115 |
return label, f"Already cached ({size_mb:.0f} MB)"
|
| 116 |
print(f"[lora] Downloading {repo_id}/{target}...")
|
| 117 |
-
src = hf_hub_download(repo_id=repo_id, filename=target, token=token)
|
| 118 |
shutil.copy2(src, lora_dst)
|
| 119 |
size_mb = os.path.getsize(lora_dst) / 1024**2
|
| 120 |
DOWNLOADED_LORAS[label] = lora_name
|
|
@@ -257,7 +256,7 @@ with gr.Blocks(theme="NoCrypt/miku", title="FLUX.2 Klein 4B CPU") as demo:
|
|
| 257 |
lora_search.select(fn=on_lora_select, inputs=[lora_search, active_loras], outputs=[active_loras, lora_status, lora_search])
|
| 258 |
gen_btn.click(fn=generate, inputs=[prompt, ref_image, resolution, steps, seed, lora_strength, active_loras], outputs=[output_image, status_text])
|
| 259 |
|
| 260 |
-
gr.Markdown("---\nsd.cpp Q4_K_M |
|
| 261 |
"[BFL](https://bfl.ai/models/flux-2-klein) | [sd.cpp](https://github.com/leejet/stable-diffusion.cpp) | "
|
| 262 |
"[Browse LoRAs](https://huggingface.co/models?search=klein+4b&filter=lora)")
|
| 263 |
|
|
|
|
| 54 |
try:
|
| 55 |
llm_path = find_model(LLM_FILE)
|
| 56 |
except FileNotFoundError:
|
| 57 |
+
print("[init] Downloading uncensored text encoder...")
|
| 58 |
llm_path = hf_hub_download(
|
| 59 |
+
repo_id="WeReCooking/flux2-klein-4B-uncensored-text-encoder",
|
| 60 |
+
filename=LLM_FILE,
|
| 61 |
)
|
| 62 |
|
| 63 |
print(f"[init] Diffusion: {diffusion_path}")
|
|
|
|
| 96 |
if not repo_id or repo_id.startswith("("):
|
| 97 |
return "", "Select a LoRA first"
|
| 98 |
try:
|
| 99 |
+
files = list_repo_files(repo_id)
|
|
|
|
| 100 |
sf_files = [f for f in files if f.endswith(".safetensors")]
|
| 101 |
if not sf_files:
|
| 102 |
return "", f"No .safetensors found in {repo_id}"
|
|
|
|
| 113 |
size_mb = os.path.getsize(lora_dst) / 1024**2
|
| 114 |
return label, f"Already cached ({size_mb:.0f} MB)"
|
| 115 |
print(f"[lora] Downloading {repo_id}/{target}...")
|
| 116 |
+
src = hf_hub_download(repo_id=repo_id, filename=target)
|
| 117 |
shutil.copy2(src, lora_dst)
|
| 118 |
size_mb = os.path.getsize(lora_dst) / 1024**2
|
| 119 |
DOWNLOADED_LORAS[label] = lora_name
|
|
|
|
| 256 |
lora_search.select(fn=on_lora_select, inputs=[lora_search, active_loras], outputs=[active_loras, lora_status, lora_search])
|
| 257 |
gen_btn.click(fn=generate, inputs=[prompt, ref_image, resolution, steps, seed, lora_strength, active_loras], outputs=[output_image, status_text])
|
| 258 |
|
| 259 |
+
gr.Markdown("---\nsd.cpp Q4_K_M | Uncensored encoder | "
|
| 260 |
"[BFL](https://bfl.ai/models/flux-2-klein) | [sd.cpp](https://github.com/leejet/stable-diffusion.cpp) | "
|
| 261 |
"[Browse LoRAs](https://huggingface.co/models?search=klein+4b&filter=lora)")
|
| 262 |
|