# Commit e6fd579 (incendies) — refactor: update Client initialization to use
# 'token' instead of 'hf_token' for consistency across models
import base64
import os
import tempfile
import urllib.parse
import urllib.request

import gradio as gr
import spaces
from gradio_client import Client, handle_file

from daggr import FnNode, GradioNode, Graph
# ==================== FREE TOOLS COLLECTION ====================
# These tools are free to use and don't consume credits
def _hf_token():
"""HF token so ZeroGPU quota is used (not 'unlogged user'). Prefer env, then huggingface_hub."""
token = os.environ.get("HF_TOKEN")
if token:
return token
try:
from huggingface_hub import get_token
return get_token()
except Exception:
return None
def _url_to_path(url):
"""Download image from URL to a temp file; Daggr needs file paths to display images."""
if not url or not isinstance(url, str) or not url.startswith("http"):
return url
try:
ext = "png"
if ".jpg" in url or ".jpeg" in url:
ext = "jpg"
elif ".webp" in url:
ext = "webp"
f = tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False)
req = urllib.request.Request(url, headers={"User-Agent": "Gradio-Daggr/1.0"})
with urllib.request.urlopen(req, timeout=60) as r:
f.write(r.read())
f.close()
return f.name
except Exception:
return url
# 1. Background Removal
# The space's /image endpoint can return (original, result); the postprocess
# keeps only the last value so "final_image" shows the processed output.
bg_remover = GradioNode(
    "hf-applications/background-removal",
    api_name="/image",
    run_locally=False,
    inputs={"image": gr.Image()},
    postprocess=lambda *outs: outs[-1] if outs else None,
    outputs={"final_image": gr.Image(label="Background Removed")},
)
def _image_to_filepath(image):
"""Convert image input (list, dict, or path) to a single filepath string."""
if image is None:
return None
if isinstance(image, str):
return image
if isinstance(image, dict) and "path" in image and isinstance(image["path"], str):
return image["path"]
if isinstance(image, (list, tuple)) and len(image) > 0:
first = image[0]
if isinstance(first, str):
return first
if isinstance(first, dict) and "path" in first and isinstance(first["path"], str):
return first["path"]
return None
def _path_for_api(path):
"""Convert path (file path, URL, or data URL) to a path handle_file can use. Data URLs become temp files."""
if not path or not isinstance(path, str):
return None
if path.startswith("data:"):
# data:image/jpeg;base64,... -> decode and write to temp file (handle_file can't use data URL as filename)
try:
header, b64 = path.split(",", 1)
ext = "png"
if "jpeg" in header or "jpg" in header:
ext = "jpg"
elif "webp" in header:
ext = "webp"
data = base64.b64decode(b64)
f = tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False)
f.write(data)
f.close()
return f.name
except Exception as e:
raise RuntimeError(f"Upscaler: could not decode data URL: {e}") from e
return path
# 2. Image Upscaler (FnNode so we pass a single filepath string; GradioNode was receiving a list from Daggr)
@spaces.GPU
def run_upscaler(image, model_selection="4xBHI_dat2_real"):
    """Call the Phips/Upscaler API with a single filepath string.

    Returns the local path of the upscaled image, or None when no usable
    input image was supplied.

    Raises:
        RuntimeError: when the image cannot be loaded or the API call fails.
    """
    # _path_for_api(None) is None, so the two normalizers compose safely.
    filepath = _path_for_api(_image_to_filepath(image))
    if not filepath:
        return None
    # Phips/Upscaler runs on a different server — handle_file uploads the
    # actual file content so the remote side can read the image.
    try:
        upload = handle_file(filepath)
    except Exception as e:
        raise RuntimeError(f"Upscaler: could not load image from path: {e}") from e
    try:
        api = Client("Phips/Upscaler", token=_hf_token())
        result = api.predict(upload, model_selection, api_name="/upscale_image")
    except Exception as e:
        raise RuntimeError(f"Upscaler API error: {e}") from e
    # The API replies with [comparison, upscaled_path]; prefer the second
    # element, falling back to whatever usable value the first one holds.
    out = None
    if result and len(result) >= 2 and result[1]:
        out = result[1]
    elif result and result[0]:
        first = result[0]
        if isinstance(first, (list, tuple)) and len(first) >= 2 and first[1]:
            out = first[1]
        elif isinstance(first, str):
            out = first
    # Remote URLs must be downloaded so Daggr can display a local file.
    if isinstance(out, str) and out.startswith("http"):
        return _url_to_path(out)
    return out
# Daggr node wrapping run_upscaler; input widgets mirror its parameters.
upscaler = FnNode(
    run_upscaler,
    name="Upscaler",
    inputs={
        # run_upscaler normalizes lists/dicts to a single path itself.
        "image": gr.Image(label="Input Image"),
        # Model identifiers forwarded verbatim to the Phips/Upscaler API.
        "model_selection": gr.Dropdown(
            label="Model",
            choices=["4xBHI_dat2_real", "4xNomos8kDAT", "4xHFA2k", "2xEvangelion_dat2"],
            value="4xBHI_dat2_real",
        ),
    },
    outputs={"upscaled_image": gr.Image(label="Upscaled Image")},
)
# 3. Z-Image Turbo (FnNode so prompt is sent as prompt, not mistaken for a file path — avoids "File name too long")
@spaces.GPU
def run_z_image_turbo(prompt, height=1024, width=1024, seed=42):
    """Call the Z-Image-Turbo API with the prompt as a plain string.

    Returns a local image path (remote URLs are downloaded first), or None
    when the prompt is empty or the API yields nothing usable.

    Raises:
        RuntimeError: when the API call fails.
    """
    if not isinstance(prompt, str) or not prompt.strip():
        return None
    try:
        api = Client("hf-applications/Z-Image-Turbo", token=_hf_token())
        result = api.predict(
            prompt=prompt.strip(),
            height=float(height),
            width=float(width),
            seed=int(seed),
            api_name="/generate_image",
        )
    except Exception as e:
        raise RuntimeError(f"Z-Image-Turbo API error: {e}") from e
    # result[0] is a dict(path=..., url=...) or a bare string; result[1] is
    # the seed actually used. Daggr needs a local file path.
    if not result or not result[0]:
        return None
    img = result[0]
    if isinstance(img, dict):
        out = img.get("url") or img.get("path")
    elif isinstance(img, str):
        out = img
    else:
        out = None
    if isinstance(out, str) and out.startswith("http"):
        return _url_to_path(out)
    return out
# Daggr node wrapping run_z_image_turbo; input widgets mirror its parameters.
z_image_turbo = FnNode(
    run_z_image_turbo,
    name="Z-Image-Turbo",
    inputs={
        "prompt": gr.Textbox(label="Prompt", lines=3),
        # Dimensions constrained to 512-1024 px in 64 px steps.
        "height": gr.Slider(512, 1024, value=1024, step=64, label="Height"),
        "width": gr.Slider(512, 1024, value=1024, step=64, label="Width"),
        "seed": gr.Number(value=42, label="Seed", precision=0),
    },
    outputs={"generated_image": gr.Image(label="Generated Image")},
)
# 4. FLUX.2 Klein 9B (FnNode so prompt is sent as prompt, not mistaken for filename — avoids "File name too long")
@spaces.GPU
def run_flux_klein(
    prompt,
    mode_choice="Distilled (4 steps)",
    seed=0,
    randomize_seed=True,
    width=1024,
    height=1024,
):
    """Call the FLUX.2-klein-9B API with the prompt as a plain string.

    Returns a local image path (remote URLs are downloaded first), or None
    when the prompt is empty or the API yields nothing usable.

    Raises:
        RuntimeError: when the API call fails.
    """
    if not isinstance(prompt, str) or not prompt.strip():
        return None
    # Distilled mode pairs 4 steps with guidance 1.0; base mode uses 50
    # steps with guidance 3.5.
    distilled = "Distilled" in mode_choice
    try:
        api = Client("black-forest-labs/FLUX.2-klein-9B", token=_hf_token())
        result = api.predict(
            prompt=prompt.strip(),
            input_images=[],
            mode_choice=mode_choice,
            seed=float(seed),
            randomize_seed=bool(randomize_seed),
            width=float(width),
            height=float(height),
            num_inference_steps=4.0 if distilled else 50.0,
            guidance_scale=1.0 if distilled else 3.5,
            prompt_upsampling=False,
            api_name="/generate",
        )
    except Exception as e:
        raise RuntimeError(f"FLUX.2-klein-9B API error: {e}") from e
    # result[0] is a dict(path=..., url=...) or a bare string; result[1] is
    # the seed actually used. Daggr needs a local file path.
    if not result or not result[0]:
        return None
    img = result[0]
    if isinstance(img, dict):
        out = img.get("url") or img.get("path")
    elif isinstance(img, str):
        out = img
    else:
        out = None
    if isinstance(out, str) and out.startswith("http"):
        return _url_to_path(out)
    return out
# Daggr node wrapping run_flux_klein; input widgets mirror its parameters.
flux_klein = FnNode(
    run_flux_klein,
    name="FLUX.2-klein-9B",
    inputs={
        "prompt": gr.Textbox(label="Prompt", lines=3),
        # Distilled -> 4 steps / guidance 1.0; Base -> 50 steps / guidance
        # 3.5 (mapping applied inside run_flux_klein).
        "mode_choice": gr.Radio(
            choices=["Distilled (4 steps)", "Base (50 steps)"],
            value="Distilled (4 steps)",
            label="Mode",
        ),
        "seed": gr.Number(value=0, label="Seed", precision=0),
        "randomize_seed": gr.Checkbox(value=True, label="Randomize seed"),
        # Dimensions constrained to 512-1024 px in 64 px steps.
        "width": gr.Slider(512, 1024, value=1024, step=64, label="Width"),
        "height": gr.Slider(512, 1024, value=1024, step=64, label="Height"),
    },
    outputs={"result": gr.Image(label="Generated Image")},
)
# Note: hysts-daggr/daggr-text-to-image-to-3d is a Daggr space itself,
# so it doesn't expose a standard Gradio API and cannot be connected via GradioNode.
# Users can access it directly at: https://huggingface.co/spaces/hysts-daggr/daggr-text-to-image-to-3d
# ==================== GRAPH SETUP ====================
# Assemble every free tool into one workflow graph and serve it.
# Upscaler is an FnNode that calls Phips/Upscaler with a single path string (no list).
_free_tool_nodes = [bg_remover, upscaler, z_image_turbo, flux_klein]
graph = Graph(name="Imageat Workflow - Free Tools", nodes=_free_tool_nodes)

# Launch the Space
graph.launch()