sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
gradio-app/gradio:client/python/test/test_snippet.py | from __future__ import annotations
from contextlib import contextmanager
import gradio as gr
from gradio_client import Client
from gradio_client.snippet import generate_code_snippets
@contextmanager
def connect(demo: gr.Blocks, **kwargs):
_, local_url, _ = demo.launch(prevent_thread_lock=True, **kwargs)
try:
yield Client(local_url)
finally:
demo.close()
class TestSnippetExecution:
def test_python_snippet_runs_for_simple_demo(self):
def greet(name):
return "Hello " + name + "!"
demo = gr.Interface(
fn=greet,
inputs=gr.Textbox(label="Name"),
outputs=gr.Textbox(label="Greeting"),
api_name="greet",
)
with connect(demo) as client:
api_info = client.view_api(print_info=False, return_format="dict")
endpoint_info = api_info["named_endpoints"]["/greet"]
snippets = generate_code_snippets("/greet", endpoint_info, client.src)
python_snippet = snippets["python"]
assert "client.predict(" in python_snippet
assert 'api_name="/greet"' in python_snippet
namespace = {}
exec(python_snippet, namespace)
assert namespace["result"] == "Hello Hello!!!"
def test_python_snippet_runs_for_calculator(self):
def calculator(num1, operation, num2):
if operation == "add":
return num1 + num2
elif operation == "subtract":
return num1 - num2
elif operation == "multiply":
return num1 * num2
elif operation == "divide":
return num1 / num2
demo = gr.Interface(
calculator,
[
"number",
gr.Radio(["add", "subtract", "multiply", "divide"]),
"number",
],
"number",
api_name="predict",
)
with connect(demo) as client:
api_info = client.view_api(print_info=False, return_format="dict")
endpoint_info = api_info["named_endpoints"]["/predict"]
snippets = generate_code_snippets("/predict", endpoint_info, client.src)
python_snippet = snippets["python"]
namespace = {}
exec(python_snippet, namespace)
assert namespace["result"] == 6.0
def test_python_snippet_runs_with_default_params(self):
def add(a, b=10):
return a + b
demo = gr.Interface(
add,
[gr.Number(label="a"), gr.Number(label="b", value=10)],
gr.Number(label="result"),
api_name="add",
)
with connect(demo) as client:
api_info = client.view_api(print_info=False, return_format="dict")
endpoint_info = api_info["named_endpoints"]["/add"]
snippets = generate_code_snippets("/add", endpoint_info, client.src)
python_snippet = snippets["python"]
namespace = {}
exec(python_snippet, namespace)
assert isinstance(namespace["result"], (int, float))
| {
"repo_id": "gradio-app/gradio",
"file_path": "client/python/test/test_snippet.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
gradio-app/gradio:gradio/cli/commands/skills.py | """CLI command to install the Gradio skill for AI coding assistants.
Usage:
gradio skills add --cursor
gradio skills add --cursor --opencode
gradio skills add --cursor --global
gradio skills add --dest=~/my-skills
gradio skills add abidlabs/english-translator --cursor
"""
from __future__ import annotations
import os
import shutil
from pathlib import Path
from typing import Annotated
import typer
from gradio_client import Client
from gradio_client.snippet import generate_code_snippets
from huggingface_hub import HfApi
SKILL_ID = "gradio"
_GITHUB_RAW = "https://raw.githubusercontent.com/gradio-app/gradio/main"
_SKILL_PREFIX = ".agents/skills/gradio"
_SKILL_FILES = ["SKILL.md", "examples.md"]
skills_app = typer.Typer(help="Manage Gradio skills for AI assistants.")
def _import_hf_skills():
try:
from huggingface_hub.cli.skills import ( # type: ignore[import-not-found]
CENTRAL_GLOBAL,
CENTRAL_LOCAL,
GLOBAL_TARGETS,
LOCAL_TARGETS,
)
except (ImportError, ModuleNotFoundError):
raise SystemExit(
"The 'gradio skills' command requires huggingface_hub >= 1.4.0.\n"
"Please upgrade: pip install --upgrade huggingface_hub"
) from None
return CENTRAL_GLOBAL, CENTRAL_LOCAL, GLOBAL_TARGETS, LOCAL_TARGETS
def _download(url: str) -> str:
from huggingface_hub.utils import get_session
try:
response = get_session().get(url)
response.raise_for_status()
except Exception as e:
raise SystemExit(
f"Failed to download {url}\n{e}\n\n"
"Make sure you have internet access. The skill files are fetched from "
"the Gradio GitHub repository."
) from e
return response.text
def _remove_existing(path: Path, force: bool) -> None:
if not (path.exists() or path.is_symlink()):
return
if not force:
raise SystemExit(
f"Skill already exists at {path}.\nRe-run with --force to overwrite."
)
if path.is_dir() and not path.is_symlink():
shutil.rmtree(path)
else:
path.unlink()
def _create_symlink(
agent_skills_dir: Path,
central_skill_path: Path,
force: bool,
skill_id: str = SKILL_ID,
) -> Path:
agent_skills_dir = agent_skills_dir.expanduser().resolve()
agent_skills_dir.mkdir(parents=True, exist_ok=True)
link_path = agent_skills_dir / skill_id
_remove_existing(link_path, force)
link_path.symlink_to(os.path.relpath(central_skill_path, agent_skills_dir))
return link_path
def _install_to(skills_dir: Path, force: bool) -> Path:
skills_dir = skills_dir.expanduser().resolve()
skills_dir.mkdir(parents=True, exist_ok=True)
dest = skills_dir / SKILL_ID
_remove_existing(dest, force)
dest.mkdir()
for fname in _SKILL_FILES:
content = _download(f"{_GITHUB_RAW}/{_SKILL_PREFIX}/{fname}")
(dest / fname).write_text(content, encoding="utf-8")
return dest
def _space_id_to_skill_id(space_id: str) -> str:
return space_id.replace("/", "-")
def _render_endpoint_section(
api_name: str, endpoint_info: dict, space_id: str, src_url: str
) -> str:
params = endpoint_info.get("parameters", [])
returns = endpoint_info.get("returns", [])
lines: list[str] = []
lines.append(f"### `{api_name}`\n")
if params:
lines.append("**Parameters:**\n")
for p in params:
ptype = p.get("python_type", {})
type_str = (
ptype.get("type", "Any") if isinstance(ptype, dict) else str(ptype)
)
name = p.get("parameter_name") or p.get("label", "input")
component = p.get("component", "")
default_info = ""
if p.get("parameter_has_default"):
default_info = f", default: `{p.get('parameter_default')}`"
required = (
" (required)" if not p.get("parameter_has_default", False) else ""
)
lines.append(
f"- `{name}` [{component}]: `{type_str}`{required}{default_info}"
)
lines.append("")
if returns:
lines.append("**Returns:**\n")
for r in returns:
rtype = r.get("python_type", {})
type_str = (
rtype.get("type", "Any") if isinstance(rtype, dict) else str(rtype)
)
label = r.get("label", "output")
component = r.get("component", "")
lines.append(f"- `{label}` [{component}]: `{type_str}`")
lines.append("")
snippets = generate_code_snippets(
api_name, endpoint_info, src_url, space_id=space_id
)
lines.append("**Python:**\n")
lines.append("```python")
lines.append(snippets["python"])
lines.append("```\n")
lines.append("**JavaScript:**\n")
lines.append("```javascript")
lines.append(snippets["javascript"])
lines.append("```\n")
lines.append("**cURL:**\n")
lines.append("```bash")
lines.append(snippets["bash"])
lines.append("```\n")
return "\n".join(lines)
def _get_space_description(space_id: str) -> str | None:
try:
info = HfApi().space_info(space_id)
return getattr(info, "short_description", None) or None
except Exception:
return None
def _generate_space_skill(space_id: str) -> tuple[str, str]:
try:
client = Client(space_id, download_files=False)
except Exception as e:
raise SystemExit(
f"Failed to connect to Space '{space_id}'.\n{e}\n\n"
"Make sure the Space exists, is public (or provide HF_TOKEN), and is running."
) from e
api_info = client.view_api(print_info=False, return_format="dict")
src_url = client.src
skill_id = _space_id_to_skill_id(space_id)
space_description = _get_space_description(space_id)
lines: list[str] = []
lines.append("---")
lines.append(f"name: {skill_id}")
desc = (
f"description: Use the {space_id} Gradio Space via API. "
f"Provides Python, JavaScript, and cURL usage examples."
)
if space_description:
desc += f" Space description: {space_description}"
lines.append(desc)
lines.append("---\n")
lines.append(f"# {space_id}\n")
lines.append(
f"This skill describes how to use the {space_id} "
f"Gradio Space programmatically.\n"
)
named = api_info.get("named_endpoints", {})
unnamed = api_info.get("unnamed_endpoints", {})
if named:
lines.append("## API Endpoints\n")
for api_name, endpoint_info in named.items():
lines.append(
_render_endpoint_section(api_name, endpoint_info, space_id, src_url)
)
if not named and unnamed:
lines.append("## API Endpoints\n")
for fn_index, endpoint_info in unnamed.items():
lines.append(
_render_endpoint_section(
f"fn_index={fn_index}", endpoint_info, space_id, src_url
)
)
return skill_id, "\n".join(lines) + "\n"
def _install_space_skill(
skill_id: str, content: str, skills_dir: Path, force: bool
) -> Path:
skills_dir = skills_dir.expanduser().resolve()
skills_dir.mkdir(parents=True, exist_ok=True)
dest = skills_dir / skill_id
_remove_existing(dest, force)
dest.mkdir()
(dest / "SKILL.md").write_text(content, encoding="utf-8")
return dest
@skills_app.command(
"add",
)
def skills_add(
space_id: Annotated[
str | None,
typer.Argument(
help="HF Space ID (e.g. 'user/my-space'). If omitted, installs the general Gradio skill."
),
] = None,
cursor: Annotated[
bool, typer.Option("--cursor", help="Install for Cursor.")
] = False,
claude: Annotated[
bool, typer.Option("--claude", help="Install for Claude.")
] = False,
codex: Annotated[bool, typer.Option("--codex", help="Install for Codex.")] = False,
opencode: Annotated[
bool, typer.Option("--opencode", help="Install for OpenCode.")
] = False,
global_: Annotated[
bool,
typer.Option(
"--global",
"-g",
help="Install globally (user-level) instead of in the current project directory.",
),
] = False,
dest: Annotated[
Path | None,
typer.Option(
help="Install into a custom destination (path to skills directory)."
),
] = None,
force: Annotated[
bool,
typer.Option("--force", help="Overwrite existing skills in the destination."),
] = False,
) -> None:
"""Download and install a Gradio skill for an AI assistant.
When called without a space_id, installs the general Gradio skill.
When called with a space_id, generates and installs a skill for that
specific Gradio Space with Python, JS, and cURL usage examples.
"""
central_global, central_local, hf_global_targets, hf_local_targets = (
_import_hf_skills()
)
if not (cursor or claude or codex or opencode or dest):
raise typer.BadParameter(
"Pick a destination via --cursor, --claude, --codex, --opencode, or --dest."
)
global_targets = {**hf_global_targets, "cursor": Path("~/.cursor/skills")}
local_targets = {**hf_local_targets, "cursor": Path(".cursor/skills")}
targets_dict = global_targets if global_ else local_targets
if space_id is not None:
skill_id, content = _generate_space_skill(space_id)
print(f"Generated skill for Space '{space_id}'")
if dest:
if cursor or claude or codex or opencode or global_:
print("--dest cannot be combined with agent flags or --global.")
raise typer.Exit(code=1)
skill_dest = _install_space_skill(skill_id, content, dest, force)
print(f"Installed '{skill_id}' to {skill_dest}")
return
agent_targets: list[Path] = []
if cursor:
agent_targets.append(targets_dict["cursor"])
if claude:
agent_targets.append(targets_dict["claude"])
if codex:
agent_targets.append(targets_dict["codex"])
if opencode:
agent_targets.append(targets_dict["opencode"])
central_path = central_global if global_ else central_local
central_skill_path = _install_space_skill(
skill_id, content, central_path, force
)
print(f"Installed '{skill_id}' to central location: {central_skill_path}")
for agent_target in agent_targets:
link_path = _create_symlink(
agent_target, central_skill_path, force, skill_id=skill_id
)
print(f"Created symlink: {link_path}")
return
if dest:
if cursor or claude or codex or opencode or global_:
print("--dest cannot be combined with agent flags or --global.")
raise typer.Exit(code=1)
skill_dest = _install_to(dest, force)
print(f"Installed '{SKILL_ID}' to {skill_dest}")
return
agent_targets = []
if cursor:
agent_targets.append(targets_dict["cursor"])
if claude:
agent_targets.append(targets_dict["claude"])
if codex:
agent_targets.append(targets_dict["codex"])
if opencode:
agent_targets.append(targets_dict["opencode"])
central_path = central_global if global_ else central_local
central_skill_path = _install_to(central_path, force)
print(f"Installed '{SKILL_ID}' to central location: {central_skill_path}")
for agent_target in agent_targets:
link_path = _create_symlink(agent_target, central_skill_path, force)
print(f"Created symlink: {link_path}")
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/cli/commands/skills.py",
"license": "Apache License 2.0",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:scripts/generate_skill.py | """Generates .agents/skills/gradio/ from Gradio's docstrings, guides, and demos.
Usage:
python scripts/generate_skill.py # regenerate
python scripts/generate_skill.py --check # CI: fail if output would change
"""
import argparse
import os
import shutil
import sys
import tempfile
DIR = os.path.dirname(__file__)
REPO_ROOT = os.path.abspath(os.path.join(DIR, ".."))
sys.path.insert(0, os.path.join(REPO_ROOT, "client", "python"))
sys.path.insert(0, REPO_ROOT)
from gradio_client.documentation import generate_documentation # noqa: E402
import gradio # noqa: E402, F401
DEMOS_DIR = os.path.join(REPO_ROOT, "demo")
SKILL_DIR = os.path.join(REPO_ROOT, ".agents", "skills", "gradio")
CURATED_GUIDES = [
"quickstart",
"the-interface-class",
"blocks-and-event-listeners",
"controlling-layout",
"more-blocks-features",
"custom-CSS-and-JS",
"streaming-outputs",
"streaming-inputs",
"sharing-your-app",
"custom-HTML-components",
"getting-started-with-the-python-client",
"getting-started-with-the-js-client",
]
IMPORTANT_DEMOS = [
"blocks_essay_simple",
"blocks_flipper",
"blocks_form",
"blocks_hello",
"blocks_layout",
"calculator",
"chatbot_simple",
"chatbot_streaming",
"chatinterface_multimodal",
"custom_css",
"fake_diffusion",
"hello_world",
"image_editor",
"on_listener_decorator",
"render_merge",
"reverse_audio_2",
"sepia_filter",
"sort_records",
"streaming_simple",
"tabbed_interface_lite",
"tax_calculator",
"timer_simple",
"variable_outputs",
"video_identity",
]
KEY_COMPONENTS = [
"Textbox",
"Number",
"Slider",
"Checkbox",
"Dropdown",
"Radio",
"Image",
"Audio",
"Video",
"File",
"Chatbot",
"DataFrame",
"Button",
"Markdown",
"HTML",
]
def load_all_demo_code():
demos = {}
for demo_folder in os.listdir(DEMOS_DIR):
runfile = os.path.join(DEMOS_DIR, demo_folder, "run.py")
if not os.path.exists(runfile):
continue
with open(runfile) as f:
code = f.read()
code = code.replace("# type: ignore", "").replace(
'if __name__ == "__main__":\n demo.launch()', "demo.launch()"
)
demos[demo_folder] = code
return demos
def build_signature(entry):
params = ", ".join(
p["name"]
+ ": "
+ str(p["annotation"])
+ (" = " + str(p["default"]) if "default" in p else "")
for p in entry["parameters"]
)
return f"{entry['name']}({params})"
def organize_docs(raw_docs):
organized = {
"building": {},
"components": {},
"helpers": {},
"modals": {},
"routes": {},
"chatinterface": {},
}
for mode in raw_docs:
for c in raw_docs[mode]:
for p in c.get("parameters", []):
p["annotation"] = str(p["annotation"])
if "default" in p:
p["default"] = str(p["default"])
for fn in c["fns"]:
for p in fn.get("parameters", []):
p["annotation"] = str(p["annotation"])
if "default" in p:
p["default"] = str(p["default"])
if mode == "component":
organized["components"][c["name"]] = c
elif mode == "py-client":
continue
elif mode in organized:
organized[mode][c["name"]] = c
else:
organized["building"][c["name"]] = c
return organized
def build_events_matrix(organized):
component_events = {}
for name, comp in organized["components"].items():
if hasattr(comp.get("class"), "EVENTS"):
events = [fn["name"] for fn in comp["fns"] if fn["name"] in [str(e) for e in comp["class"].EVENTS]]
if events:
component_events[name] = events
return component_events
def generate_examples(all_demos):
lines = ["# Gradio End-to-End Examples\n"]
lines.append("Complete working Gradio apps for reference.\n")
for demo_name in IMPORTANT_DEMOS:
if demo_name not in all_demos:
continue
pretty = demo_name.replace("_", " ").title()
code = all_demos[demo_name].strip()
lines.append(f"## {pretty}\n")
lines.append(f"```python\n{code}\n```\n")
return "\n".join(lines)
def generate_skill_md(organized, guide_links):
key_sigs = []
for name in KEY_COMPONENTS:
if name in organized["components"]:
entry = organized["components"][name]
sig = build_signature(entry)
desc = entry.get("description", "").replace("<br>", " ").strip()
first_sentence = desc.split(". ")[0] + "." if desc else ""
key_sigs.append(f"### `{sig}`\n{first_sentence}\n")
events_matrix = build_events_matrix(organized)
event_lines = []
for comp, events in sorted(events_matrix.items()):
event_lines.append(f"- **{comp}**: {', '.join(events)}")
guide_list = "\n".join(
f"- [{title}](https://www.gradio.app/guides/{slug})"
for title, slug in guide_links
)
skill_md = f"""---
name: gradio
description: Build Gradio web UIs and demos in Python. Use when creating or editing Gradio apps, components, event listeners, layouts, or chatbots.
---
# Gradio
Gradio is a Python library for building interactive web UIs and ML demos. This skill covers the core API, patterns, and examples.
## Guides
Detailed guides on specific topics (read these when relevant):
{guide_list}
## Core Patterns
**Interface** (high-level): wraps a function with input/output components.
```python
import gradio as gr
def greet(name):
return f"Hello {{name}}!"
gr.Interface(fn=greet, inputs="text", outputs="text").launch()
```
**Blocks** (low-level): flexible layout with explicit event wiring.
```python
import gradio as gr
with gr.Blocks() as demo:
name = gr.Textbox(label="Name")
output = gr.Textbox(label="Greeting")
btn = gr.Button("Greet")
btn.click(fn=lambda n: f"Hello {{n}}!", inputs=name, outputs=output)
demo.launch()
```
**ChatInterface**: high-level wrapper for chatbot UIs.
```python
import gradio as gr
def respond(message, history):
return f"You said: {{message}}"
gr.ChatInterface(fn=respond).launch()
```
## Key Component Signatures
{chr(10).join(key_sigs)}
## Custom HTML Components
If a task requires significant customization of an existing component or a component that doesn't exist in Gradio, you can create one with `gr.HTML`. It supports `html_template` (with `${{}}` JS expressions and `{{{{}}}}` Handlebars syntax), `css_template` for scoped styles, and `js_on_load` for interactivity — where `props.value` updates the component value and `trigger('event_name')` fires Gradio events. For reuse, subclass `gr.HTML` and define `api_info()` for API/MCP support. See the [full guide](https://www.gradio.app/guides/custom-HTML-components).
Here's an example that shows how to create and use these kinds of components:
```python
import gradio as gr
class StarRating(gr.HTML):
def __init__(self, label, value=0, **kwargs):
html_template = \"\"\"
<h2>${{label}} rating:</h2>
${{Array.from({{length: 5}}, (_, i) => `<img class='${{i < value ? '' : 'faded'}}' src='https://upload.wikimedia.org/wikipedia/commons/d/df/Award-star-gold-3d.svg'>`).join('')}}
\"\"\"
css_template = \"\"\"
img {{ height: 50px; display: inline-block; cursor: pointer; }}
.faded {{ filter: grayscale(100%); opacity: 0.3; }}
\"\"\"
js_on_load = \"\"\"
const imgs = element.querySelectorAll('img');
imgs.forEach((img, index) => {{
img.addEventListener('click', () => {{
props.value = index + 1;
}});
}});
\"\"\"
super().__init__(value=value, label=label, html_template=html_template, css_template=css_template, js_on_load=js_on_load, **kwargs)
def api_info(self):
return {{"type": "integer", "minimum": 0, "maximum": 5}}
with gr.Blocks() as demo:
gr.Markdown("# Restaurant Review")
food_rating = StarRating(label="Food", value=3)
service_rating = StarRating(label="Service", value=3)
ambience_rating = StarRating(label="Ambience", value=3)
average_btn = gr.Button("Calculate Average Rating")
rating_output = StarRating(label="Average", value=3)
def calculate_average(food, service, ambience):
return round((food + service + ambience) / 3)
average_btn.click(
fn=calculate_average,
inputs=[food_rating, service_rating, ambience_rating],
outputs=rating_output
)
demo.launch()
```
## Event Listeners
All event listeners share the same signature:
```python
component.event_name(
fn: Callable | None | Literal["decorator"] = "decorator",
inputs: Component | Sequence[Component] | set[Component] | None = None,
outputs: Component | Sequence[Component] | set[Component] | None = None,
api_name: str | None = None,
api_description: str | None | Literal[False] = None,
scroll_to_output: bool = False,
show_progress: Literal["full", "minimal", "hidden"] = "full",
show_progress_on: Component | Sequence[Component] | None = None,
queue: bool = True,
batch: bool = False,
max_batch_size: int = 4,
preprocess: bool = True,
postprocess: bool = True,
cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
js: str | Literal[True] | None = None,
concurrency_limit: int | None | Literal["default"] = "default",
concurrency_id: str | None = None,
api_visibility: Literal["public", "private", "undocumented"] = "public",
time_limit: int | None = None,
stream_every: float = 0.5,
key: int | str | tuple[int | str, ...] | None = None,
validator: Callable | None = None,
) -> Dependency
```
Supported events per component:
{chr(10).join(event_lines)}
## Additional Reference
- [End-to-End Examples](examples.md) — complete working apps
"""
return skill_md.strip() + "\n"
GUIDE_TITLES = {
"quickstart": "Quickstart",
"the-interface-class": "The Interface Class",
"blocks-and-event-listeners": "Blocks and Event Listeners",
"controlling-layout": "Controlling Layout",
"more-blocks-features": "More Blocks Features",
"custom-CSS-and-JS": "Custom CSS and JS",
"streaming-outputs": "Streaming Outputs",
"streaming-inputs": "Streaming Inputs",
"sharing-your-app": "Sharing Your App",
"custom-HTML-components": "Custom HTML Components",
"getting-started-with-the-python-client": "Getting Started with the Python Client",
"getting-started-with-the-js-client": "Getting Started with the JS Client",
}
def generate_to(output_dir):
raw_docs = generate_documentation()
organized = organize_docs(raw_docs)
all_demos = load_all_demo_code()
os.makedirs(output_dir, exist_ok=True)
guide_links = []
for guide_name in CURATED_GUIDES:
title = GUIDE_TITLES.get(guide_name, guide_name.replace("-", " ").title())
guide_links.append((title, guide_name))
skill_md = generate_skill_md(organized, guide_links)
with open(os.path.join(output_dir, "SKILL.md"), "w") as f:
f.write(skill_md)
examples = generate_examples(all_demos)
with open(os.path.join(output_dir, "examples.md"), "w") as f:
f.write(examples)
return skill_md
def files_equal(path1, path2):
if not os.path.exists(path1) or not os.path.exists(path2):
return False
with open(path1) as f1, open(path2) as f2:
return f1.read() == f2.read()
def check(output_dir):
tmp = tempfile.mkdtemp()
try:
tmp_skill = os.path.join(tmp, "gradio")
generate_to(tmp_skill)
generated_files = ["SKILL.md", "examples.md"]
stale = []
for fname in generated_files:
existing = os.path.join(output_dir, fname)
fresh = os.path.join(tmp_skill, fname)
if not files_equal(existing, fresh):
stale.append(fname)
if stale:
print("ERROR: Skill files are out of date. Stale files:")
for f in stale:
print(f" {f}")
print("\nRun `python scripts/generate_skill.py` to update.")
sys.exit(1)
print("OK: Skill files are up to date.")
finally:
shutil.rmtree(tmp)
def main():
parser = argparse.ArgumentParser(description="Generate Gradio skill files.")
parser.add_argument(
"--check",
action="store_true",
help="Check if files are up to date (for CI). Exits with code 1 if stale.",
)
args = parser.parse_args()
if args.check:
check(SKILL_DIR)
return
print("Generating Gradio skill...")
if os.path.exists(SKILL_DIR):
shutil.rmtree(SKILL_DIR)
skill_md = generate_to(SKILL_DIR)
skill_lines = len(skill_md.splitlines())
for name in os.listdir(SKILL_DIR):
path = os.path.join(SKILL_DIR, name)
if os.path.isdir(path) and not os.path.islink(path):
count = len(os.listdir(path))
print(f" {name}/: {count} files")
elif os.path.isfile(path):
with open(path) as f:
lines = len(f.readlines())
print(f" {name}: {lines} lines")
print(f"\nDone! Output in {SKILL_DIR}")
if skill_lines > 500:
print(f" WARNING: SKILL.md is {skill_lines} lines (recommended <500)")
if __name__ == "__main__":
main()
| {
"repo_id": "gradio-app/gradio",
"file_path": "scripts/generate_skill.py",
"license": "Apache License 2.0",
"lines": 361,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/html_children/run.py | import gradio as gr
with gr.Blocks() as demo:
with gr.HTML(html_template='''
<button class="maximize">⛶</button>
<h2>${form_name}</h2>
@children
<button class="submit">Submit</button>
''', css_template='''
border: 2px solid gray;
border-radius: 12px;
padding: 20px;
.maximize {
position: absolute;
top: 10px;
right: 10px;
background: none;
border: none;
z-index: 1000;
}
''', js_on_load='''
element.querySelector('.submit').addEventListener('click', () => {
trigger('submit');
});
element.querySelector('.maximize').addEventListener('click', () => {
element.requestFullscreen();
});
''', form_name="Custom Form") as form:
name = gr.Textbox(label="Name")
email = gr.Textbox(label="Email")
output = gr.Textbox(label="Output")
form.submit(lambda name, email: f"Name: {name}, Email: {email}", inputs=[name, email], outputs=output)
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/html_children/run.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/accordion_tab_switch/run.py | import gradio as gr
with gr.Blocks() as demo:
with gr.Tabs() as tabs:
with gr.Tab("Tab 1", id="t1"):
with gr.Accordion("Accordion", open=False) as acc:
name = gr.Textbox(label="Name")
accordion_open = gr.Checkbox(label="Accordion Open", value=False)
accordion_open.change(
fn=lambda is_open: gr.update(open=is_open),
inputs=accordion_open,
outputs=acc,
)
with gr.Tab("Tab 2", id="t2"):
gr.Markdown("This is Tab 2 content.")
swith_tabs_btn = gr.Button("Switch to Tab 2")
swith_tabs_btn.click(
fn=lambda: gr.Tabs(selected="t2"),
inputs=None,
outputs=tabs,
)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/accordion_tab_switch/run.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/dropdown_custom_value/run.py | import gradio
with gradio.Blocks() as demo:
dropdown = gradio.Dropdown(
choices=[("hello", "goodbye"), ("abc", "123")],
allow_custom_value=True,
label="Dropdown",
)
text = gradio.Textbox(label="Output")
dropdown.change(lambda x: x, inputs=dropdown, outputs=text)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/dropdown_custom_value/run.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/textbox_custom_buttons/run.py | import gradio as gr
def export_data(text):
print("Exporting data:", text)
return "Data exported to server!"
def refresh_data():
import random
return f"Refreshed content: {random.randint(1000, 9999)}"
with gr.Blocks() as demo:
gr.Markdown("""
# Textbox with Custom Buttons Demo
This demo showcases custom buttons in a Textbox component that can trigger either (or both):
- **Python functions**
- **JS functions** (with and without input parameters)
You can use emojis, text, or icons for the buttons.
""")
gr.Markdown("### Textbox with Custom Buttons")
refresh_btn = gr.Button("Refresh")
alert_btn = gr.Button("⚠️ Alert")
clear_btn = gr.Button("🗑️")
textbox = gr.Textbox(
value="Sample text content that can be exported, refreshed, or transformed.",
buttons=["copy", refresh_btn, alert_btn, clear_btn],
label="Sample Text",
lines=5
)
output = gr.Textbox(label="Output (Python Function Result)")
refresh_btn.click(refresh_data, outputs=textbox)
alert_btn.click(
None,
inputs=textbox,
outputs=[],
js="(text) => { alert('This is a JavaScript alert!\\n\\nTextbox content: ' + text); return []; }"
)
clear_btn.click(
None,
inputs=[],
outputs=textbox,
js="() => ''"
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/textbox_custom_buttons/run.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/playback_position/run.py | import gradio as gr
from gradio.media import get_audio, get_video
# Get the directory where this script is located
with gr.Blocks() as demo:
with gr.Tab("Audio"):
gr.Markdown("## Audio Playback Position")
gr.Markdown("Click the button to see the current playback position of the audio.")
audio = gr.Audio(
value=get_audio("sax.wav"),
playback_position=2.0,
elem_id="audio",
)
audio_btn = gr.Button("Get Audio Playback Position")
audio_position = gr.Number(label="Current Audio Position (seconds)")
def print_audio_playback_pos(a: gr.Audio):
return a.playback_position
audio_btn.click(print_audio_playback_pos, inputs=audio, outputs=audio_position)
set_audio_time_btn = gr.Button("Set Audio Playback Position to 10 seconds")
def set_audio_playback_pos():
return gr.Audio(playback_position=10.0)
set_audio_time_btn.click(set_audio_playback_pos, outputs=audio)
with gr.Tab("Video"):
gr.Markdown("## Video Playback Position")
gr.Markdown("Click the button to see the current playback position of the video.")
video = gr.Video(
value=get_video("world.mp4"),
playback_position=5.0,
elem_id="video",
)
video_btn = gr.Button("Get Video Playback Position")
video_position = gr.Number(label="Current Video Position (seconds)")
def print_video_playback_pos(v: gr.Video):
return v.playback_position
video_btn.click(print_video_playback_pos, inputs=video, outputs=video_position)
set_video_time_btn = gr.Button("Set Video Playback Position to 8 seconds")
def set_video_playback_pos():
return gr.Video(playback_position=8.0)
set_video_time_btn.click(set_video_playback_pos, outputs=video)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/playback_position/run.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/invisible_textbox/run.py | import gradio as gr
with gr.Blocks() as demo:
with gr.Tabs():
with gr.Tab("Invisible Textbox Demo"):
textbox = gr.Textbox(visible=False, interactive=True, elem_id="test-textbox")
with gr.Row():
make_visible_btn = gr.Button("Show")
hide = gr.Button("Hide")
make_invisible_btn = gr.Button("Make Invisible")
def show():
return gr.Textbox(visible=True)
make_visible_btn.click(fn=show, outputs=textbox)
hide.click(lambda: gr.Textbox(visible=False), outputs=textbox)
make_invisible_btn.click(lambda: gr.Textbox(visible="hidden"), outputs=textbox)
with gr.Tab("Another Tab"):
msg = gr.Markdown("This is another tab to demonstrate that invisible components work across tabs.", visible=False)
show_message = gr.Button("Show Message")
show_message.click(lambda: gr.Markdown(visible=True), outputs=msg)
with gr.Tab("Third Tab"):
with gr.Accordion("Third Tab Accordion", open=True, visible=False) as acc:
third_msg = gr.Textbox(label="Visible Textbox", interactive=True, visible=True)
hidden_number = gr.Number(visible=False, label="Hidden Number", value=100, elem_id="hidden-number")
show_number_btn = gr.Button("Show Number")
hide_number_btn = gr.Button("Hide Number")
show_number_btn.click(lambda: gr.Number(visible=True), outputs=hidden_number)
hide_number_btn.click(lambda: gr.Number(visible=False), outputs=hidden_number)
show_third_message = gr.Button("Show Accordion")
show_third_message.click(lambda: gr.Accordion(visible=True), outputs=acc)
hide_third_message = gr.Button("Hide Accordion")
hide_third_message.click(lambda: gr.Accordion(visible=False), outputs=acc)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/invisible_textbox/run.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/chatbot_reasoning_tags/run.py | import gradio as gr
def respond(message, history):
response = """<thinking>
Let me analyze this problem step by step.
First, I need to understand what the user is asking.
Then I can formulate a proper response.
</thinking>
Based on your question, here's my answer: This is the main response content that should be visible by default.
<thinking>
Now let me consider if there are any edge cases.
I should make sure my response is complete.
</thinking>
And here's some additional information that might be helpful."""
return response
demo = gr.ChatInterface(
fn=respond,
chatbot=gr.Chatbot(
reasoning_tags=[("<thinking>", "</thinking>")],
height=600
),
title="Test Collapse Thinking Feature",
description="This demo tests the reasoning_tags parameter. The thinking blocks should be collapsed by default."
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/chatbot_reasoning_tags/run.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/component_props/run.py | import gradio as gr
from gradio.media import get_image
with gr.Blocks() as demo:
a = gr.Number(value=5, minimum=0, maximum=10, label="Input A", info="Enter a number between 0 and 10")
output_a = gr.JSON(label="Output", elem_id="output")
with gr.Row():
show_value_btn = gr.Button("Show Value")
double_btn = gr.Button("Double Value and Maximum")
reset_btn = gr.Button("Reset")
def process_with_props(x: gr.Number):
return {
"value": x.value,
"maximum": x.maximum,
"minimum": x.minimum,
}
show_value_btn.click(process_with_props, a, output_a)
def double_value_and_max(x: gr.Number):
x.maximum *= 2 # type: ignore
x.value = (x.value or 0) * 2
x.info = f"Enter a number between 0 and {x.maximum}"
return x
double_btn.click(double_value_and_max, a, a).then(
process_with_props, a, output_a
)
def reset(x: gr.Number):
x.maximum = 10
x.value = 5
x.info = "Enter a number between 0 and 10"
return x
reset_btn.click(reset, a, a).then(
process_with_props, a, output_a
)
# Image component demo
gr.Markdown("## Image Component Props")
b = gr.Image(value=get_image("cheetah.jpg"), label="Input Image", width=300, height=300, type="filepath")
output_b = gr.JSON(label="Image Props Output", elem_id="image-output")
with gr.Row():
show_image_props_btn = gr.Button("Show Image Props")
change_image_size_btn = gr.Button("Change Image Size")
reset_image_btn = gr.Button("Reset Image")
def show_image_props(x: gr.Image):
return {
"value": x.value if x.value is None else str(x.value),
"width": x.width,
"height": x.height,
"type": x.type,
}
show_image_props_btn.click(show_image_props, b, output_b)
def change_image_size(x: gr.Image):
x.width = 400
x.height = 400
return x
change_image_size_btn.click(change_image_size, b, b).then(
show_image_props, b, output_b
)
def reset_image(x: gr.Image):
x.width = 300
x.height = 300
x.value = get_image("cheetah.jpg")
return x
reset_image_btn.click(reset_image, b, b).then(
show_image_props, b, output_b
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/component_props/run.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_image_app/run.py | import gradio as gr
import tempfile
from PIL import Image
import numpy as np
@gr.mcp.tool(
_meta={
"openai/outputTemplate": "ui://widget/app.html",
"openai/resultCanProduceWidget": True,
"openai/widgetAccessible": True,
}
)
def power_law_image(input_path: str, gamma: float = 0.5) -> str:
"""
Applies a power-law (gamma) transformation to an image file and saves
the result to a temporary file.
Args:
input_path (str): Path to the input image.
gamma (float): Power-law exponent. <1 brightens, >1 darkens.
Returns:
str: Path to the saved temporary output image.
"""
img = Image.open(input_path).convert("RGB")
arr = np.array(img, dtype=np.float32) / 255.0
arr = np.power(arr, gamma)
arr = np.clip(arr * 255, 0, 255).astype(np.uint8)
out_img = Image.fromarray(arr)
tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
out_img.save(tmp_file.name)
tmp_file.close()
return tmp_file.name
@gr.mcp.resource("ui://widget/app.html", mime_type="text/html+skybridge")
def app_html():
visual = """
<style>
#image-container {
position: relative;
display: inline-block;
max-width: 100%;
}
#image-display {
max-width: 100%;
height: auto;
display: block;
border-radius: 8px;
}
#brighten-btn {
position: absolute;
bottom: 16px;
right: 26px;
padding: 12px 24px;
background: #1a1a1a;
color: white;
border: none;
border-radius: 8px;
font-weight: 600;
cursor: pointer;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
}
#brighten-btn:hover {
background: #000000;
}
</style>
<div id="image-container">
<img id="image-display" alt="Processed image" />
<button id="brighten-btn">Brighten</button>
</div>
<script>
const imageEl = document.getElementById('image-display');
const btnEl = document.getElementById('brighten-btn');
function extractImageUrl(data) {
if (data?.text?.startsWith('Image URL: ')) {
return data.text.substring('Image URL: '.length).trim();
}
if (data?.content) {
for (const item of data.content) {
if (item.type === 'text' && item.text?.startsWith('Image URL: ')) {
return item.text.substring('Image URL: '.length).trim();
}
}
}
}
function render() {
const url = extractImageUrl(window.openai?.toolOutput);
if (url) imageEl.src = url;
}
async function brightenImage() {
btnEl.disabled = true;
btnEl.textContent = 'Brightening...';
const result = await window.openai.callTool('power_law_image', {
input_path: imageEl.src
});
const newUrl = extractImageUrl(result);
if (newUrl) imageEl.src = newUrl;
btnEl.disabled = false;
btnEl.textContent = 'Brighten';
}
btnEl.addEventListener('click', brightenImage);
window.addEventListener("openai:set_globals", (event) => {
if (event.detail?.globals?.toolOutput) render();
}, { passive: true });
render();
</script>
"""
return visual
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
original_image = gr.Image(label="Original Image", type="filepath")
btn = gr.Button("Brighten Image")
with gr.Column():
output_image = gr.Image(label="Output Image", type="filepath")
html = gr.Code(language="html", max_lines=20)
btn.click(power_law_image, inputs=original_image, outputs=original_image)
btn.click(app_html, outputs=html)
if __name__ == "__main__":
demo.launch(mcp_server=True, share=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_image_app/run.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/mcp_letter_counter_app/run.py | import gradio as gr
@gr.mcp.tool(
_meta={
"openai/outputTemplate": "ui://widget/app.html",
"openai/resultCanProduceWidget": True,
"openai/widgetAccessible": True,
}
)
def letter_counter(word: str, letter: str) -> int:
"""
Count the number of letters in a word or phrase.
Parameters:
word (str): The word or phrase to count the letters of.
letter (str): The letter to count the occurrences of.
"""
return word.count(letter)
@gr.mcp.resource("ui://widget/app.html", mime_type="text/html+skybridge")
def app_html():
visual = """
<div id="letter-card-container"></div>
<script>
const container = document.getElementById('letter-card-container');
function render() {
const word = window.openai?.toolInput?.word || "strawberry";
const letter = window.openai?.toolInput?.letter || "r";
let letterHTML = '';
for (let i = 0; i < word.length; i++) {
const char = word[i];
const color = char.toLowerCase() === letter.toLowerCase() ? '#b8860b' : '#000000';
letterHTML += `<span style="color: ${color};">${char}</span>`;
}
container.innerHTML = `
<div style="
background: linear-gradient(135deg, #f5f5dc 0%, #e8e4d0 100%);
background-image:
repeating-linear-gradient(45deg, transparent, transparent 2px, rgba(139, 121, 94, 0.03) 2px, rgba(139, 121, 94, 0.03) 4px),
linear-gradient(135deg, #f5f5dc 0%, #e8e4d0 100%);
border-radius: 16px;
padding: 40px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1), 0 1px 3px rgba(0, 0, 0, 0.08);
max-width: 600px;
margin: 20px auto;
font-family: 'Georgia', serif;
text-align: center;
">
<div style="
font-size: 48px;
font-weight: bold;
letter-spacing: 8px;
line-height: 1.5;
">
${letterHTML}
</div>
</div>
`;
}
render();
window.addEventListener("openai:set_globals", (event) => {
if (event.detail?.globals?.toolInput) {
render();
}
}, { passive: true });
</script>
"""
return visual
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
word = gr.Textbox(label="Word")
letter = gr.Textbox(label="Letter")
btn = gr.Button("Count Letters")
with gr.Column():
count = gr.Number(label="Count")
html = gr.Code(language="html", max_lines=20)
btn.click(letter_counter, inputs=[word, letter], outputs=count)
btn.click(app_html, outputs=html)
if __name__ == "__main__":
demo.launch(mcp_server=True, share=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_letter_counter_app/run.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/pending_inputs/run.py | import gradio as gr
with gr.Blocks() as demo:
gr.Markdown("# Pending Input Components")
with gr.Row():
with gr.Column():
file = gr.File()
btn = gr.Button("Upload")
with gr.Column():
output_file = gr.File()
btn.click(
lambda s: (s),
file,
output_file,
)
with gr.Row():
with gr.Column():
img = gr.Image(type="filepath")
btn_2 = gr.Button("Upload")
with gr.Column():
output_file_2 = gr.File()
btn_2.click(
lambda s: (s),
img,
output_file_2,
)
with gr.Row():
with gr.Column():
audio = gr.Audio(type="filepath")
btn_3 = gr.Button("Upload")
with gr.Column():
output_file_3 = gr.File()
btn_3.click(
lambda s: (s),
audio,
output_file_3,
)
with gr.Row():
with gr.Column():
video = gr.Video()
btn_3 = gr.Button("Upload")
with gr.Column():
output_file_4 = gr.File()
btn_3.click(
lambda s: (s),
video,
output_file_4,
)
with gr.Row():
with gr.Column():
model3d = gr.Model3D()
btn_4 = gr.Button("Upload")
with gr.Column():
output_file_4 = gr.File()
btn_4.click(
lambda s: (s),
model3d,
output_file_4,
)
with gr.Row():
with gr.Column():
gallery = gr.Gallery()
btn_5 = gr.Button("Upload")
with gr.Column():
output_file_5 = gr.File(file_count="multiple")
btn_5.click(
lambda s: [x[0] for x in s],
gallery,
output_file_5,
)
# with gr.Row():
# with gr.Column():
# df = gr.Dataframe()
# btn_6 = gr.Button("Upload")
# with gr.Column():
# output_file_6 = gr.File()
# btn_6.click(
# lambda s: (s),
# df,
# output_file_6,
# )
with gr.Row():
with gr.Column():
imageslider = gr.ImageSlider(type="filepath")
btn_7 = gr.Button("Upload")
with gr.Column():
output_file_7 = gr.File()
btn_7.click(
lambda s: s[0],
imageslider,
output_file_7,
)
with gr.Row():
with gr.Column():
text = gr.MultimodalTextbox()
btn_8 = gr.Button("Upload")
with gr.Column():
output_file_8 = gr.File()
btn_8.click(
lambda s: s["files"],
text,
output_file_8,
)
if __name__ == "__main__":
demo.launch(
allowed_paths=["/private/var/folders/3w/6btg016509v7b2lz9h7vwqv00000gn/T"]
)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/pending_inputs/run.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/render_queue_false/run.py | import gradio as gr
with gr.Blocks() as demo:
input_text = gr.Textbox(label="Input Text")
@gr.render(inputs=input_text, queue=False)
def show_split(text):
if len(text) == 0:
gr.Markdown("## No Input Provided")
else:
for letter in text:
with gr.Row():
text = gr.Textbox(letter, label=f"Letter {letter}")
btn = gr.Button("Clear")
btn.click(lambda: gr.Textbox(value=""), None, text)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/render_queue_false/run.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/show_progress_on/run.py | # This demo needs to be run from the repo folder.
# python demo/fake_gan/run.py
import time
import gradio as gr
from gradio.media import get_image
def fake_gan():
time.sleep(5)
images = [
(get_image("cheetah.jpg"), f"label {i}")
for i in range(3)
]
return images, "Done"
with gr.Blocks() as demo:
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
, columns=1, object_fit="contain", height="auto")
t = gr.Textbox(label="Progress", elem_id="progress_textbox")
btn = gr.Button("Generate images", scale=0)
btn.click(fake_gan, None, [gallery, t], show_progress="minimal", show_progress_on=t)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/show_progress_on/run.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/star_rating_component/run.py | import gradio as gr
class StarRating(gr.HTML):
def __init__(self, label, value=0, **kwargs):
html_template = """
<h2>${label} rating:</h2>
${Array.from({length: 5}, (_, i) => `<img class='${i < value ? '' : 'faded'}' src='https://upload.wikimedia.org/wikipedia/commons/d/df/Award-star-gold-3d.svg'>`).join('')}
"""
css_template = """
img { height: 50px; display: inline-block; cursor: pointer; }
.faded { filter: grayscale(100%); opacity: 0.3; }
"""
js_on_load = """
const imgs = element.querySelectorAll('img');
imgs.forEach((img, index) => {
img.addEventListener('click', () => {
props.value = index + 1;
});
});
"""
super().__init__(value=value, label=label, html_template=html_template, css_template=css_template, js_on_load=js_on_load, **kwargs)
def api_info(self):
return {"type": "integer", "minimum": 0, "maximum": 5}
with gr.Blocks() as demo:
gr.Markdown("# Restaurant Review")
food_rating = StarRating(label="Food", value=3)
service_rating = StarRating(label="Service", value=3)
ambience_rating = StarRating(label="Ambience", value=3)
average_btn = gr.Button("Calculate Average Rating")
rating_output = StarRating(label="Average", value=3)
def calculate_average(food, service, ambience):
return round((food + service + ambience) / 3)
average_btn.click(
fn=calculate_average,
inputs=[food_rating, service_rating, ambience_rating],
outputs=rating_output
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/star_rating_component/run.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/star_rating_events/run.py | import gradio as gr
with gr.Blocks() as demo:
star_rating = gr.HTML(
value=3,
html_template="""
<h2>Star Rating:</h2>
${Array.from({length: 5}, (_, i) => `<img class='${i < value ? '' : 'faded'}' src='https://upload.wikimedia.org/wikipedia/commons/d/df/Award-star-gold-3d.svg'>`).join('')}
<button id='submit-btn'>Submit Rating</button>
""",
css_template="""
img { height: 50px; display: inline-block; cursor: pointer; }
.faded { filter: grayscale(100%); opacity: 0.3; }
""",
js_on_load="""
const imgs = element.querySelectorAll('img');
imgs.forEach((img, index) => {
img.addEventListener('click', () => {
props.value = index + 1;
});
});
const submitBtn = element.querySelector('#submit-btn');
submitBtn.addEventListener('click', () => {
trigger('submit');
});
""")
rating_output = gr.Textbox(label="Submitted Rating")
star_rating.submit(lambda x: x, inputs=star_rating, outputs=rating_output)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/star_rating_events/run.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/star_rating_props/run.py | import gradio as gr
with gr.Blocks() as demo:
star_rating = gr.HTML(
7,
size=40,
max_stars=10,
html_template="""
<h2>Star Rating:</h2>
${Array.from({length: max_stars}, (_, i) => `<img class='${i < value ? '' : 'faded'}' src='https://upload.wikimedia.org/wikipedia/commons/d/df/Award-star-gold-3d.svg'>`).join('')}""",
css_template="""
img { height: ${size}px; display: inline-block; }
.faded { filter: grayscale(100%); opacity: 0.3; }
"""
)
rating_slider = gr.Slider(0, 10, step=1, label="Select Rating")
rating_slider.change(fn=lambda x: x, inputs=rating_slider, outputs=star_rating)
size_slider = gr.Slider(20, 100, 40, step=1, label="Select Size")
size_slider.change(fn=lambda x: gr.HTML(size=x), inputs=size_slider, outputs=star_rating)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/star_rating_props/run.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/star_rating_templates/run.py | import gradio as gr
with gr.Blocks() as demo:
star_rating = gr.HTML(
value=3,
html_template="""
<h2>Star Rating:</h2>
${Array.from({length: 5}, (_, i) => `<img class='${i < value ? '' : 'faded'}' src='https://upload.wikimedia.org/wikipedia/commons/d/df/Award-star-gold-3d.svg'>`).join('')}""",
css_template="""
img { height: 50px; display: inline-block; }
.faded { filter: grayscale(100%); opacity: 0.3; }
""")
rating_slider = gr.Slider(0, 5, 3, step=1, label="Select Rating")
rating_slider.change(fn=lambda x: x, inputs=rating_slider, outputs=star_rating)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/star_rating_templates/run.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/super_html/run.py | import os
import gradio as gr
with gr.Blocks() as demo:
gr.Markdown("""
# Simple HTML usecase
This is the classic `gr.HTML` usecase where we just want to render some static HTML.
""")
simple_html = gr.HTML("<h1 style='color:purple;' id='simple'>Hello, World!</h1>")
gr.Markdown("""
# Templated HTML usecase
'value' can now be anything, and it can be used inside the `html_template` using `${value}` syntax.
Note that when used as output or input, `value` is just this specific value rather than the entire HTML.
""")
with gr.Row():
name1 = gr.Textbox(label="Name")
templated_html = gr.HTML(
"",
html_template="<h1>Hello, {{value}}! ${value.length} letters</h1>",
elem_id="templated",
)
name1.change(lambda x: x, inputs=name1, outputs=templated_html)
gr.Markdown("""
# Additional Props
You are not limited to using `${value}` in the templates, you can add any number of custom tags to the template, and pass them to the component as keyword arguments. These props can be updated via python event listeners as well.
""")
with gr.Row():
templated_html_props = gr.HTML(
"John",
html_template="""
<h1 style="font-size: ${fontSize}px;">Hello, ${value}!</h1>
""",
fontSize=30,
elem_id="props",
)
slider = gr.Slider(10, 100, value=30, label="Font Size")
slider.change(
lambda x: gr.HTML(fontSize=x), inputs=slider, outputs=templated_html_props
)
gr.Markdown("""
# CSS Templating
We can also template CSS, which is automatically scoped to the component.
""")
with gr.Row():
name2 = gr.Textbox(label="Person")
color = gr.ColorPicker(label="Text Color", value="#00ff00")
bold = gr.Checkbox(label="Bold Text", value=True)
templated_html_css = gr.HTML(
["J", "o", "h", "n"],
html_template="""
<h1>Hello, ${value.join('')}!</h1>
<ul>
{{#each value}}
<li>{{this}}</li>
{{/each}}
</ul>
""",
css_template="""
h1, li {
color: ${color};
font-weight: ${bold ? 'bold' : 'normal'};
}
""",
color="green",
bold=True,
elem_id="css",
)
with gr.Row():
btn = gr.Button("Update HTML")
btn_blue = gr.Button("Make HTML Blue")
def update_templated_html_css(name, color, bold):
return gr.HTML(value=list(name), color=color, bold=bold)
btn.click(
update_templated_html_css,
inputs=[name2, color, bold],
outputs=templated_html_css,
)
btn_blue.click(lambda: gr.HTML(color="blue"), outputs=templated_html_css)
gr.Markdown("""
# JS Prop Updates
We can now trigger events from gr.HTML using event listeners in `js_on_load`. This script has access to `element` which refers to the parent element, and `trigger(event_name)` or `trigger(event_name, event_data)`, which can be used to dispatch events.
""")
button_set = gr.HTML(
html_template="""
<button id='A'>A</button>
<button id='B'>B</button>
<button id='C'>C</button>
""",
css_template="""
button {
padding: 10px;
background-color: red;
}
""",
js_on_load="""
const buttons = element.querySelectorAll('button');
buttons.forEach(button => {
button.addEventListener('click', () => {
trigger('click', {clicked: button.innerText});
});
});
""",
elem_id="button_set",
)
clicked_box = gr.Textbox(label="Clicked")
def on_button_click(evt: gr.EventData):
return evt.clicked
button_set.click(on_button_click, outputs=clicked_box)
gr.Markdown("""
# JS Prop Changes
You can also update `value` or any other prop of the component from JS using `props`, e.g., `props.value = "new value"` will update the `value` prop and re-render the HTML template.
""")
form = gr.HTML(
html_template="""
<input type="text" value="${value}" id="text-input" />
<p>${value.length} letters</p>
<button class="submit" style="display: ${valid ? 'block' : 'none'};">submit</button>
<button class="clear">clear</button>
""",
js_on_load="""
const input = element.querySelector('input');
const submit_button = element.querySelector('button.submit');
const clear_button = element.querySelector('button.clear');
input.addEventListener('input', () => {
props.valid = input.value.length > 5;
props.value = input.value;
});
submit_button.addEventListener('click', () => {
trigger('submit');
});
clear_button.addEventListener('click', () => {
props.value = "";
props.valid = false;
trigger('clear');
});
""",
valid=False,
elem_id="form",
)
output_box = gr.Textbox(label="Output Box")
form.submit(lambda x: x, form, outputs=output_box)
output_box.submit(lambda x: x, output_box, outputs=form)
gr.Markdown("""
# Extending gr.HTML for new Components
You can create your own Components by extending the gr.HTML class.
""")
class ListComponent(gr.HTML):
def __init__(self, container=True, label="List", ordered=False, **kwargs):
self.ordered = ordered
super().__init__(
html_template="""
<h2>${label}</h2>
${ordered ? `<ol>` : `<ul>`}
${value.map(item => `<li>${item}</li>`).join('')}
${ordered ? `</ol>` : `</ul>`}
""",
container=container,
label=label,
ordered=ordered,
**kwargs,
)
l1 = ListComponent(
label="Fruits", value=["Apple", "Banana", "Cherry"], elem_id="fruits"
)
l2 = ListComponent(
label="Vegetables",
value=["Carrot", "Broccoli", "Spinach"],
elem_id="vegetables",
)
make_ordered_btn = gr.Button("Make Ordered")
make_unordered_btn = gr.Button("Make Unordered")
make_ordered_btn.click(
lambda: [ListComponent(ordered=True), ListComponent(ordered=True)],
outputs=[l1, l2],
)
make_unordered_btn.click(
lambda: [ListComponent(ordered=False), ListComponent(ordered=False)],
outputs=[l1, l2],
)
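    # Presumably intentional: the template below references an undefined
    # variable ("Zalue") so the demo can exercise template error handling.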
failed_template = gr.HTML(
value=None,
html_template="""
${Zalue}
""",
)
gr.Markdown("""
# File Upload via gr.HTML
The `upload` async function is available in `js_on_load`. It takes a JavaScript `File` object,
uploads it to the Gradio server, and returns an object containing the server-side file `path` and a `url` for accessing it.
""")
upload_html = gr.HTML(
html_template="""
<div>
<input type="file" id="html-file-input" />
<button id="html-upload-btn" style="margin-left: 8px; padding: 4px 8px;">Upload</button>
<p id="html-upload-status">No file uploaded yet.</p>
</div>
""",
js_on_load="""
const input = element.querySelector('#html-file-input');
const btn = element.querySelector('#html-upload-btn');
const status = element.querySelector('#html-upload-status');
btn.addEventListener('click', async () => {
const file = input.files[0];
if (!file) {
status.textContent = 'Please select a file first.';
return;
}
status.textContent = 'Uploading...';
try {
const { path, url } = await upload(file);
status.textContent = 'Uploaded: ' + path;
trigger('upload', { path: path, url: url, name: file.name });
} catch (e) {
status.textContent = 'Upload failed: ' + e.message;
}
});
""",
elem_id="upload_html"
)
upload_result = gr.Textbox(label="Upload Result", elem_id="upload_result")
def on_html_upload(evt: gr.EventData):
return evt.path
upload_html.upload(on_html_upload, outputs=upload_result)
class TodoList(gr.HTML):
def __init__(
self,
value: list[str] | None = None,
completed: list[int] | None = None,
**kwargs,
):
self.completed = completed or []
super().__init__(
html_template="""
<h2>Todo List</h2>
<ul>
${value.map((item, index) => `
<li style="text-decoration: ${completed.includes(index) ? 'line-through' : 'none'};">
<input type="checkbox" ${completed.includes(index) ? 'checked' : ''} data-index="${index}" />
${item}
</li>
`).join('')}
</ul>
""",
js_on_load="""
const checkboxes = element.querySelectorAll('input[type="checkbox"]');
checkboxes.forEach(checkbox => {
checkbox.addEventListener('change', () => {
const index = parseInt(checkbox.getAttribute('data-index'));
let completed = props.completed || [];
if (checkbox.checked) {
if (!completed.includes(index)) {
completed.push(index);
}
} else {
completed = completed.filter(i => i !== index);
}
props.completed = [...completed];
console.log(JSON.stringify(props.completed))
});
});
""",
completed=self.completed,
value=value,
**kwargs,
)
todo_list = TodoList(
value=["Buy groceries", "Walk the dog", "Read a book"],
completed=[1],
elem_id="todo",
)
gr.Markdown("""
# HTML Children
Use `@children` in `html_template` to render child components inside the HTML wrapper.
""")
with gr.HTML(
html_template="""
<h2>${title}</h2>
@children
<button class="send">Send</button>
""",
css_template="""
border: 2px solid gray;
border-radius: 8px;
padding: 16px;
""",
js_on_load="""
element.querySelector('.send').addEventListener('click', () => {
trigger('submit');
});
""",
title="Contact Form",
elem_id="children_form",
) as children_form:
children_name = gr.Textbox(label="Your Name")
children_email = gr.Textbox(label="Your Email")
children_output = gr.Textbox(label="Children Output")
children_form.submit(
lambda name, email: f"Name: {name}, Email: {email}",
inputs=[children_name, children_email],
outputs=children_output,
)
gr.Markdown("""
# Server Functions
You can call Python functions from `js_on_load` using the `server` object. Pass a list of functions via `server_functions` and they become available as async methods on the `server` object in your JavaScript code.
""")
def list_directory(path):
try:
items = sorted(os.listdir(path))
return [
{"name": item, "is_dir": os.path.isdir(os.path.join(path, item))}
for item in items[:20]
]
except (FileNotFoundError, PermissionError):
return []
server_fn_html = gr.HTML(
value=os.path.dirname(__file__),
html_template="""
<div>
<p>Directory: <strong>${value}</strong></p>
<div id='server-fn-tree'></div>
<button id='server-fn-load'>Load Files</button>
</div>
""",
js_on_load="""
const loadBtn = element.querySelector('#server-fn-load');
const tree = element.querySelector('#server-fn-tree');
loadBtn.addEventListener('click', async () => {
tree.innerHTML = '<em>Loading...</em>';
const items = await server.list_directory(props.value);
tree.innerHTML = '';
items.forEach(item => {
const el = document.createElement('div');
el.textContent = (item.is_dir ? '📁 ' : '📄 ') + item.name;
el.className = 'server-fn-item';
tree.appendChild(el);
});
});
""",
css_template="""
#server-fn-tree { padding: 8px; min-height: 20px; }
.server-fn-item { padding: 2px 8px; }
#server-fn-load { padding: 6px 12px; margin-top: 8px; }
""",
server_functions=[list_directory],
elem_id="server_fns",
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/super_html/run.py",
"license": "Apache License 2.0",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:gradio/components/custom_html_components/audio_gallery.py | import gradio as gr
class AudioGallery(gr.HTML):
def __init__(
self,
audio_urls: list[str],
*,
value: str | None = None,
labels: list[str] | None = None,
columns: int = 3,
label: str | None = None,
**kwargs,
):
self.audio_urls = audio_urls
html_template = """
<div class="audio-gallery-container">
${label ? `<label class="container-label">${label}</label>` : ''}
<div class="audio-gallery-grid" style="grid-template-columns: repeat(${columns}, 1fr);">
${audio_urls.map((url, i) => `
<div class="audio-item" data-index="${i}">
<div class="audio-label">${labels && labels[i] ? labels[i] : 'Audio ' + (i + 1)}</div>
<canvas class="waveform-canvas" data-url="${url}" width="300" height="80"></canvas>
<audio src="${url}" preload="metadata" ${value === url ? 'data-selected="true"' : ''}></audio>
<div class="audio-controls">
<button class="play-btn">▶</button>
<div class="time-display">0:00</div>
</div>
</div>
`).join('')}
</div>
</div>
"""
css_template = """
.audio-gallery-container { padding: var(--spacing-lg); }
.container-label { display: block; margin-bottom: var(--spacing-md); font-weight: 600; }
.audio-gallery-grid { display: grid; gap: var(--spacing-lg); }
.audio-item { border: 2px solid var(--border-color-primary); border-radius: var(--radius-md); padding: var(--spacing-md); cursor: pointer; transition: all 0.2s; }
.audio-item:hover { border-color: var(--color-accent); box-shadow: 0 2px 8px rgba(0,0,0,0.1); }
.audio-item[data-selected="true"] { border-color: var(--color-accent); background-color: var(--background-fill-secondary); }
.audio-label { margin-bottom: 8px; text-align: center; }
.waveform-canvas { width: 100%; height: 80px; background: var(--background-fill-secondary); margin-bottom: 8px; }
.audio-controls { display: flex; align-items: center; gap: 8px; }
.play-btn { width: 32px; height: 32px; border-radius: 50%; border: none; background: var(--color-accent); color: white; cursor: pointer; }
.play-btn:hover { opacity: 0.8; }
.time-display { font-size: 12px; }
"""
js_on_load = """
const audioItems = element.querySelectorAll('.audio-item');
audioItems.forEach((item, index) => {
const canvas = item.querySelector('.waveform-canvas');
const audio = item.querySelector('audio');
const playBtn = item.querySelector('.play-btn');
const timeDisplay = item.querySelector('.time-display');
const ctx = canvas.getContext('2d');
drawWaveform(canvas, ctx);
item.addEventListener('click', (e) => {
if (e.target === playBtn) return;
audioItems.forEach(i => i.removeAttribute('data-selected'));
item.setAttribute('data-selected', 'true');
props.value = audio.src;
});
playBtn.addEventListener('click', (e) => {
e.stopPropagation();
if (audio.paused) {
document.querySelectorAll('.audio-item audio').forEach(a => a.pause());
document.querySelectorAll('.play-btn').forEach(b => b.textContent = '▶');
audio.play();
playBtn.textContent = '⏸';
} else {
audio.pause();
playBtn.textContent = '▶';
}
});
audio.addEventListener('timeupdate', () => {
const currentTime = Math.floor(audio.currentTime);
const minutes = Math.floor(currentTime / 60);
const seconds = currentTime % 60;
timeDisplay.textContent = `${minutes}:${seconds.toString().padStart(2, '0')}`;
const progress = audio.currentTime / audio.duration;
drawWaveform(canvas, ctx, progress);
});
audio.addEventListener('ended', () => {
playBtn.textContent = '▶';
drawWaveform(canvas, ctx, 0);
});
});
function drawWaveform(canvas, ctx, progress = 0) {
const width = canvas.width;
const height = canvas.height;
const bars = 50;
const barWidth = width / bars;
ctx.clearRect(0, 0, width, height);
for (let i = 0; i < bars; i++) {
const barHeight = (Math.sin(i * 0.5) * 0.3 + Math.random() * 0.7) * height * 0.8;
const x = i * barWidth;
const y = (height - barHeight) / 2;
ctx.fillStyle = i / bars < progress ? '#FF7C00' : '#ccc';
ctx.fillRect(x, y, barWidth - 2, barHeight);
}
}
"""
super().__init__(
value=value or (audio_urls[0] if audio_urls else None),
html_template=html_template,
css_template=css_template,
js_on_load=js_on_load,
audio_urls=audio_urls,
labels=labels,
columns=columns,
label=label,
apply_default_css=False,
**kwargs,
)
def api_info(self):
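        # Hedged note: overriding api_info advertises this component's value
        # type in the auto-generated API schema (here, a single audio URL string).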
return {
"type": "string",
"title": "Audio URL",
}
if __name__ == "__main__":
with gr.Blocks() as demo:
gr.Markdown("# Audio Gallery Demo")
gallery = AudioGallery(
audio_urls=[
"https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav",
"https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample-1-4.wav",
"https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/audio/cantina.wav",
"https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/audio/recording1.wav",
"https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/audio/heath_ledger.mp3",
"https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/audio/cate_blanch.mp3",
],
labels=[
"Sample 1",
"Sample 2",
"Cantina",
"Recording",
"Heath Ledger",
"Cate Blanchett",
],
columns=3,
label="Select an audio file",
)
output = gr.Textbox(label="Selected Audio URL")
gr.Interface(
fn=lambda x: x,
inputs=gallery,
outputs=output,
)
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/components/custom_html_components/audio_gallery.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:gradio/components/custom_html_components/colored_checkbox_group.py | import gradio as gr
class ColoredCheckboxGroup(gr.HTML):
def __init__(
self,
choices: list[str],
*,
value: list[str] | None = None,
colors: list[str],
label: str | None = None,
**kwargs,
):
html_template = """
<div class="colored-checkbox-container">
${label ? `<label class="container-label">${label}</label>` : ''}
<div class="colored-checkbox-group">
${choices.map((choice, i) => `
<label class="checkbox-label" data-color-index="${i}">
<input type="checkbox" value="${choice}" ${(value || []).includes(choice) ? 'checked' : ''}>
${choice}
</label>
`).join('')}
</div>
</div>
"""
css_template = """
.colored-checkbox-container {
border: 1px solid var(--border-color-primary);
border-radius: var(--radius-lg);
padding: var(--spacing-lg);
}
.container-label { display: block; margin-bottom: var(--spacing-md); }
.colored-checkbox-group { display: flex; flex-direction: column; gap: 6px; }
.checkbox-label { display: flex; align-items: center; cursor: pointer; }
.checkbox-label input { margin-right: 8px; }
${choices.map((choice, i) => `.checkbox-label[data-color-index="${i}"] { color: ${colors[i]}; }`).join(' ')}
"""
js_on_load = """
const checkboxes = element.querySelectorAll('input[type="checkbox"]');
checkboxes.forEach(checkbox => {
checkbox.addEventListener('change', () => {
props.value = Array.from(checkboxes)
.filter(cb => cb.checked)
.map(cb => cb.value);
});
});
"""
super().__init__(
value=value or [],
html_template=html_template,
css_template=css_template,
js_on_load=js_on_load,
choices=choices,
colors=colors,
label=label,
**kwargs,
)
def api_info(self):
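        # Hedged note: overriding api_info advertises this component's value
        # type in the auto-generated API schema (here, an array of selected choices).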
return {
"items": {"enum": self.props["choices"], "type": "string"}, # type: ignore
"title": "Checkbox Group",
"type": "array",
}
if __name__ == "__main__":
def update_colors(color: str):
if color.startswith("rgb"):
rgb_values = (
color.replace("rgba", "").replace("rgb", "").strip("()").split(",")
)
r, g, b = (
int(float(rgb_values[0])),
int(float(rgb_values[1])),
int(float(rgb_values[2])),
)
medium = f"#{r:02x}{g:02x}{b:02x}"
else:
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
medium = color
dark = f"#{int(r * 0.6):02x}{int(g * 0.6):02x}{int(b * 0.6):02x}"
light = f"#{int(r + (255 - r) * 0.4):02x}{int(g + (255 - g) * 0.4):02x}{int(b + (255 - b) * 0.4):02x}"
return ColoredCheckboxGroup(
choices=["a", "b", "c"],
colors=[dark, medium, light],
label="Select options",
)
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
cp = gr.ColorPicker(value="#FF0000")
with gr.Column(scale=2):
cg = ColoredCheckboxGroup(
choices=["a", "b", "c"],
colors=["#990000", "#FF0000", "#FF6666"],
label="Select options",
)
gr.Interface(
fn=lambda x: " ".join(x),
inputs=cg,
outputs=gr.Textbox(label="output"),
)
cp.change(update_colors, inputs=cp, outputs=cg, show_progress="hidden")
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/components/custom_html_components/colored_checkbox_group.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/validators.py | from typing import TYPE_CHECKING, Any
from gradio_client.documentation import document
if TYPE_CHECKING:
import numpy as np
@document()
def is_audio_correct_length(
audio: tuple[int, "np.ndarray"], min_length: float | None, max_length: float | None
) -> dict[str, Any]:
"""
Validates that the audio length is within the specified min and max length (in seconds).
Parameters:
audio: A tuple of (sample rate in Hz, audio data as numpy array).
min_length: Minimum length of audio in seconds. If None, no minimum length check is performed.
max_length: Maximum length of audio in seconds. If None, no maximum length check is performed.
Returns:
A dict corresponding to `gr.validate()` indicating whether the audio length is valid and an optional message.
"""
if min_length is not None or max_length is not None:
sample_rate, data = audio
duration = len(data) / sample_rate
if min_length is not None and duration < min_length:
return {
"__type__": "validate",
"is_valid": False,
"message": f"Audio is too short. It must be at least {min_length} seconds",
}
if max_length is not None and duration > max_length:
return {
"__type__": "validate",
"is_valid": False,
"message": f"Audio is too long. It must be at most {max_length} seconds",
}
return {"__type__": "validate", "is_valid": True}
@document()
def is_video_correct_length(
video: str, min_length: float | None, max_length: float | None
) -> dict[str, Any]:
"""
Validates that the video file length is within the specified min and max length (in seconds).
Parameters:
video: The path to the video file.
min_length: Minimum length of video in seconds. If None, no minimum length check is performed.
max_length: Maximum length of video in seconds. If None, no maximum length check is performed.
Returns:
A dict corresponding to `gr.validate()` indicating whether the video length is valid and an optional message.
"""
from gradio.processing_utils import get_video_length
if min_length is not None or max_length is not None:
duration = get_video_length(video)
if min_length is not None and duration < min_length:
return {
"__type__": "validate",
"is_valid": False,
"message": f"Video is too short. It must be at least {min_length} seconds",
}
if max_length is not None and duration > max_length:
return {
"__type__": "validate",
"is_valid": False,
"message": f"Video is too long. It must be at most {max_length} seconds",
}
return {"__type__": "validate", "is_valid": True}
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/validators.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:scripts/benchmark_latency_api.py | import time
import gradio as gr
from gradio_client import Client
import threading
SHOW_RESULTS = False
with gr.Blocks() as demo:
input = gr.Textbox(label="Input")
output = gr.Textbox(label="Output")
input.change(lambda x: x*2, input, output)
_, url, _ = demo.launch(prevent_thread_lock=True)
client = Client(url, verbose=False)
times = []
for _ in range(25):
start = time.time()
result = client.predict("Hello")
end = time.time()
times.append(end - start)
if SHOW_RESULTS:
print("Serial result was", result)
print(f"Serial average: {sum(times) / len(times)} seconds")
parallel_times = []
results = []
lock = threading.Lock()
def make_request():
start = time.time()
result = client.predict("Hello")
end = time.time()
with lock:
parallel_times.append(end - start)
results.append(result)
threads = []
for _ in range(25):
t = threading.Thread(target=make_request)
threads.append(t)
t.start()
for t in threads:
t.join()
if SHOW_RESULTS:
print("Parallel result was", results[0] if results else None)
print(f"Parallel average: {sum(parallel_times) / len(parallel_times)} seconds") | {
"repo_id": "gradio-app/gradio",
"file_path": "scripts/benchmark_latency_api.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:scripts/benchmark_latency_mcp.py | import asyncio
import time
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
import gradio as gr
SHOW_RESULTS = False
with gr.Blocks() as demo:
input = gr.Textbox(label="Input")
output = gr.Textbox(label="Output")
def double(word: str) -> str:
return word * 2
input.change(double, input, output, api_name="predict")
_, url, _ = demo.launch(prevent_thread_lock=True, mcp_server=True)
mcp_url = f"{url}gradio_api/mcp/"
async def make_serial_requests():
times = []
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools = await session.list_tools()
tool_name = tools.tools[0].name
for _ in range(5):
start = time.time()
result = await session.call_tool(tool_name, arguments={"word": "Hello"})
end = time.time()
times.append(end - start)
if SHOW_RESULTS:
print("Serial result was: ", result.content[0].text)
print(f"Serial average: {sum(times) / len(times)} seconds")
asyncio.run(make_serial_requests())
async def make_serial_requests_with_progress():
times = []
progress_counts = []
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools = await session.list_tools()
tool_name = tools.tools[0].name
for _ in range(5):
progress_updates = []
async def progress_callback(progress: float, total: float | None, message: str | None):
progress_updates.append({"progress": progress, "total": total, "message": message})
start = time.time()
result = await session.call_tool(
tool_name,
arguments={"word": "Hello"},
progress_callback=progress_callback,
meta={"progressToken": f"progress-token-{_}"}
)
end = time.time()
times.append(end - start)
progress_counts.append(len(progress_updates))
if SHOW_RESULTS:
print("Serial with progress result was: ", result.content[0].text)
print(f"Serial with progress average: {sum(times) / len(times)} seconds")
print(f"Average progress notifications received: {sum(progress_counts) / len(progress_counts)}")
asyncio.run(make_serial_requests_with_progress())
async def make_parallel_requests():
parallel_times = []
results = []
async def make_request():
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools = await session.list_tools()
tool_name = tools.tools[0].name
start = time.time()
result = await session.call_tool(tool_name, arguments={"word": "Hello"})
end = time.time()
parallel_times.append(end - start)
results.append(result)
tasks = [make_request() for _ in range(25)]
await asyncio.gather(*tasks)
if SHOW_RESULTS:
print("Parallel result was: ", results[0].content[0].text)
print(f"Parallel average: {sum(parallel_times) / len(parallel_times)} seconds")
asyncio.run(make_parallel_requests())
| {
"repo_id": "gradio-app/gradio",
"file_path": "scripts/benchmark_latency_mcp.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:test/test_component_props.py | """Tests for the component props feature."""
import gradio as gr
from gradio import helpers
def test_special_args_detects_component_type_hints():
"""Test that special_args detects when a parameter is type-hinted with a component."""
def func_with_component_hint(x: gr.Number):
return x.value * 2
def func_without_hint(x):
return x * 2
# Test that component type hint is detected
_, _, _, component_prop_indices = helpers.special_args(func_with_component_hint)
assert 0 in component_prop_indices
# Test that no component type hint is detected
_, _, _, component_prop_indices = helpers.special_args(func_without_hint)
assert len(component_prop_indices) == 0
def test_special_args_creates_namespace_with_props():
"""Test that special_args creates a namespace object with component props."""
def func(x: gr.Number):
return x.value
component_props = {
0: {"value": 5, "minimum": 0, "maximum": 10, "label": "Test Number"}
}
inputs, *_ = helpers.special_args(func, inputs=[5], component_props=component_props)
# Check that the input is a SimpleNamespace with all props
assert hasattr(inputs[0], "value")
assert hasattr(inputs[0], "minimum")
assert hasattr(inputs[0], "maximum")
assert hasattr(inputs[0], "label")
assert inputs[0].value == 5
assert inputs[0].minimum == 0
assert inputs[0].maximum == 10
assert inputs[0].label == "Test Number"
def test_mixed_type_hints():
"""Test that we can mix component type hints with regular parameters."""
def func(a: gr.Number, b, c: gr.Textbox):
return a.value + b + len(c.value)
_, _, _, component_prop_indices = helpers.special_args(func)
assert 0 in component_prop_indices # a
assert 1 not in component_prop_indices # b
assert 2 in component_prop_indices # c
def test_block_function_stores_component_prop_inputs():
"""Test that BlockFunction correctly stores component_prop_inputs."""
from gradio.block_function import BlockFunction
def func(x: gr.Number):
return x.value
block_fn = BlockFunction(
fn=func,
inputs=[],
outputs=[],
preprocess=True,
postprocess=True,
inputs_as_dict=False,
targets=[],
_id=0,
component_prop_inputs=[0, 2],
)
assert block_fn.component_prop_inputs == [0, 2]
assert block_fn.get_config()["component_prop_inputs"] == [0, 2]
def test_component_props_in_blocks():
"""Test the full integration of component props in a Blocks app."""
with gr.Blocks() as demo:
a = gr.Number(value=5, minimum=0, maximum=10)
b = gr.Number()
def double_with_props(x: gr.Number):
# Should receive namespace with all props
return x.value * 2
a.submit(double_with_props, a, b)
# Check that the function has component_prop_inputs set
fn = demo.fns[0]
assert 0 in fn.component_prop_inputs
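

def test_component_props_usage_sketch():
    """Hedged sketch mirroring the test above: a type-hinted handler can read
    any prop of its component, e.g. clamping the value to `maximum`."""
    with gr.Blocks() as demo:
        n = gr.Number(value=15, minimum=0, maximum=10)
        out = gr.Number()

        def clamp(x: gr.Number):
            # `x` arrives as a namespace carrying value, minimum, maximum, ...
            return min(x.value, x.maximum)

        n.submit(clamp, n, out)
    # The handler's first parameter should be registered for prop injection.
    assert 0 in demo.fns[0].component_prop_inputs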
| {
"repo_id": "gradio-app/gradio",
"file_path": "test/test_component_props.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
gradio-app/gradio:scripts/sync_frontend.py | from __future__ import annotations
import json
import pathlib
from huggingface_hub import upload_folder
def copy_js_code(root: str | pathlib.Path, hf_token: str | None = None):
NOT_COMPONENT = [
"app",
"node_modules",
"storybook",
"playwright-report",
"workbench",
"tooltils",
"component-test",
"core",
"spa",
]
print("COPYING JS CODE TO gradio/_frontend_code/")
with open(pathlib.Path(root) / "gradio" / "package.json") as f:
    version = json.load(f)["version"]
for entry in (pathlib.Path(root) / "js").iterdir():
if (
entry.is_dir()
and not str(entry.name).startswith("_")
and str(entry.name) not in NOT_COMPONENT
):
print("entry:", entry)
upload_folder(
repo_id="gradio/frontend",
repo_type="dataset",
folder_path=str(entry),
path_in_repo=f"{version}/{entry.name}",
ignore_patterns=[
"CHANGELOG*",
"README.md",
"*/node_modules/*", # Matches content within node_modules folders
"*.test.*",
"*.stories.*",
"*.spec.*",
".svelte-kit/*",
"dist/**",
],
allow_patterns=[
"*.ts",
"*.svelte",
"*.json",
"**/*.ts",
"**/*.svelte",
"**/*.json",
],
token=hf_token,
)
upload_folder(
repo_id="gradio/frontend",
repo_type="dataset",
folder_path=str(pathlib.Path(root) / "client" / "js"),
path_in_repo=f"{version}/client",
ignore_patterns=[
"CHANGELOG*",
"README.md",
"*/node_modules/*", # Matches content within node_modules folders
"*.test.*",
"*.stories.*",
"*.spec.*",
".svelte-kit/*",
"dist/**",
],
allow_patterns=[
"*.ts",
"*.svelte",
"*.json",
"**/*.ts",
"**/*.svelte",
"**/*.json",
],
token=hf_token,
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Sync Frontend code to dataset")
parser.add_argument("hf_token", type=str, help="HF API token")
args = parser.parse_args()
current_dir = pathlib.Path(__file__).parent.resolve()
copy_js_code((current_dir / "..").resolve(), hf_token=args.hf_token)
| {
"repo_id": "gradio-app/gradio",
"file_path": "scripts/sync_frontend.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/tab_render_children/run.py | import gradio as gr
with gr.Blocks() as demo:
gr.Markdown("## Tab render_children parameter")
with gr.Tabs():
with gr.Tab("Tab 1") as tab1:
gr.Markdown("This tab is visible by default")
with gr.Tab("Tab 2", render_children=True) as tab2:
tb = gr.Textbox(label="Will be rendered but hidden", elem_id="invisible-but-rendered")
tb2 = gr.Textbox(label="Will not be rendered", elem_id="invisible-and-not-rendered", visible=False)
tb3 = gr.Textbox(label="Will be rendered but hidden with visible='hidden'", elem_id="visibility-hidden", visible="hidden")
btn = gr.Button("Make textbox interactive", variant="primary")
btn.click(lambda: gr.update(interactive=True), None, tb)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/tab_render_children/run.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/media.py | """
Media Registry for Gradio Demos
This module provides a centralized way to access media files.
Usage:
from gradio.media import get_image, get_video, get_audio, get_model3d, get_file
# Get specific media files
cheetah_img = get_image("cheetah1.jpg")
world_video = get_video("world.mp4")
cantina_audio = get_audio("cantina.wav")
bunny_model = get_model3d("Bunny.obj")
titanic_data = get_file("titanic.csv")
# Get random media of a type
random_img = get_image()
random_video = get_video()
random_audio = get_audio()
"""
import random
from pathlib import Path
from typing import Optional
MEDIA_ROOT = Path(__file__).parent / "media_assets"
MEDIA_PATHS = [
MEDIA_ROOT / "images",
MEDIA_ROOT / "videos",
MEDIA_ROOT / "audio",
MEDIA_ROOT / "models3d",
MEDIA_ROOT / "data",
]
def _get_media_path(media_type: str, filename: Optional[str] = None) -> str:
"""
Internal function to get the path to a media file.
Args:
media_type: Type of media (images, videos, audio, models3d, data)
filename: Optional filename of the media file. If None, returns a random file.
Returns:
Absolute path to the media file
Raises:
ValueError: If media_type is invalid
FileNotFoundError: If the media file doesn't exist
"""
media_dir = MEDIA_ROOT / media_type
if not media_dir.exists():
raise ValueError(f"Media directory not found: {media_dir}")
if filename is None:
# Get a random file from the directory
media_files = list(media_dir.glob("*"))
if not media_files:
raise ValueError(f"No media files found in {media_dir}")
file_path = random.choice(media_files)
else:
if filename.startswith(("http://", "https://")):
return filename
file_path = media_dir / filename
if not file_path.exists():
raise FileNotFoundError(f"Media file not found: {file_path}")
return str(file_path.absolute())
def get_image(filename: Optional[str] = None) -> str:
"""
Get path to an image file.
Args:
filename: Filename of the image (e.g., "tower.jpg"). If None, returns a random image.
Returns:
Absolute path to the image file
Examples:
>>> get_image("tower.jpg") # Get specific image
>>> get_image() # Get random image
"""
return _get_media_path("images", filename)
def get_video(filename: Optional[str] = None) -> str:
"""
Get path to a video file.
Args:
filename: Filename of the video (e.g., "world.mp4"). If None, returns a random video.
Returns:
Absolute path to the video file
Examples:
>>> get_video("world.mp4") # Get specific video
>>> get_video() # Get random video
"""
return _get_media_path("videos", filename)
def get_audio(filename: Optional[str] = None) -> str:
"""
Get path to an audio file.
Args:
filename: Filename of the audio (e.g., "cantina.wav"). If None, returns a random audio file.
Returns:
Absolute path to the audio file
Examples:
>>> get_audio("cantina.wav") # Get specific audio
>>> get_audio() # Get random audio
"""
return _get_media_path("audio", filename)
def get_model3d(filename: Optional[str] = None) -> str:
"""
Get path to a 3D model file.
Args:
filename: Filename of the 3D model (e.g., "Duck.glb"). If None, returns a random model.
Returns:
Absolute path to the 3D model file
Examples:
>>> get_model3d("Duck.glb") # Get specific model
>>> get_model3d() # Get random 3D model
"""
return _get_media_path("models3d", filename)
def get_file(filename: Optional[str] = None) -> str:
"""
Get path to a data file (CSV, JSON, text, etc.).
Args:
filename: Filename of the data file (e.g., "titanic.csv"). If None, returns a random file.
Returns:
Absolute path to the data file
Examples:
>>> get_file("titanic.csv") # Get specific file
>>> get_file() # Get random data file
"""
return _get_media_path("data", filename)
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/media.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/chatbot_feedback/run.py | import gradio as gr
def test_liked_loading():
test_history = [
{"role": "user", "content": "test user message"},
{"role": "assistant", "content": "test assistant message"}
]
# Set feedback_value to ["Like"] for the assistant message
return gr.update(value=test_history, feedback_value=["Like"])
with gr.Blocks() as demo:
chatbot = gr.Chatbot(
resizable=True,
min_height=500,
layout="bubble",
)
chatbot.like(
lambda: None,
inputs=[],
outputs=None,
)
test_btn = gr.Button("Test Liked Loading")
test_btn.click(test_liked_loading, outputs=[chatbot])
if __name__ == "__main__":
demo.launch(debug=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/chatbot_feedback/run.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/cli/commands/load.py | from __future__ import annotations
import typer
from .load_chat import main as chat
load_app = typer.Typer(help="Load various types of interfaces and models")
load_app.command("chat", help="Launch a chat interface using OpenAI-compatible API")(
chat
)
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/cli/commands/load.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/cli/commands/load_chat.py | from __future__ import annotations
import os
import typer
from gradio.external import load_chat as load_chat_external
def main(
base_url: str = typer.Argument(
..., help="OpenAI-compatible base URL, e.g. http://localhost:11434/v1/"
),
model: str = typer.Argument(..., help="Model name, e.g. llama3.2"),
token: str | None = typer.Option(
None,
"--token",
"-t",
help="API key (defaults to $OPENAI_API_KEY if not provided)",
),
file_types: list[str] = typer.Option(
["text_encoded"],
"--file-types",
help="Repeatable option. Allowed values: text_encoded, image",
),
system_message: str | None = typer.Option(
None, "--system-message", help="Optional system prompt"
),
stream: bool = typer.Option(
True, "--stream/--no-stream", help="Enable or disable streaming"
),
host: str | None = typer.Option(
None, "--host", help="Server host (maps to launch.server_name)"
),
port: int = typer.Option(
7860, "--port", help="Server port (maps to launch.server_port)"
),
share: bool = typer.Option(
False, "--share/--no-share", help="Create a public share link"
),
) -> None:
"""Launch a chat interface using OpenAI-compatible API."""
resolved_token = token or os.getenv("OPENAI_API_KEY")
for ft in file_types:
if ft not in {"text_encoded", "image"}:
raise typer.BadParameter("file_types must be one of: text_encoded, image")
demo = load_chat_external(
base_url=base_url,
model=model,
token=resolved_token,
file_types=file_types,
system_message=system_message,
streaming=stream,
)
demo.launch(server_name=host, server_port=port, share=share)
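

# Hedged usage sketch, assuming this command is mounted as `gradio load chat`
# (see gradio/cli/commands/load.py); flag names match the options defined above:
#
#   gradio load chat http://localhost:11434/v1/ llama3.2 \
#       --file-types text_encoded --no-stream --port 7861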
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/cli/commands/load_chat.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/visibility_test/run.py | import gradio as gr
def toggle_visibility(choice, input_text):
"""Toggle visibility based on choice and return current input value."""
updates = {}
if choice == "Visible":
updates["textbox"] = gr.update(visible=True)
updates["button"] = gr.update(visible=True)
elif choice == "Hidden (in DOM)":
updates["textbox"] = gr.update(visible="hidden")
updates["button"] = gr.update(visible="hidden")
else: # "Not Visible (removed)"
updates["textbox"] = gr.update(visible=False)
updates["button"] = gr.update(visible=False)
return updates["textbox"], updates["button"], f"Current value: {input_text}"
def get_value(input_text):
"""Get the current value from the textbox."""
return f"Retrieved value: {input_text}"
def increment_counter(counter):
"""Increment counter to test event handling."""
return counter + 1
with gr.Blocks() as demo:
gr.Markdown("# Visibility Test Demo")
gr.Markdown(
"Test the three visibility states: visible=True, visible='hidden', visible=False"
)
with gr.Row():
visibility_radio = gr.Radio(
["Visible", "Hidden (in DOM)", "Not Visible (removed)"],
label="Choose visibility state",
value="Visible",
elem_id="visibility-radio",
)
with gr.Row():
with gr.Column():
textbox = gr.Textbox(
label="Test Input",
value="Initial text",
elem_id="test-textbox",
visible=True,
)
button = gr.Button("Get Value", elem_id="test-button", visible=True)
# Hidden counter for testing events on hidden elements
counter = gr.Number(value=0, visible="hidden", elem_id="counter")
increment_btn = gr.Button(
"Increment Counter",
elem_id="increment-button",
)
counter_result = gr.Textbox(
label="Counter Result", elem_id="counter-result"
)
with gr.Column():
status = gr.Textbox(label="Status", elem_id="status-output")
output = gr.Textbox(label="Output", elem_id="output-textbox")
# Wire up the events
visibility_radio.change(
toggle_visibility,
inputs=[visibility_radio, textbox],
outputs=[textbox, button, status],
)
button.click(get_value, inputs=textbox, outputs=output)
counter.change(
lambda x: f"Counter Result: {x}", inputs=counter, outputs=counter_result
)
increment_btn.click(increment_counter, inputs=counter, outputs=counter)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/visibility_test/run.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/validator_simple/run.py | import gradio as gr
def validate_input(age, location):
return [
gr.validate(not age or age >= 3, "Age must be at least 3"),
gr.validate("london" not in location.lower(), "Location must not be in London"),
]
def process_text(age, location):
return f"Processed: {age} -- {location.upper()}"
with gr.Blocks() as demo:
gr.Markdown("# Validator Parameter Test Demo")
with gr.Row():
with gr.Column():
age = gr.Number(
label="Enter age",
placeholder="Enter age",
)
location = gr.Textbox(
max_lines=3,
label="Enter location",
placeholder="Enter location",
)
validate_btn = gr.Button("Process with Validation", variant="primary")
output_with_validation = gr.Textbox(
label="Output (with validation)", interactive=False
)
validate_btn.click(
fn=process_text,
validator=validate_input,
inputs=[age, location],
outputs=output_with_validation,
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/validator_simple/run.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/walkthrough/run.py | import gradio as gr
with gr.Blocks() as demo:
with gr.Walkthrough(selected=0) as walkthrough:
with gr.Step("Image", id=0):
image = gr.Image()
btn = gr.Button("go to prompt")
btn.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)
with gr.Step("Prompt", id=1):
prompt = gr.Textbox()
btn = gr.Button("generate")
btn.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)
with gr.Step("Result", id=2):
gr.Image(label="result", interactive=False)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/walkthrough/run.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/walkthrough_many/run.py | import gradio as gr
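# Note: process_step below is illustrative only; it is not wired to any event in this demo.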
def process_step(step_num):
return f"You are on step {step_num}"
with gr.Blocks() as demo:
gr.Markdown("# Stepper - Many Steps")
with gr.Walkthrough(selected=0) as walkthrough:
with gr.Step("Introduction", id=0):
gr.Markdown("This is the introduction step.")
output1 = gr.Textbox(label="Step 1 Output")
next1 = gr.Button("Next Step")
next1.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)
with gr.Step("Basic Information", id=1):
gr.Markdown("Enter your basic information.")
output2 = gr.Textbox(label="Step 2 Output")
next2 = gr.Button("Next Step")
next2.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)
with gr.Step("Preferences", id=2):
gr.Markdown("Set your preferences.")
output3 = gr.Textbox(label="Step 3 Output")
next3 = gr.Button("Next Step")
next3.click(lambda: gr.Walkthrough(selected=3), outputs=walkthrough)
with gr.Step("Advanced Settings", id=3):
gr.Markdown("Configure advanced settings.")
output4 = gr.Textbox(label="Step 4 Output")
next4 = gr.Button("Next Step")
next4.click(lambda: gr.Walkthrough(selected=4), outputs=walkthrough)
with gr.Step("Review", id=4):
gr.Markdown("Review your choices.")
output5 = gr.Textbox(label="Step 5 Output")
next5 = gr.Button("Next Step")
next5.click(lambda: gr.Walkthrough(selected=5), outputs=walkthrough)
with gr.Step("Confirmation", id=5):
gr.Markdown("Confirm and submit.")
output6 = gr.Textbox(label="Step 6 Output")
next6 = gr.Button("Next Step")
next6.click(lambda: gr.Walkthrough(selected=6), outputs=walkthrough)
with gr.Step("Additional Options", id=6):
gr.Markdown("Additional options if needed.")
output7 = gr.Textbox(label="Step 7 Output")
next7 = gr.Button("Next Step")
next7.click(lambda: gr.Walkthrough(selected=7), outputs=walkthrough)
with gr.Step("Final Step", id=7):
gr.Markdown("This is the final step!")
output8 = gr.Textbox(label="Step 8 Output")
gr.Button("Complete")
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/walkthrough_many/run.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/layouts/walkthrough.py | from __future__ import annotations
from gradio_client.documentation import document
from gradio.blocks import BlockContext
from gradio.component_meta import ComponentMeta
from gradio.events import Events
from gradio.i18n import I18nData
@document()
class Walkthrough(BlockContext, metaclass=ComponentMeta):
"""
Walkthrough is a layout element within Blocks that can contain multiple "Step" Components, which can be used to create a step-by-step workflow.
Example:
with gr.Walkthrough(selected=1) as walkthrough:
with gr.Step("Step 1", id=1):
btn = gr.Button("go to Step 2")
btn.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)
with gr.Step("Step 2", id=2):
txt = gr.Textbox("Welcome to Step 2")
Guides: controlling-layout
Demos: walkthrough
"""
EVENTS = [Events.change, Events.select]
def __init__(
self,
*,
selected: int | None = None,
visible: bool = True,
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
render: bool = True,
key: int | str | tuple[int | str, ...] | None = None,
preserved_by_key: list[str] | str | None = None,
):
"""
Parameters:
selected: The currently selected step. Must be a number corresponding to the step number. Defaults to the first step.
visible: If False, Walkthrough will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles.
render: If False, this layout will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
key: in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component. Properties set in 'preserved_by_key' are not reset across a re-render.
preserved_by_key: A list of parameters from this component's constructor. Inside a gr.render() function, if a component is re-rendered with the same key, these (and only these) parameters will be preserved in the UI (if they have been changed by the user or an event listener) instead of re-rendered based on the values provided during constructor.
"""
BlockContext.__init__(
self,
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
render=render,
key=key,
preserved_by_key=preserved_by_key,
)
self.selected = selected
def get_block_name(self):
return "walkthrough"
@document()
class Step(BlockContext, metaclass=ComponentMeta):
"""
Step is a layout element representing a single step in a step-by-step workflow.
"""
EVENTS = [Events.select]
def __init__(
self,
label: str | I18nData | None = None,
visible: bool = True,
interactive: bool = True,
*,
id: int | None = None,
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
scale: int | None = None,
render: bool = True,
key: int | str | tuple[int | str, ...] | None = None,
preserved_by_key: list[str] | str | None = None,
):
"""
Parameters:
label: The visual label for the step
id: An optional numeric identifier for the step, required if you wish to control the selected step from a predict function. Must be a number.
elem_id: An optional string that is assigned as the id of the <div> containing the contents of the Step layout. The same string followed by "-button" is attached to the Step button. Can be used for targeting CSS styles.
elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles.
render: If False, this layout will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
scale: relative size compared to adjacent elements. 1 or greater indicates the Step will expand in size.
visible: If False, Step will be hidden.
interactive: If False, Step will not be clickable.
"""
BlockContext.__init__(
self,
elem_id=elem_id,
elem_classes=elem_classes,
render=render,
key=key,
preserved_by_key=preserved_by_key,
)
self.label = label
self.id = id
self.visible = visible
self.scale = scale
self.interactive = interactive
def get_expected_parent(self) -> type[Walkthrough]:
return Walkthrough
def get_block_name(self):
return "walkthroughstep"
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/layouts/walkthrough.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/iframe_resizer/run.py | import gradio as gr
import time
import os
from gradio import get_image
def greet():
gr.Info("Warning in 1 second")
time.sleep(1)
gr.Warning("Error in 1 second")
time.sleep(1)
raise Exception("test")
im = get_image("cheetah.jpg")
with gr.Blocks() as demo:
with gr.Tab("Accordions"):
with gr.Row(height=1500):
gr.Markdown("Scroll down to see UI.")
greet_btn = gr.Button("Trigger toast")
greet_btn.click(fn=greet)
with gr.Accordion("Accordion"):
gr.Markdown(
"""
## Accordion content
### Accordion content
#### Accordion content
##### Accordion content
###### Accordion content
"""
)
with gr.Tab("Images"):
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
gr.Image(value=im)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/iframe_resizer/run.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/validator_chatinterface/run.py | import gradio as gr
import time
def validate_input(x):
print("VALIDATE", x)
return gr.validate(x != "error", "Can't be error")
def do_chat(message, history):
for i in range(len(message)):
time.sleep(0.05)
yield "You typed: " + message[: i + 1]
demo = gr.ChatInterface(fn=do_chat, validator=validate_input, show_progress="full")
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/validator_chatinterface/run.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/validator_interface/run.py | import gradio as gr
def validate_input(age, location):
is_age_valid = True
is_location_valid = True
if not age or age < 3:
is_age_valid = False
if "london" in location.lower():
is_location_valid = False
return [
gr.validate(is_age_valid, "Age must be at least 3"),
gr.validate(is_location_valid, "Location must not be in London"),
]
def process_text(age, location):
result = f"Processed: {age} -- {location.upper()}"
return result
def validate_image(image):
# we don't want to error when a user is clearing the image
if not image:
return None
is_portrait = image.width < image.height
return gr.validate(is_portrait, "Image must be in portrait mode")
def process_image(image):
if not image:
return "No image uploaded"
return "HELLO IMAGE!!!"
def raise_error():
raise ValueError("test error")
with gr.Blocks() as demo:
with gr.Tab("Text"):
gr.Markdown("# Validator Parameter Test Demo")
with gr.Row():
with gr.Column():
age = gr.Number(
label="Enter age",
placeholder="Enter age",
)
location = gr.Textbox(
max_lines=3,
label="Enter location",
placeholder="Enter location",
)
validate_btn = gr.Button("Process with Validation", variant="primary")
output_with_validation = gr.Textbox(
label="Output (with validation)", interactive=False
)
validate_btn.click(
fn=process_text,
validator=validate_input,
inputs=[age, location],
outputs=output_with_validation,
)
with gr.Tab("Image"):
im = gr.Image(label="Enter image", placeholder="Enter image", type="pil")
t = gr.Textbox(label="Enter text", placeholder="Enter text")
im.change(
fn=process_image,
validator=validate_image,
inputs=im,
outputs=t,
)
with gr.Tab("Validation Error"):
error_btn = gr.Button("Raise Validation Error", variant="primary")
error_btn.click(
validator=raise_error,
fn=raise_error,
inputs=[],
outputs=[],
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/validator_interface/run.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:gradio/block_function.py | from __future__ import annotations
import inspect
from collections.abc import Callable, Sequence
from typing import TYPE_CHECKING, Literal
from . import utils
try:
import spaces # type: ignore
except Exception:
spaces = None
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
from gradio.components.base import Component
from gradio.renderable import Renderable
from .blocks import BlockContext
class BlockFunction:
def __init__(
self,
fn: Callable | None,
inputs: Sequence[Component | BlockContext],
outputs: Sequence[Component | BlockContext],
preprocess: bool,
postprocess: bool,
inputs_as_dict: bool,
targets: list[tuple[int | None, str]],
_id: int,
batch: bool = False,
max_batch_size: int = 4,
concurrency_limit: int | None | Literal["default"] = "default",
concurrency_id: str | None = None,
tracks_progress: bool = False,
api_name: str | None = None,
api_description: str | None | Literal[False] = None,
js: str | Literal[True] | None = None,
show_progress: Literal["full", "minimal", "hidden"] = "full",
show_progress_on: Sequence[Component] | None = None,
cancels: list[int] | None = None,
collects_event_data: bool = False,
trigger_after: int | None = None,
trigger_only_on_success: bool = False,
trigger_only_on_failure: bool = False,
trigger_mode: Literal["always_last", "once", "multiple"] = "once",
queue: bool = True,
scroll_to_output: bool = False,
api_visibility: Literal["public", "private", "undocumented"] = "public",
renderable: Renderable | None = None,
rendered_in: Renderable | None = None,
render_iteration: int | None = None,
is_cancel_function: bool = False,
connection: Literal["stream", "sse"] = "sse",
time_limit: float | None = None,
stream_every: float = 0.5,
event_specific_args: list[str] | None = None,
component_prop_inputs: list[int] | None = None,
page: str = "",
js_implementation: str | None = None,
key: str | int | tuple[int | str, ...] | None = None,
validator: Callable | None = None,
):
self.fn = fn
self._id = _id
self.inputs = inputs
self.outputs = outputs
self.preprocess = preprocess
self.postprocess = postprocess
self.tracks_progress = tracks_progress
self.concurrency_limit: int | None | Literal["default"] = concurrency_limit
self.concurrency_id = concurrency_id or str(id(fn))
self.batch = batch
self.max_batch_size = max_batch_size
self.total_runtime = 0
self.total_runs = 0
self.inputs_as_dict = inputs_as_dict
self.targets = targets
self.name = getattr(fn, "__name__", "fn") if fn is not None else None
self.api_name = api_name
self.api_description = api_description
self.js = js
self.show_progress = show_progress
self.show_progress_on = show_progress_on
self.cancels = cancels or []
self.collects_event_data = collects_event_data
self.trigger_after = trigger_after
self.trigger_only_on_success = trigger_only_on_success
self.trigger_only_on_failure = trigger_only_on_failure
self.trigger_mode = trigger_mode
self.queue = False if fn is None else queue
self.scroll_to_output = False if utils.get_space() else scroll_to_output
self.api_visibility = api_visibility
self.types_generator = inspect.isgeneratorfunction(
self.fn
) or inspect.isasyncgenfunction(self.fn)
self.renderable = renderable
self.rendered_in = rendered_in
self.render_iteration = render_iteration
self.page = page
self.validator = validator
if js_implementation:
self.fn.__js_implementation__ = js_implementation # type: ignore
# We need to keep track of which events are cancel events
# so that the client can call the /cancel route directly
self.is_cancel_function = is_cancel_function
self.time_limit = time_limit
self.stream_every = stream_every
self.connection = connection
self.event_specific_args = event_specific_args
self.component_prop_inputs = component_prop_inputs or []
self.key = key
self.spaces_auto_wrap()
def spaces_auto_wrap(self):
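        # On Hugging Face Spaces (when the optional `spaces` package is
        # importable), wrap the function so Spaces features such as ZeroGPU
        # scheduling can apply; this is a no-op everywhere else.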
if spaces is None:
return
if utils.get_space() is None:
return
self.fn = spaces.gradio_auto_wrap(self.fn)
def __str__(self):
return str(
{
"fn": self.name,
"preprocess": self.preprocess,
"postprocess": self.postprocess,
}
)
def __repr__(self):
return str(self)
def get_config(self):
return {
"id": self._id,
"targets": self.targets,
"inputs": [block._id for block in self.inputs],
"outputs": [block._id for block in self.outputs],
"backend_fn": self.fn is not None,
"js": self.js,
"queue": self.queue,
"api_name": self.api_name,
"api_description": self.api_description,
"scroll_to_output": self.scroll_to_output,
"show_progress": self.show_progress,
"show_progress_on": None
if self.show_progress_on is None
else [block._id for block in self.show_progress_on],
"batch": self.batch,
"max_batch_size": self.max_batch_size,
"cancels": self.cancels,
"types": {
"generator": self.types_generator,
"cancel": self.is_cancel_function,
},
"collects_event_data": self.collects_event_data,
"trigger_after": self.trigger_after,
"trigger_only_on_success": self.trigger_only_on_success,
"trigger_only_on_failure": self.trigger_only_on_failure,
"trigger_mode": self.trigger_mode,
"api_visibility": self.api_visibility,
"rendered_in": self.rendered_in._id if self.rendered_in else None,
"render_id": self.renderable._id if self.renderable else None,
"connection": self.connection,
"time_limit": self.time_limit,
"stream_every": self.stream_every,
"event_specific_args": self.event_specific_args,
"component_prop_inputs": self.component_prop_inputs,
"js_implementation": getattr(self.fn, "__js_implementation__", None),
}
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/block_function.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/image_watermark/run.py | import gradio as gr
import os
from gradio.media import get_image
# get_image() returns file paths to sample media included with Gradio
base_a = get_image("groot.jpeg")
base_b = os.path.join(os.path.dirname(__file__), "files/bird.bmp")
watermark_a = get_image("hf-logo_transpng.png")
watermark_b = os.path.join(os.path.dirname(__file__), "files/logo_nontrans.png")
watermark_c = get_image("logo.png")
def generate_image(original_image, watermark_image):
return gr.Image(original_image, watermark=gr.WatermarkOptions(watermark=watermark_image, position='bottom-left'))
demo = gr.Interface(generate_image, [gr.Image(image_mode=None), gr.Image(image_mode=None)], gr.Image(),
api_name="predict",
examples=[[base_a, watermark_a], [base_b, watermark_b], [base_a, watermark_c], [base_a, watermark_c]])
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/image_watermark/run.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/navbar_customization/run.py | import gradio as gr
with gr.Blocks(title="Navbar Demo") as demo:
navbar = gr.Navbar(value=[("About Me", "https://x.com/abidlabs")], visible=True, main_page_name="Dashboard")
gr.Markdown("# Dashboard Page")
hide_btn = gr.Button("Hide Navbar")
    hide_btn.click(fn=lambda: gr.Navbar(visible=False), outputs=navbar)
    show_btn = gr.Button("Show Navbar")
    show_btn.click(fn=lambda: gr.Navbar(visible=True, main_page_name="Dashboard is Back!"), outputs=navbar)
with demo.route("Settings", "/settings"):
gr.Markdown("# Settings Page")
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/navbar_customization/run.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/components/navbar.py | """gr.Navbar() component."""
from __future__ import annotations
from typing import Any, Literal
from gradio_client.documentation import document
from gradio.components.base import Component
from gradio.events import Events
@document()
class Navbar(Component):
"""
Creates a navigation bar component for multipage Gradio apps. The navbar component allows customizing the
appearance of the navbar for that page. Only one Navbar component can exist per page in a Blocks app,
and it can be placed anywhere within the page.
The Navbar component is designed to control the appearance of the navigation bar in multipage
applications. When present in a Blocks app, its properties override the default navbar behavior.
Example:
```python
import gradio as gr
with gr.Blocks() as demo:
navbar = gr.Navbar(
visible=True,
main_page_name="My App",
value=[("Analytics", "analytics"), ("About", "https://twitter.com/abidlabs")]
)
gr.Textbox(label="Main page content")
with demo.route("About"):
gr.Markdown("This is the about page")
demo.launch()
```
Guides: multipage-apps
"""
EVENTS = [Events.change]
def __init__(
self,
value: list[tuple[str, str]] | None = None,
*,
visible: bool = True,
main_page_name: str | Literal[False] = "Home",
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
render: bool = True,
key: int | str | tuple[int | str, ...] | None = None,
):
"""
Parameters:
value: If a list of tuples of (page_name, page_path) are provided, these additional pages will be added to the navbar alongside the existing pages defined in the Blocks app. The page_path can be either a relative path for internal Gradio app pages (e.g., "analytics") or an absolute URL for external links (e.g., "https://twitter.com/username"). Otherwise, only the pages defined using the `Blocks.route` method will be displayed. Example: [("Dashboard", "dashboard"), ("About", "https://twitter.com/abidlabs")]
visible: If True, the navbar will be visible. If False, the navbar will be hidden.
            main_page_name: The title to display in the navbar for the main page of the Gradio app. If False, the main page will not be displayed in the navbar.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
            render: If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
key: in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component.
"""
self.visible = visible
self.main_page_name = main_page_name
super().__init__(
elem_id=elem_id,
elem_classes=elem_classes,
render=render,
key=key,
visible=visible,
value=value,
)
def preprocess(
self, payload: list[tuple[str, str]] | None
) -> list[tuple[str, str]] | None:
return payload
def postprocess(
self, value: list[tuple[str, str]] | None
) -> list[tuple[str, str]] | None:
return value
def api_info(self) -> dict[str, Any]:
return {}
def example_payload(self) -> list[tuple[str, str]] | None:
return None
def example_value(self) -> list[tuple[str, str]] | None:
return None
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/components/navbar.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/audio_subtitle/run.py | import gradio as gr
from gradio.media import get_audio, get_file, MEDIA_PATHS
a = get_audio("cate_blanch.mp3")
b = get_audio("cate_blanch_2.mp3")
s1 = get_file("s1.srt")
s2 = get_file("s2.vtt")
def add_subtitles_to_audio(audio, subtitles=None):
    if subtitles is None:
        return audio
    return gr.Audio(label="Out", value=audio, subtitles=subtitles.name)
demo = gr.Interface(
fn=add_subtitles_to_audio,
inputs=[
gr.Audio(label="In", interactive=True),
gr.File(label="Subtitle", file_types=[".srt", ".vtt"]),
],
outputs=gr.Audio(label="Out"),
examples=[
[a, s1],
[b, s2],
],
api_name="predict",
)
if __name__ == "__main__":
demo.launch(allowed_paths=[MEDIA_PATHS]) # type: ignore
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/audio_subtitle/run.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/draggable_dashboard/run.py | import gradio as gr
import numpy as np
import pandas as pd
with gr.Blocks() as demo:
gr.Markdown("# Draggable Dashboard Demo")
gr.Markdown("Drag the charts around to reorder them!")
x = np.linspace(0, 10, 100)
data = pd.DataFrame({
'x': x,
'y1': np.random.normal(100, 20, 100) + 10 * np.sin(x),
'y2': np.random.normal(500, 100, 100) + 50 * np.cos(x),
'y3': np.random.normal(1000, 200, 100) + 100 * np.sin(x/2),
'y4': np.random.normal(0.15, 0.05, 100) + 0.05 * np.cos(x/3)
})
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### Horizontal Layout (orientation='row')")
with gr.Draggable(orientation="row"):
gr.LinePlot(
data,
x="x",
y="y1",
title="Chart 1",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y2",
title="Chart 2",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y3",
title="Chart 3",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y4",
title="Chart 4",
height=200,
)
with gr.Column(scale=1):
gr.Markdown("### Vertical Layout (orientation='column')")
with gr.Draggable(orientation="column"):
gr.LinePlot(
data,
x="x",
y="y1",
title="Chart 1",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y2",
title="Chart 2",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y3",
title="Chart 3",
height=200,
)
gr.LinePlot(
data,
x="x",
y="y4",
title="Chart 4",
height=200,
)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/draggable_dashboard/run.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/layouts/draggable.py | from __future__ import annotations
from typing import Literal
from gradio_client.documentation import document
from gradio.blocks import BlockContext
from gradio.component_meta import ComponentMeta
@document()
class Draggable(BlockContext, metaclass=ComponentMeta):
"""
Draggable is a layout element within Blocks that renders children with drag and drop functionality.
A user can reorder children by dragging them around and snapping them into place. If a child is a
layout (e.g. gr.Row, gr.Group), all the components in the child layout will drag together.
Demos: draggable_dashboard
"""
EVENTS = []
def __init__(
self,
*,
orientation: Literal["row", "column"] = "column",
visible: bool | Literal["hidden"] = True,
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
render: bool = True,
key: int | str | tuple[int | str, ...] | None = None,
preserved_by_key: list[str] | str | None = None,
):
"""
Parameters:
orientation: The direction in which children are arranged. 'row' arranges children horizontally, 'column' arranges them vertically.
visible: If False, draggable container will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles.
render: If False, this layout will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
key: in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component. Properties set in 'preserved_by_key' are not reset across a re-render.
preserved_by_key: A list of parameters from this component's constructor. Inside a gr.render() function, if a component is re-rendered with the same key, these (and only these) parameters will be preserved in the UI (if they have been changed by the user or an event listener) instead of re-rendered based on the values provided during constructor.
"""
self.orientation = orientation
super().__init__(
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
render=render,
key=key,
preserved_by_key=preserved_by_key,
)
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/layouts/draggable.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/rate_limit/run.py | import gradio as gr
from datetime import datetime, timedelta
from collections import defaultdict
import threading
rate_limit_data = defaultdict(list)
lock = threading.Lock()
UNAUTH_RATE_LIMIT = 3
AUTH_RATE_LIMIT = 30
RATE_LIMIT_WINDOW = 60
def clean_old_entries(user_id):
"""Remove entries older than the rate limit window"""
current_time = datetime.now()
cutoff_time = current_time - timedelta(seconds=RATE_LIMIT_WINDOW)
rate_limit_data[user_id] = [
timestamp for timestamp in rate_limit_data[user_id]
if timestamp > cutoff_time
]
def get_user_identifier(profile: gr.OAuthProfile | None, request: gr.Request) -> tuple[str, bool]:
"""Get user identifier and whether they're authenticated"""
if profile is not None:
return profile.username, True
else:
if request:
return f"ip_{request.client.host}", False
return "ip_unknown", False
def check_rate_limit(user_id: str, is_authenticated: bool) -> tuple[bool, int, int]:
"""
Check if user has exceeded rate limit
Returns: (can_proceed, clicks_used, max_clicks)
"""
with lock:
clean_old_entries(user_id)
max_clicks = AUTH_RATE_LIMIT if is_authenticated else UNAUTH_RATE_LIMIT
clicks_used = len(rate_limit_data[user_id])
can_proceed = clicks_used < max_clicks
return can_proceed, clicks_used, max_clicks
def add_click(user_id: str):
"""Add a click timestamp for the user"""
with lock:
rate_limit_data[user_id].append(datetime.now())
def update_status(profile: gr.OAuthProfile | None, request: gr.Request) -> str:
"""Update the status message showing current rate limit info"""
user_id, is_authenticated = get_user_identifier(profile, request)
_, clicks_used, max_clicks = check_rate_limit(user_id, is_authenticated)
if is_authenticated:
return f"✅ You are logged in as '{profile.username}'. You have clicked {clicks_used} times this minute. You have {max_clicks} total clicks per minute." # type: ignore
else:
return f"⚠️ You are not logged in. You have clicked {clicks_used} times this minute. You have {max_clicks} total clicks per minute."
def run_action(profile: gr.OAuthProfile | None, request: gr.Request) -> tuple[str, str]:
"""Handle the run button click with rate limiting"""
user_id, is_authenticated = get_user_identifier(profile, request)
can_proceed, clicks_used, max_clicks = check_rate_limit(user_id, is_authenticated)
if not can_proceed:
result = f"❌ Rate limit exceeded! You've used all {max_clicks} clicks for this minute. Please wait before trying again."
status = update_status(profile, request)
return result, status
add_click(user_id)
_, new_clicks_used, _ = check_rate_limit(user_id, is_authenticated)
result = f"✅ Action executed successfully! (Click #{new_clicks_used})"
status = update_status(profile, request)
return result, status
with gr.Blocks(title="Rate Limiting Demo") as demo:
gr.Markdown("# Rate Limiting Demo App")
gr.Markdown("This app demonstrates rate limiting based on authentication status.")
gr.LoginButton()
status_text = gr.Markdown("Loading status...")
with gr.Row():
run_btn = gr.Button("🚀 Run Action", variant="primary", scale=1)
result_text = gr.Markdown("")
demo.load(update_status, inputs=None, outputs=status_text)
run_btn.click(
run_action,
inputs=None,
outputs=[result_text, status_text]
)
gr.Markdown("---")
gr.Markdown("""
### Rate Limits:
- **Not logged in:** 3 clicks per minute (based on IP address)
- **Logged in:** 30 clicks per minute (based on HF username)
### How it works:
- Click the **Login** button to authenticate with Hugging Face
- Click the **Run Action** button to test the rate limiting
- The system tracks your clicks over a rolling 1-minute window
""")
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/rate_limit/run.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/html_autoscroll/run.py | import gradio as gr
import time
def longer(val):
for i in range(10):
val = val + f"<p>This is paragraph {i+1}.</p>"
time.sleep(0.2)
yield val
with gr.Blocks() as demo:
    h = gr.HTML(value="<p>This is paragraph 0.</p>", max_height=200, autoscroll=True)
demo.load(longer, h, h)
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/html_autoscroll/run.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_resources_and_prompts/fastmcp.py | """
This is the equivalent of `run.py` but implemented with FastMCP v1.
It is taken directly from the quickstart example from the MCP Python SDK:
https://github.com/modelcontextprotocol/python-sdk
"""
from mcp.server.fastmcp import FastMCP
# Create an MCP server
mcp = FastMCP("Demo")
# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers"""
return a + b
# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
"""Get a personalized greeting"""
return f"Hello, {name}!"
# Add a prompt
@mcp.prompt()
def greet_user(name: str, style: str = "friendly") -> str:
"""Generate a greeting prompt"""
styles = {
"friendly": "Please write a warm, friendly greeting",
"formal": "Please write a formal, professional greeting",
"casual": "Please write a casual, relaxed greeting",
}
return f"{styles.get(style, styles['friendly'])} for someone named {name}."
if __name__ == "__main__":
mcp.run()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_resources_and_prompts/fastmcp.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_resources_and_prompts/run.py | """
Adapts the FastMCP quickstart example to work with Gradio's MCP integration.
"""
import gradio as gr
@gr.mcp.tool() # Not needed as functions are registered as tools by default
def add(a: int, b: int) -> int:
"""Add two numbers"""
return a + b
@gr.mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
"""Get a personalized greeting"""
return f"Hello, {name}!"
@gr.mcp.prompt()
def greet_user(name: str, style: str = "friendly") -> str:
"""Generate a greeting prompt"""
styles = {
"friendly": "Please write a warm, friendly greeting",
"formal": "Please write a formal, professional greeting",
"casual": "Please write a casual, relaxed greeting",
}
return f"{styles.get(style, styles['friendly'])} for someone named {name}."
demo = gr.TabbedInterface(
[
gr.Interface(add, [gr.Number(value=1), gr.Number(value=2)], gr.Number()),
gr.Interface(get_greeting, gr.Textbox("Abubakar"), gr.Textbox()),
gr.Interface(greet_user, [gr.Textbox("Abubakar"), gr.Dropdown(choices=["friendly", "formal", "casual"])], gr.Textbox()),
],
[
"Add",
"Get Greeting",
"Greet User",
]
)
if __name__ == "__main__":
demo.launch(mcp_server=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_resources_and_prompts/run.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/cli/commands/hf_login.py | import os
from huggingface_hub import interpreter_login, whoami
def save_login():
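    # Record the user's choice in GRADIO_AUTO_LOGOUT; "true" means the
    # cached Hugging Face login should be cleared again later.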
try:
while True:
response = input("Stay logged in to Hugging Face? (Y/n): ").strip().lower()
if response in ["y", "yes", ""]: # Empty string for just pressing Enter
os.environ["GRADIO_AUTO_LOGOUT"] = "false"
print("You can logout at any time with: hf auth logout")
break
elif response in ["n", "no"]:
os.environ["GRADIO_AUTO_LOGOUT"] = "true"
break
else:
print("Please answer with 'y' or 'n'")
except (EOFError, KeyboardInterrupt):
os.environ["GRADIO_AUTO_LOGOUT"] = "true"
def hf_login():
if os.getenv("HF_TOKEN"):
try:
user = whoami(token=os.getenv("HF_TOKEN")).get("name")
print(
f"🔓 Logged in to Hugging Face as {user}. You can logout at any time with: unset HF_TOKEN\n"
)
except Exception:
print("❌ Error logging in to Hugging Face with $HF_TOKEN")
print("Logging in with CLI prompt...\n")
interpreter_login()
save_login()
else:
try:
user = whoami().get("name")
print(
f"🔓 Logged in to Hugging Face as {user}. You can logout at any time with: hf auth logout\n"
)
except Exception:
print(
"🔑 No Hugging Face login found, launching login prompt... \n\nPlease use a token with permission to make calls to Inference Providers \n"
)
interpreter_login()
save_login()
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/cli/commands/hf_login.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/dia_dialogue_demo/run.py | import gradio as gr
import httpx
tags = [
"(laughs)",
"(clears throat)",
"(sighs)",
"(gasps)",
"(coughs)",
"(singing)",
"(sings)",
"(mumbles)",
"(beep)",
"(groans)",
"(sniffs)",
"(claps)",
"(screams)",
"(inhales)",
"(exhales)",
"(applause)",
"(burps)",
"(humming)",
"(sneezes)",
"(chuckle)",
"(whistles)",
]
speakers = ["Speaker 1", "Speaker 2"]
client = httpx.AsyncClient(timeout=180)
API_URL = "https://router.huggingface.co/fal-ai/fal-ai/dia-tts"
async def query(dialogue: str, token: gr.OAuthToken | None):
if token is None:
raise gr.Error(
"No token provided. Use Sign in with Hugging Face to get a token."
)
headers = {
"Authorization": f"Bearer {token.token}",
}
response = await client.post(API_URL, headers=headers, json={"text": dialogue})
url = response.json()["audio"]["url"]
print("URL: ", url)
return url
def formatter(speaker, text):
speaker = speaker.split(" ")[1]
return f"[S{speaker}] {text}"
with gr.Blocks() as demo:
with gr.Sidebar():
login_button = gr.LoginButton()
gr.HTML(
"""
<h1 style='text-align: center; display: flex; align-items: center; justify-content: center;'>
<img src="https://huggingface.co/datasets/freddyaboulton/bucket/resolve/main/dancing_huggy.gif" alt="Dancing Huggy" style="height: 100px; margin-right: 10px"> Dia Dialogue Generation Model
</h1>
<h2 style='text-align: center; display: flex; align-items: center; justify-content: center;'>Model by <a href="https://huggingface.co/nari-labs/Dia-1.6B"> Nari Labs</a>. Powered by HF and <a href="https://fal.ai/">Fal AI</a> API.</h2>
        <h4>Dia is a dialogue generation model that can generate realistic dialogue between two speakers. Use the dialogue component to create a conversation and then hit the submit button in the bottom right corner to see it come to life.</h4>
"""
)
with gr.Row():
with gr.Column():
dialogue = gr.Dialogue(
speakers=speakers, tags=tags, formatter=formatter
)
with gr.Column():
with gr.Row():
audio = gr.Audio(label="Audio")
with gr.Row():
gr.DeepLinkButton(value="Share Audio via Link")
with gr.Row():
gr.Examples(
examples=[
[
[
{
"speaker": "Speaker 1",
"text": "Why did the chicken cross the road?",
},
{"speaker": "Speaker 2", "text": "I don't know!"},
{
"speaker": "Speaker 1",
"text": "to get to the other side! (laughs)",
},
]
],
[
[
{
"speaker": "Speaker 1",
"text": "I am a little tired today (sighs).",
},
{"speaker": "Speaker 2", "text": "Hang in there!"},
]
],
],
inputs=[dialogue],
cache_examples=False,
)
dialogue.submit(query, [dialogue], audio)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/dia_dialogue_demo/run.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/dialogue_component/run.py | import gradio as gr
with gr.Blocks() as demo:
dd = gr.Dialogue(speakers=["Speaker 1", "Speaker 2"],
tags=["(laughs)", "(sighs)", "(clears throat)"],
value=[
{"speaker": "Speaker 1", "text": "Hello, how are you?"},
{"speaker": "Speaker 2", "text": "I'm fine, thank you!"},
], separator="\n", interactive=True)
output = gr.Textbox(label="Output", value="")
dd.submit(lambda x: x, inputs=dd, outputs=output)
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/dialogue_component/run.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/dialogue_diarization_demo/run.py | # type: ignore
import gradio as gr
from pyannote.audio import Pipeline
import whisper
diarization_pipeline = None
whisper_model = None
def load_models():
global diarization_pipeline, whisper_model # noqa: PLW0603
if diarization_pipeline is None:
diarization_pipeline = Pipeline.from_pretrained(
"pyannote/speaker-diarization-3.1", use_auth_token=True
)
if whisper_model is None:
whisper_model = whisper.load_model("base")
def real_diarization(audio_file_path: str) -> list[dict[str, str]]:
try:
load_models()
if diarization_pipeline is None or whisper_model is None:
raise Exception("Failed to load models")
diarization = diarization_pipeline(audio_file_path)
transcription = whisper_model.transcribe(audio_file_path)
segments = transcription["segments"]
dialogue_segments = []
speaker_mapping = {}
speaker_counter = 1
for segment in segments:
start_time = segment["start"]
end_time = segment["end"]
text = segment["text"].strip()
speaker = "Speaker 1"
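            # Attribute the segment to the diarization turn it overlaps,
            # mapping raw pyannote labels to "Speaker N" in order of first appearance.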
for turn, _, speaker_label in diarization.itertracks(yield_label=True):
if (
turn.start <= start_time <= turn.end
or turn.start <= end_time <= turn.end
):
if speaker_label not in speaker_mapping:
speaker_mapping[speaker_label] = f"Speaker {speaker_counter}"
speaker_counter += 1
speaker = speaker_mapping[speaker_label]
break
if text:
dialogue_segments.append({"speaker": speaker, "text": text})
return dialogue_segments
except Exception as e:
print(f"Error in diarization: {str(e)}")
return []
def process_audio(audio_file):
if audio_file is None:
gr.Warning("Please upload an audio file first.")
return []
try:
dialogue_segments = real_diarization(audio_file)
return dialogue_segments
except Exception as e:
gr.Error(f"Error processing audio: {str(e)}")
return []
speakers = [
"Speaker 1",
"Speaker 2",
"Speaker 3",
"Speaker 4",
"Speaker 5",
"Speaker 6",
]
tags = [
"(pause)",
"(background noise)",
"(unclear)",
"(overlap)",
"(phone ringing)",
"(door closing)",
"(music)",
"(applause)",
"(laughter)",
]
def format_speaker(speaker, text):
return f"{speaker}: {text}"
with gr.Blocks(title="Audio Diarization Demo") as demo:
with gr.Row():
with gr.Column(scale=1):
audio_input = gr.Audio(
label="Upload Audio File",
type="filepath",
sources=["upload", "microphone"],
)
process_btn = gr.Button("🔍 Analyze Speakers", variant="primary", size="lg")
with gr.Column(scale=2):
dialogue_output = gr.Dialogue(
speakers=speakers,
tags=tags,
formatter=format_speaker,
label="AI-generated speaker-separated conversation",
value=[],
)
process_btn.click(fn=process_audio, inputs=[audio_input], outputs=[dialogue_output])
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/dialogue_diarization_demo/run.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/dialogue_mock_diarization/run.py | import gradio as gr
speakers = [
"Speaker 1",
"Speaker 2",
]
def format_speaker(speaker, text):
return f"{speaker}: {text}"
def mock_diarization(audio):
return [
{
"speaker": "Speaker 1",
"text": "Hello, how are you?",
},
{
"speaker": "Speaker 2",
"text": "I'm fine, thank you!",
},
{
"speaker": "Speaker 1",
"text": "What's your name?",
},
{
"speaker": "Speaker 2",
"text": "My name is John Doe.",
},
{
"speaker": "Speaker 1",
"text": "Nice to meet you!",
},
{
"speaker": "Speaker 2",
"text": "Nice to meet you!",
},
]
demo = gr.Interface(
fn=mock_diarization,
inputs=[gr.Audio(sources=["microphone"])],
outputs=[gr.Dialogue(speakers=speakers, tags=None, formatter=format_speaker)],
title="Mock Speech Diarization",
description="Mock speech diarization",
api_name="predict"
)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/dialogue_mock_diarization/run.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/components/dialogue.py | from __future__ import annotations
from collections.abc import Callable
from typing import Literal
from gradio_client.documentation import document
from gradio.components.base import Component, server
from gradio.components.button import Button
from gradio.data_classes import GradioModel, GradioRootModel
from gradio.events import Events
from gradio.utils import set_default_buttons
class DialogueLine(GradioModel):
speaker: str
text: str
class DialogueModel(GradioRootModel):
root: list[DialogueLine] | str
@document()
class Dialogue(Component):
"""
Creates a Dialogue component for displaying or collecting multi-speaker conversations. This component can be used as input to allow users to enter dialogue involving multiple speakers, or as output to display diarized speech, such as the result of a transcription or speaker identification model. Each message can be associated with a specific speaker, making it suitable for use cases like conversations, interviews, or meetings.
Demos: dia_dialogue_demo
"""
EVENTS = [
Events.change,
Events.input,
Events.submit,
]
data_model = DialogueModel
def __init__(
self,
value: list[dict[str, str]] | Callable | None = None,
*,
type: Literal["list", "text"] = "text",
speakers: list[str] | None = None,
formatter: Callable | None = None,
unformatter: Callable | None = None,
tags: list[str] | None = None,
separator: str = "\n",
color_map: dict[str, str] | None = None,
label: str | None = "Dialogue",
info: str
| None = "Type colon (:) in the dialogue line to see the available tags",
placeholder: str | None = None,
show_label: bool | None = None,
container: bool = True,
scale: int | None = None,
min_width: int = 160,
interactive: bool | None = None,
visible: bool | Literal["hidden"] = True,
elem_id: str | None = None,
autofocus: bool = False,
autoscroll: bool = True,
elem_classes: list[str] | str | None = None,
render: bool = True,
key: int | str | None = None,
max_lines: int | None = None,
buttons: list[Literal["copy"] | Button] | None = None,
submit_btn: str | bool | None = False,
ui_mode: Literal["dialogue", "text", "both"] = "both",
):
"""
Parameters:
value: Value of the dialogue. It is a list of dictionaries, each containing a 'speaker' key and a 'text' key. If a function is provided, the function will be called each time the app loads to set the initial value of this component.
type: The type of the component, either "list" for a multi-speaker dialogue consisting of dictionaries with 'speaker' and 'text' keys or "text" for a single text input. Defaults to "text".
speakers: The different speakers allowed in the dialogue. If `None` or an empty list, no speakers will be displayed. Instead, the component will be a standard textarea that optionally supports `tags` autocompletion.
formatter: A function that formats the dialogue line dictionary, e.g. {"speaker": "Speaker 1", "text": "Hello, how are you?"} into a string, e.g. "Speaker 1: Hello, how are you?". This function is run on user input and the resulting string is passed into the prediction function.
unformatter: A function that parses a formatted dialogue string back into a dialogue line dictionary. Should take a single string line and return a dictionary with 'speaker' and 'text' keys. If not provided, the default unformatter will attempt to parse the default formatter pattern.
tags: The different tags allowed in the dialogue. Tags are displayed in an autocomplete menu below the input textbox when the user starts typing `:`. Use the exact tag name expected by the AI model or inference function.
separator: The separator between the different dialogue lines used to join the formatted dialogue lines into a single string. It should be unambiguous. For example, a newline character or tab character.
color_map: A dictionary mapping speaker names to colors. The colors may be specified as hex codes or by their names. For example: {"Speaker 1": "red", "Speaker 2": "#FFEE22"}. If not provided, default colors will be assigned to speakers. This is only used if `interactive` is False.
max_lines: maximum number of lines allowed in the dialogue.
placeholder: placeholder hint to provide behind textarea.
label: the label for this component, displayed above the component if `show_label` is `True` and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component corresponds to.
            show_label: if True, will display the label. If False, the copy button is hidden as well as the label.
container: if True, will place the component in a container - providing some extra padding around the border.
scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
interactive: if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden. If "hidden", component will be visually hidden and not take up space in the layout but still exist in the DOM
autofocus: If True, will focus on the textbox when the page loads. Use this carefully, as it can cause usability issues for sighted and non-sighted users.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
            render: If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
buttons: A list of buttons to show for the component. Valid options are "copy" or a gr.Button() instance. The "copy" button allows the user to copy the text in the textbox. Custom gr.Button() instances will appear in the toolbar with their configured icon and/or label, and clicking them will trigger any .click() events registered on the button. By default, no buttons are shown.
submit_btn: If False, will not show a submit button. If True, will show a submit button with an icon. If a string, will use that string as the submit button text.
autoscroll: If True, will automatically scroll to the bottom of the textbox when the value changes, unless the user scrolls up. If False, will not scroll to the bottom of the textbox when the value changes.
ui_mode: Determines the user interface mode of the component. Can be "dialogue" (displays dialogue lines), "text" (displays a single text input), or "both" (displays both dialogue lines and a text input). Defaults to "both".
"""
super().__init__(
value=value,
label=label,
info=info,
show_label=show_label,
container=container,
scale=scale,
min_width=min_width,
interactive=interactive,
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
render=render,
key=key,
)
        if separator == "":
            raise ValueError("Separator cannot be an empty string.")
self.ui_mode = ui_mode
self.type = type
self.placeholder = placeholder
self.autofocus = autofocus
self.autoscroll = autoscroll
self.max_lines = max_lines
self.speakers = speakers
self.tags = tags or []
self.formatter = formatter
self.unformatter = unformatter
self.separator = separator
self.color_map = color_map
self.buttons = set_default_buttons(buttons, None)
self.submit_btn = submit_btn
if not interactive:
self.info = None
def preprocess(self, payload: DialogueModel) -> str | list[dict[str, str]]: # type: ignore
"""
Parameters:
payload: Expects a `DialogueModel` object or string.
Returns:
Returns the dialogue as a string or list of dictionaries.
"""
if self.type == "list":
return payload.model_dump()
return self._format(payload)
def _format(self, payload: DialogueModel) -> str:
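        # Treat an empty string, or a single dialogue line with empty text,
        # as "no value" and render it as the empty string.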
if (isinstance(payload.root, str) and payload.root == "") or (
isinstance(payload.root, list)
and len(payload.root) == 1
and payload.root[0].text == ""
):
return ""
formatter = self.formatter
if not formatter:
formatter = self.default_formatter
if isinstance(payload.root, str):
return payload.root
return self.separator.join(
[formatter(line.speaker, line.text) for line in payload.root]
)
@staticmethod
def default_formatter(speaker: str, text: str) -> str:
return f"[{speaker}] {text}"
@staticmethod
def default_unformatter(line: str, default_speaker: str) -> dict[str, str]:
"""Parse a formatted dialogue line back into speaker and text components."""
line = line.strip()
if not line:
return {"speaker": "", "text": ""}
# Try to parse using the default formatter pattern: [speaker] text
if line.startswith("[") and "]" in line:
bracket_end = line.find("]")
speaker = line[1:bracket_end]
text = line[bracket_end + 1 :].strip()
return {"speaker": speaker, "text": text}
else:
return {"speaker": default_speaker, "text": line}
@server
async def format(self, value: list[dict] | str):
"""Format the dialogue in the frontend into a string that's copied to the clipboard."""
data = DialogueModel(root=value) # type: ignore
return self._format(data)
@server
async def unformat(self, payload: dict):
"""Parse a formatted dialogue string back into dialogue data structure."""
value = payload.get("text", "")
if not value or value.strip() == "":
return []
lines = value.split(self.separator)
dialogue_lines = []
unformatter = self.unformatter
if not unformatter:
unformatter = self.default_unformatter
default_speaker = "Unknown"
if isinstance(self.speakers, list) and len(self.speakers):
default_speaker = self.speakers[0]
for line in lines:
line = line.strip()
if not line:
continue
parsed_line = unformatter(line, default_speaker)
if parsed_line["speaker"] or parsed_line["text"]: # Skip empty lines
dialogue_lines.append(parsed_line)
return dialogue_lines
def postprocess( # type: ignore
self, value: list[dict[str, str]] | str | None
) -> DialogueModel | None:
"""
Parameters:
value: Expects a string or a list of dictionaries of dialogue lines, where each dictionary contains 'speaker' and 'text' keys, or a string.
Returns:
Returns the dialogue as a `DialogueModel` object for the frontend.
"""
if value is None:
return None
if isinstance(value, str):
return DialogueModel(root=value)
dialogue_lines = [
DialogueLine(speaker=line["speaker"], text=line["text"]) for line in value
]
return DialogueModel(root=dialogue_lines)
def as_example(self, value):
return self.preprocess(DialogueModel(root=value))
def example_payload(self):
return [
{"speaker": "Speaker 1", "text": "Hello, how are you?"},
{"speaker": "Speaker 2", "text": "I'm fine, thank you!"},
]
def example_value(self):
return [
{"speaker": "Speaker 1", "text": "Hello, how are you?"},
{"speaker": "Speaker 2", "text": "I'm fine, thank you!"},
]
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/components/dialogue.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
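A minimal sketch of the formatter/unformatter contract documented above, assuming only the public gr.Dialogue API; the colon-style helpers are illustrative names, not part of the repository:
import gradio as gr
def colon_formatter(speaker: str, text: str) -> str:
    # "Alice", "hi" -> "Alice: hi"
    return f"{speaker}: {text}"
def colon_unformatter(line: str, default_speaker: str) -> dict[str, str]:
    # Inverse of colon_formatter; a line without a "name: " prefix is
    # attributed to default_speaker, mirroring default_unformatter above.
    speaker, sep, text = line.partition(": ")
    if not sep:
        return {"speaker": default_speaker, "text": line.strip()}
    return {"speaker": speaker, "text": text.strip()}
dialogue = gr.Dialogue(
    speakers=["Alice", "Bob"],
    formatter=colon_formatter,
    unformatter=colon_unformatter,
    separator="\n",
)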
gradio-app/gradio:test/components/test_dialogue.py | import gradio as gr
from gradio.components.dialogue import DialogueLine, DialogueModel
class TestDialogue:
def test_component_functions(self):
"""
Test preprocess, postprocess, and basic functionality
"""
dialogue = gr.Dialogue(speakers=["Speaker 1", "Speaker 2"])
dialogue_data = [
DialogueLine(speaker="Speaker 1", text="Hello there!"),
DialogueLine(speaker="Speaker 2", text="Hi, how are you?"),
]
preprocessed = dialogue.preprocess(gr.Dialogue.data_model(root=dialogue_data))
assert preprocessed == "[Speaker 1] Hello there!\n[Speaker 2] Hi, how are you?"
postprocessed = dialogue.postprocess(
[
{"speaker": "Speaker 1", "text": "Hello there!"},
{"speaker": "Speaker 2", "text": "Hi, how are you?"},
]
)
assert postprocessed is not None
assert isinstance(postprocessed.root, list)
assert len(postprocessed.root) == 2
assert postprocessed.root[0].speaker == "Speaker 1"
assert postprocessed.root[0].text == "Hello there!"
postprocessed_str = dialogue.postprocess("Hello world")
assert postprocessed_str is not None
assert isinstance(postprocessed_str.root, str)
assert postprocessed_str.root == "Hello world"
assert dialogue.postprocess(None) is None
def test_dialogue_with_tags(self):
"""
Test dialogue with tags parameter
"""
dialogue = gr.Dialogue(
speakers=["Agent", "Customer"],
tags=["greeting", "question", "answer", "closing"],
)
assert dialogue.tags == ["greeting", "question", "answer", "closing"]
assert dialogue.speakers == ["Agent", "Customer"]
def test_dialogue_with_color_map(self):
"""
Test dialogue with custom color map
"""
color_map = {"Speaker 1": "#ff0000", "Speaker 2": "#00ff00"}
dialogue = gr.Dialogue(speakers=["Speaker 1", "Speaker 2"], color_map=color_map)
assert dialogue.color_map == color_map
def test_dialogue_with_formatter(self):
"""
Test dialogue with custom formatter
"""
def custom_formatter(speaker, text):
return f"{speaker}: {text}"
dialogue = gr.Dialogue(speakers=["Alice", "Bob"], formatter=custom_formatter)
dialogue_data = [
DialogueLine(speaker="Alice", text="Hello!"),
DialogueLine(speaker="Bob", text="Hi there!"),
]
preprocessed = dialogue.preprocess(gr.Dialogue.data_model(root=dialogue_data))
assert preprocessed == "Alice: Hello!\nBob: Hi there!"
def test_dialogue_without_speakers(self):
"""
Test dialogue without speakers (plain text mode)
"""
dialogue = gr.Dialogue(speakers=None)
assert dialogue.speakers is None
preprocessed = dialogue.preprocess(
gr.Dialogue.data_model(root="Just some text")
)
assert preprocessed == "Just some text"
def test_get_config(self):
"""
Test get_config returns expected configuration
"""
dialogue = gr.Dialogue(
speakers=["A", "B"],
label="Test Dialogue",
buttons=["copy"],
max_lines=10,
)
config = dialogue.get_config()
assert config["speakers"] == ["A", "B"]
assert config["label"] == "Test Dialogue"
assert config["buttons"] == ["copy"]
assert config["max_lines"] == 10
assert config["name"] == "dialogue"
def test_dialogue_separator(self):
"""
Test dialogue with custom separator
"""
dialogue = gr.Dialogue(speakers=["A", "B"], separator="\n")
dialogue_data = [
DialogueLine(speaker="A", text="First line"),
DialogueLine(speaker="B", text="Second line"),
]
preprocessed = dialogue.preprocess(gr.Dialogue.data_model(root=dialogue_data))
assert preprocessed == "[A] First line\n[B] Second line"
def test_example_value(self):
"""
Test example_value and as_example methods
"""
dialogue = gr.Dialogue(speakers=["Speaker 1", "Speaker 2"])
example = dialogue.example_value()
assert isinstance(example, list)
assert len(example) == 2
assert example[0]["speaker"] == "Speaker 1"
assert example[0]["text"] == "Hello, how are you?"
example_str = dialogue.as_example(example)
assert isinstance(example_str, str)
assert "Speaker 1" in example_str
assert "Hello, how are you?" in example_str
def test_dialogue_preprocess_list(self):
"""
Test preprocess with a list of DialogueLine objects
"""
dialogue = gr.Dialogue(speakers=["Speaker 1", "Speaker 2"], type="list")
dialogue_data = DialogueModel(
root=[
DialogueLine(speaker="Speaker 1", text="Hello!"),
DialogueLine(speaker="Speaker 2", text="Hi!"),
]
)
preprocessed = dialogue.preprocess(dialogue_data)
assert preprocessed == [
{"speaker": "Speaker 1", "text": "Hello!"},
{"speaker": "Speaker 2", "text": "Hi!"},
]
| {
"repo_id": "gradio-app/gradio",
"file_path": "test/components/test_dialogue.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
gradio-app/gradio:demo/chatinterface_deep_link/run.py | import time
import gradio as gr
def slow_echo(message, history):
for i in range(len(message["text"])):
time.sleep(0.05)
yield "You typed: " + message["text"][: i + 1]
chat = gr.ChatInterface(
slow_echo,
flagging_mode="manual",
flagging_options=["Like", "Spam", "Inappropriate", "Other"],
save_history=False,
multimodal=True,
api_name="chat"
)
with gr.Blocks() as demo:
chat.render()
gr.DeepLinkButton()
with demo.route("cached_examples"):
gr.Interface(lambda x, y: f"{y}: {x}",
inputs=[gr.Textbox(label="name"),
gr.Radio(label="Salutation", choices=["Hello", "Greetings"])
],
outputs=gr.Textbox(label="Output"),
examples=[["Freddy", "Hello"]],
cache_examples=True,
api_name="predict",
deep_link=True)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/chatinterface_deep_link/run.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:scripts/upload_docs_json.py | import json
import re
from pathlib import Path
from subprocess import run
import html2text
from bs4 import BeautifulSoup
from huggingface_hub import HfApi
api = HfApi()
def obj_divs_to_markdown(html: str) -> str:
soup = BeautifulSoup(html, "html.parser")
objs = soup.select("div.obj")
if not objs:
return ""
obj_html = "\n".join(div.decode_contents() for div in objs)
md_text = html2text.html2text(obj_html).replace(
"",
"",
)
md_text = md_text.replace("🔗\n", "")
md_text = re.sub(r"###\s*Guides[\s\S]*\Z", "", md_text)
return md_text
MAX_LEN = 2000
OVERLAP = 200
H3_PATTERN = re.compile(r"^(###\s+.+)$", re.M)
H2_PATTERN = re.compile(r"^(##\s+.+)$", re.M)
def split_with_overlap(text: str, max_len: int = MAX_LEN, overlap: int = OVERLAP):
"""Yield slices of text each ≤ max_len, adding `overlap` chars of context."""
start = 0
while start < len(text):
end = start + max_len
yield text[start:end]
start = end - overlap
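# Worked example (added for illustration): with max_len=10 and overlap=3,
# list(split_with_overlap("abcdefghijklmnopqrstuvwx", 10, 3)) yields
# ["abcdefghij", "hijklmnopq", "opqrstuvwx", "vwx"] - each chunk repeats
# the last 3 characters of the previous one, and a short tail chunk
# may be fully contained in its predecessor.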
def markdown_to_docs(
markdown: str,
*,
page_url: str,
page_title: str,
docs: list,
split_by_h2: bool = False,
):
"""
Convert a Markdown string into an array of doc dictionaries, each ≤ 2000 chars.
Sections are split on h_type headings; oversized sections are broken up with overlap.
"""
H_PATTERN = H2_PATTERN if split_by_h2 else H3_PATTERN
headings = [
(m.start(), m.end(), m.group(1).lstrip("# ").strip())
for m in H_PATTERN.finditer(markdown)
]
headings.append((len(markdown), len(markdown), None))
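    # Sentinel entry at end-of-text so the loop below also emits the final section.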
for idx in range(len(headings) - 1):
start, h_end, h_text = headings[idx]
next_start, _, _ = headings[idx + 1]
section_body = markdown[h_end:next_start].lstrip()
chunks = (
[section_body]
if len(section_body) <= MAX_LEN
else list(split_with_overlap(section_body))
)
for i, chunk in enumerate(chunks):
heading = h_text
docs.append(
dict(
text=chunk.replace("# ", "").replace("#", ""),
heading1=heading,
source_page_url=page_url,
source_page_title=page_title,
)
)
return docs
if __name__ == "__main__":
ROOT = Path(__file__).parent.parent
DOCS_DIR = (ROOT / "js/_website/build/docs").resolve()
GUIDES_DIR = (ROOT / "guides").resolve()
docs = []
for dir in DOCS_DIR.iterdir():
if dir.is_dir():
for file in dir.iterdir():
if file.is_file() and file.suffix == ".html":
html = file.read_text(encoding="utf-8")
markdown = obj_divs_to_markdown(html)
page_url = f"https://gradio.app/docs/{dir.name}/{file.name.replace('.html', '')}"
page_title = f"{dir.name.replace('-', ' ').title()} - {file.name.replace('.html', '').replace('-', ' ').title()} Docs"
docs = markdown_to_docs(
markdown, page_url=page_url, page_title=page_title, docs=docs
)
for dir in GUIDES_DIR.iterdir():
if dir.is_dir() and dir.name not in ["assets", "cn"]:
for file in dir.iterdir():
if file.is_file() and file.suffix == ".md":
markdown = file.read_text(encoding="utf-8")
page_name = file.name.replace(".md", "")
page_name = re.sub(r"^\d+_", "", page_name)
dir_name = dir.name.replace("-", " ")
dir_name = re.sub(r"^\d+_", "", dir_name)
page_url = f"https://gradio.app/guides/{page_name}"
page_title = f"{dir_name.title()} - {page_name.replace('-', ' ').title()} Guide"
docs = markdown_to_docs(
markdown,
page_url=page_url,
page_title=page_title,
docs=docs,
split_by_h2=True,
)
print(f"Generated {len(docs)} chunks.")
with open((ROOT / "scripts/docs.json").resolve(), "w") as f:
json.dump(docs, f)
result = run(
["git", "log", "-1", "--pretty=format:%H|%s"],
capture_output=True,
text=True,
check=True,
)
sha, subject = result.stdout.strip().split("|", 1)
commit_hash = sha[:7]
commit_message = subject if len(subject) <= 30 else subject[:30] + "..."
try:
commit_info = api.upload_file(
path_or_fileobj=(ROOT / "scripts/docs.json").resolve(),
path_in_repo="docs.json",
repo_id="gradio/docs",
repo_type="dataset",
commit_message=f"Changes from: {commit_hash} '{commit_message}'",
)
print("✅ docs.json uploaded")
except Exception as e:
print(f"❌ Error uploading docs.json: {e}")
| {
"repo_id": "gradio-app/gradio",
"file_path": "scripts/upload_docs_json.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/hidden_change/run.py | import gradio as gr
import time
import random
def screen_data(data, n):
entry = data.get(f"{n}", {})
ts = entry.get("timestamp", "unknown time")
msg = entry.get("message", "unknown message")
return f"At {ts}, JS says: “{msg}”"
def increment_counter(counter):
return counter + 1
def update_hidden_json(hidden_json, n):
new_n = n + 1
return {
**hidden_json,
f"{new_n}": {"timestamp": time.time(), "message": f"number {new_n + 1}"},
}, new_n
with gr.Blocks() as demo:
with gr.Tab(label="hidden component"):
n = gr.State(0)
hidden_json = gr.JSON(visible=False)
display = gr.Textbox(label="Screened Output")
demo.load(
fn=None,
js="""
() => {
const data = {
"0": {
message: "Hello from client JS! Number 1",
timestamp: new Date().toLocaleTimeString()
},
};
return data; // this goes into hidden_json
}
""",
outputs=[hidden_json],
)
counter = gr.Number(label="Counter", value=0)
hidden_json.change(fn=increment_counter, inputs=[counter], outputs=[counter])
hidden_json.change(fn=screen_data, inputs=[hidden_json, n], outputs=[display])
button = gr.Button("Update hidden_json")
button.click(
fn=update_hidden_json, inputs=[hidden_json, n], outputs=[hidden_json, n]
)
with gr.Tab(label="same data"):
btnA = gr.Button("A")
boxA = gr.Textbox()
btnA.click(lambda: "A", outputs=boxA)
btnB = gr.Button("B")
boxB = gr.Textbox(visible=False)
btnB.click(lambda x: x, boxA, boxB)
with gr.Row():
num1 = gr.Textbox(label="Text A")
num2 = gr.Textbox(label="Text B")
boxA.change(random.random, outputs=num1)
boxB.change(random.random, outputs=num2)
with gr.Tab(label="hidden parent"):
btnA = gr.Button("A")
boxA = gr.Textbox()
btnA.click(lambda: "A", outputs=boxA)
btnB = gr.Button("B")
with gr.Row(visible=False):
boxB = gr.Textbox()
btnB.click(lambda x: x, boxA, boxB)
with gr.Row():
num1 = gr.Textbox(label="Text A")
num2 = gr.Textbox(label="Text B")
boxA.change(random.random, outputs=num1)
boxB.change(random.random, outputs=num2)
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/hidden_change/run.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_progress/run.py | import gradio as gr
import time
def slow_text_reverser(text: str, progress=gr.Progress()):
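    # gr.Progress reports incremental status; the demo's name suggests these
    # updates are also surfaced to MCP clients when launched with mcp_server=True.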
for i in range(len(text)):
progress(i / len(text), desc="Reversing text")
time.sleep(0.3)
return text[::-1]
demo = gr.Interface(slow_text_reverser, gr.Textbox("Hello, world!"), gr.Textbox(), api_name="predict")
if __name__ == "__main__":
demo.launch(mcp_server=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_progress/run.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/cli/commands/upload_mcp.py | def main(url_or_space_id: str, source_directory: str):
import httpx
from gradio_client.utils import is_http_url_like
from huggingface_hub import space_info
from mcp.server.fastmcp import FastMCP # type: ignore
from gradio.utils import abspath, is_in_or_equal
source_path = abspath(source_directory)
mcp = FastMCP("upload-mcp")
if is_http_url_like(url_or_space_id):
url = url_or_space_id.rstrip("/")
else:
url = f"https://{space_info(url_or_space_id).subdomain}.hf.space"
@mcp.tool()
def upload_file_to_gradio(file: str) -> str:
"""Generate a Gradio File Input for a local file by uploading it to a Gradio app and returning the URL.
Arguments:
file: A complete, absolute path to a local file to upload.
Returns:
Gradio File Input - A URL to the uploaded file.
"""
target_path = abspath(file)
if not is_in_or_equal(target_path, source_path):
raise ValueError(f"File {file} is not in {source_path}")
with open(target_path, "rb") as f:
response = httpx.post(f"{url}/gradio_api/upload", files={"files": f})
response.raise_for_status()
result = response.json()[0]
return f"{url}/gradio_api/file={result}"
mcp.run(transport="stdio")
if __name__ == "__main__":
import typer
typer.run(main)
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/cli/commands/upload_mcp.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/load_openapi_spec/run.py | import gradio as gr
demo = gr.load_openapi(
openapi_spec="https://petstore3.swagger.io/api/v3/openapi.json",
base_url="https://petstore3.swagger.io/api/v3",
paths=["/pet.*"],
methods=["get", "post"],
)
if __name__ == "__main__":
demo.launch(mcp_server=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/load_openapi_spec/run.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:test/test_external_utils.py | from unittest.mock import MagicMock, patch
import pytest
import gradio as gr
from gradio.external_utils import (
component_from_parameter_schema,
component_from_request_body_schema,
create_endpoint_fn,
get_model_info,
resolve_schema_ref,
)
@pytest.mark.parametrize(
"param_info,expected_type",
[
({"name": "age", "schema": {"type": "integer"}}, gr.Number),
({"name": "is_active", "schema": {"type": "boolean"}}, gr.Checkbox),
({"name": "name", "schema": {"type": "string"}}, gr.Textbox),
({"name": "category", "schema": {"type": "object"}}, gr.Textbox),
],
)
def test_component_from_parameter_schema(param_info, expected_type):
comp = component_from_parameter_schema(param_info)
assert isinstance(comp, expected_type)
def test_resolve_schema_ref_direct():
schema = {"type": "string"}
spec = {}
assert resolve_schema_ref(schema, spec) == schema
def test_resolve_schema_ref_ref():
schema = {"$ref": "#/components/schemas/Pet"}
spec = {
"components": {
"schemas": {
"Pet": {"type": "object", "properties": {"id": {"type": "integer"}}}
}
}
}
resolved = resolve_schema_ref(schema, spec)
assert resolved["type"] == "object"
assert "id" in resolved["properties"]
@pytest.mark.parametrize(
"request_body,expected_type",
[
(
{"content": {"application/json": {"schema": {"type": "object"}}}},
gr.Textbox,
),
(
{
"content": {
"application/octet-stream": {
"schema": {"type": "string", "format": "binary"}
}
}
},
gr.File,
),
],
)
def test_component_from_request_body_schema(request_body, expected_type):
comp = component_from_request_body_schema(request_body, {})
assert isinstance(comp, expected_type)
def test_create_endpoint_fn_signature():
operation = {
"parameters": [
{"name": "petId", "in": "path", "schema": {"type": "integer"}},
{"name": "status", "in": "query", "schema": {"type": "string"}},
],
"summary": "Find pet by ID and status",
}
fn = create_endpoint_fn("/pet/{petId}", "get", operation, "http://api.example.com")
sig = fn.__signature__ # type: ignore
assert [p.name for p in sig.parameters.values()][:2] == ["petId", "status"]
def test_create_endpoint_fn_docstring():
operation = {
"parameters": [
{
"name": "petId",
"in": "path",
"description": "ID of pet",
"schema": {"type": "integer"},
},
],
"summary": "Find pet by ID",
"description": "Returns a pet by its ID.",
}
fn = create_endpoint_fn("/pet/{petId}", "get", operation, "http://api.example.com")
doc = fn.__doc__
assert doc
assert "Returns a pet by its ID" in doc
assert "petId" in doc
@patch("httpx.get")
def test_create_endpoint_fn_with_auth_token(mock_get):
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"result": "success"}
mock_get.return_value = mock_response
operation = {
"parameters": [
{"name": "id", "in": "query", "schema": {"type": "string"}},
],
"summary": "Test endpoint with auth",
}
fn = create_endpoint_fn(
"/test",
"get",
operation,
"http://api.example.com",
auth_token="my-secret-token",
)
result = fn("test-id")
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args[1]["headers"]["Authorization"] == "Bearer my-secret-token"
assert call_args[1]["headers"]["Content-Type"] == "application/json"
assert result == {"result": "success"}
def test_create_endpoint_fn_without_auth_token():
with patch("httpx.get") as mock_get:
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"result": "success"}
mock_get.return_value = mock_response
operation = {"summary": "Test endpoint without auth"}
fn = create_endpoint_fn(
"/test", "get", operation, "http://api.example.com", auth_token=None
)
fn()
call_args = mock_get.call_args
assert "Authorization" not in call_args[1]["headers"]
assert call_args[1]["headers"]["Content-Type"] == "application/json"
def test_get_model_info_fastest_raises_value_error():
"""Using a model with :fastest raises ValueError (e.g. when huggingface_hub version is <1.0)."""
with pytest.raises(ValueError) as exc_info:
get_model_info("models/deepseek-ai/DeepSeek-R1-0528:fastest")
assert "To use :cheapest or :fastest, upgrade huggingface_hub" in str(
exc_info.value
)
| {
"repo_id": "gradio-app/gradio",
"file_path": "test/test_external_utils.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
gradio-app/gradio:.config/pycompile-lite-wheel.py | import subprocess
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class BuildHook(BuildHookInterface):
def finalize(self, version, build_data, artifact_path):
subprocess.run(["pyodide", "py-compile", "--keep", artifact_path], check=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": ".config/pycompile-lite-wheel.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/many_tabs/run.py | import gradio as gr
import pandas as pd
import numpy as np
# Sample data for components
sample_df = pd.DataFrame(
{
"Name": ["Alice", "Bob", "Charlie", "Diana", "Eve"],
"Age": [25, 30, 35, 28, 32],
"City": ["New York", "London", "Paris", "Tokyo", "Berlin"],
"Score": [95.5, 87.2, 92.1, 88.9, 91.3],
}
)
def process_audio(audio):
if audio is None:
return "No audio uploaded"
return f"Audio file received: {audio}"
def process_image(image):
if image is None:
return "No image uploaded"
return "Image processed successfully!"
def process_3d_model(model):
if model is None:
return "No 3D model uploaded"
return f"3D model file received: {model}"
def process_video(video):
if video is None:
return "No video uploaded"
return f"Video file received: {video}"
def chat_response(message, history):
if history is None:
history = []
response = f"You said: {message}. This is a demo response!"
history.append([message, response])
return history, ""
def update_dataframe(df, action):
if action == "Add Row":
new_row = pd.DataFrame(
{"Name": ["New User"], "Age": [25], "City": ["New City"], "Score": [90.0]}
)
return pd.concat([df, new_row], ignore_index=True)
elif action == "Clear":
return pd.DataFrame({"Name": [], "Age": [], "City": [], "Score": []})
return df
with gr.Blocks(title="Multi-Component Demo with 11 Tabs") as demo:
gr.Markdown("# 🎛️ Multi-Component Gradio Demo")
    gr.Markdown(
        "This demo showcases various Gradio components across 11 interactive tabs."
    )
with gr.Tabs() as main_tabs:
with gr.Tab("📝 Text", id="tab_text"):
gr.Markdown("### Text Processing")
text_input = gr.Textbox(
label="Input Text",
placeholder="Enter your text here...",
lines=3,
interactive=True,
)
text_area = gr.TextArea(
label="Large Text Area",
placeholder="Enter longer text...",
lines=5,
interactive=True,
)
text_output = gr.Textbox(label="Processed Text", interactive=True)
def process_text(text1, text2):
combined = f"Text 1: {text1}\nText 2: {text2}\nTotal characters: {len(text1) + len(text2)}"
return combined
text_input.change(
process_text, inputs=[text_input, text_area], outputs=text_output
)
with gr.Tab("📝 Text 2", id="tab_text_2"):
gr.Markdown("### Text Processing")
text_input_2 = gr.Textbox(
label="Input Text",
placeholder="Enter your text here...",
interactive=True,
)
text_area_2 = gr.TextArea(
label="Large Text Area",
placeholder="Enter longer text...",
interactive=True,
)
text_output_2 = gr.Textbox(label="Processed Text", interactive=True)
def process_text(text1, text2):
combined = f"Text 1: {text1}\nText 2: {text2}\nTotal characters: {len(text1) + len(text2)}"
return combined
text_input_2.change(
process_text, inputs=[text_input_2, text_area_2], outputs=text_output_2
)
# Tab 1: 3D Model Viewer
with gr.Tab("🎯 3D Model", id="tab_3d"):
gr.Markdown("### 3D Model Viewer")
model_input = gr.Model3D(
label="Upload 3D Model", interactive=True, height=400
)
model_output = gr.Textbox(label="Model Status", interactive=True)
model_input.change(
process_3d_model, inputs=model_input, outputs=model_output
)
# Tab 2: Image Editor
with gr.Tab("🖼️ Image Editor", id="tab_image_editor"):
gr.Markdown("### Image Editor")
image_editor = gr.ImageEditor(
label="Edit Image",
interactive=True,
height=400,
)
editor_output = gr.Textbox(label="Editor Status", interactive=True)
image_editor.change(
lambda x: "Image edited!" if x else "No image",
inputs=image_editor,
outputs=editor_output,
)
# Tab 3: Audio
with gr.Tab("🎵 Audio", id="tab_audio"):
gr.Markdown("### Audio Component")
with gr.Row():
audio_input = gr.Audio(
label="Upload Audio", interactive=True, type="filepath"
)
audio_mic = gr.Audio(
label="Record Audio", interactive=True, sources=["microphone"]
)
audio_output = gr.Textbox(label="Audio Status", interactive=True)
audio_input.change(process_audio, inputs=audio_input, outputs=audio_output)
# Tab 4: Image
with gr.Tab("📸 Image", id="tab_image"):
gr.Markdown("### Image Component")
with gr.Row():
image_input = gr.Image(
label="Upload Image", interactive=True, height=300
)
image_webcam = gr.Image(
label="Webcam Image", interactive=True, sources=["webcam"]
)
image_output = gr.Textbox(label="Image Status", interactive=True)
image_input.change(process_image, inputs=image_input, outputs=image_output)
# Tab 5: Dataframe
with gr.Tab("📊 Dataframe", id="tab_dataframe"):
gr.Markdown("### Interactive Dataframe")
df_component = gr.Dataframe(
value=sample_df,
label="Data Table",
interactive=True,
wrap=True,
)
with gr.Row():
add_row_btn = gr.Button("Add Row", interactive=True)
clear_btn = gr.Button("Clear Data", interactive=True)
df_status = gr.Textbox(label="Dataframe Status", interactive=True)
add_row_btn.click(
lambda df: update_dataframe(df, "Add Row"),
inputs=df_component,
outputs=df_component,
)
clear_btn.click(
lambda df: update_dataframe(df, "Clear"),
inputs=df_component,
outputs=df_component,
)
        # Tab 6: Text Processing (defined at the top of the Tabs block)
        # Tab 7: File Upload
with gr.Tab("📁 Files", id="tab_files"):
gr.Markdown("### File Upload")
file_input = gr.File(
label="Upload Files",
interactive=True,
file_count="multiple",
file_types=["image", "video", "audio", ".pdf", ".txt"],
)
file_output = gr.Textbox(label="File Status", interactive=True)
def process_files(files):
if files is None or len(files) == 0:
return "No files uploaded"
file_names = [f.name for f in files]
return f"Uploaded {len(files)} files: {', '.join(file_names)}"
file_input.change(process_files, inputs=file_input, outputs=file_output)
# Tab 8: Chatbot
with gr.Tab("💬 Chatbot", id="tab_chatbot"):
gr.Markdown("### Interactive Chatbot")
def echo(message, history):
return message
gr.ChatInterface(
fn=echo,
examples=["hello", "hola", "merhaba"],
title="Echo Bot",
)
# Tab 9: Gallery
with gr.Tab("🖼️ Gallery", id="tab_gallery"):
gr.Markdown("### Image Gallery")
gallery = gr.Gallery(
label="Image Gallery",
columns=3,
rows=2,
height="400px",
interactive=True,
allow_preview=True,
)
gallery_input = gr.File(
label="Add Images to Gallery",
file_count="multiple",
file_types=["image"],
interactive=True,
)
gallery_status = gr.Textbox(label="Gallery Status", interactive=True)
def update_gallery(files):
if files is None:
return [], "No images uploaded"
return files, f"Gallery updated with {len(files)} images"
gallery_input.change(
update_gallery, inputs=gallery_input, outputs=[gallery, gallery_status]
)
# Tab 10: Video
with gr.Tab("🎬 Video", id="tab_video"):
gr.Markdown("### Video Component")
video_input = gr.Video(label="Upload Video", interactive=True, height=400)
video_webcam = gr.Video(
label="Record Video", interactive=True, sources=["webcam"]
)
video_output = gr.Textbox(label="Video Status", interactive=True)
video_input.change(process_video, inputs=video_input, outputs=video_output)
# Global controls
gr.Markdown("---")
with gr.Row():
selected_tab = gr.Textbox(label="Currently Selected Tab", interactive=True)
        tab_counter = gr.Number(
            label="Tab Number (1-11)", value=1, minimum=1, maximum=11, interactive=True
        )
# Tab selection functionality
def get_selected_tab(evt: gr.SelectData):
tab_names = [
"3D Model",
"Image Editor",
"Audio",
"Image",
"Dataframe",
"Text",
"Files",
"Chatbot",
"Gallery",
"Video",
]
return f"Selected: {evt.value}"
# Add select events for all tabs
tabs = [
main_tabs
] # You would need to reference individual tabs for this to work properly
gr.Markdown("### 🎯 Features:")
gr.Markdown("""
- **Tab 1**: 3D Model viewer with file upload
- **Tab 2**: Image editor with drawing tools
- **Tab 3**: Audio upload and recording
- **Tab 4**: Image upload and webcam capture
- **Tab 5**: Interactive dataframe with CRUD operations
- **Tab 6**: Text processing with multiple input types
- **Tab 7**: Multi-file upload with various formats
- **Tab 8**: Interactive chatbot interface
- **Tab 9**: Image gallery with preview
- **Tab 10**: Video upload and recording
""")
if __name__ == "__main__":
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/many_tabs/run.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:gradio/brotli_middleware.py | """ASGI Brotli middleware built on top of Starlette.
Code is based on the GZipMiddleware shipped with Starlette.
"""
import io
import re
from typing import NoReturn
from brotli import MODE_FONT, MODE_GENERIC, MODE_TEXT, Compressor # type: ignore
from starlette.datastructures import Headers, MutableHeaders
from starlette.middleware.gzip import GZipResponder
from starlette.types import ASGIApp, Message, Receive, Scope, Send
class Mode:
"""Brotli available modes."""
generic = MODE_GENERIC
text = MODE_TEXT
font = MODE_FONT
class BrotliMiddleware:
"""Brotli middleware public interface."""
def __init__(
self,
app: ASGIApp,
quality: int = 4,
mode: str = "text",
lgwin: int = 22,
lgblock: int = 0,
minimum_size: int = 400,
gzip_fallback: bool = True,
excluded_handlers: list[str] | None = None,
) -> None:
"""
        Arguments:
            quality: Controls the compression-speed vs compression-
                density tradeoff. The higher the quality, the slower the compression.
                Range is 0 to 11.
            mode: The compression mode can be:
                generic, text (*default*, used for UTF-8 text input)
                or font (for WOFF 2.0).
lgwin: Base 2 logarithm of the sliding window size. Range
is 10 to 24.
lgblock: Base 2 logarithm of the maximum input block size.
Range is 16 to 24. If set to 0, the value will be set based on the
quality.
minimum_size: Only compress responses that are bigger than this value in bytes.
gzip_fallback: If True, uses gzip encoding if br is not in the Accept-Encoding header.
excluded_handlers: List of handlers to be excluded from being compressed.
"""
self.app = app
self.quality = quality
self.mode = getattr(Mode, mode)
self.minimum_size = minimum_size
self.lgwin = lgwin
self.lgblock = lgblock
self.gzip_fallback = gzip_fallback
if excluded_handlers:
self.excluded_handlers = [re.compile(path) for path in excluded_handlers]
else:
self.excluded_handlers = []
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if (
self._is_handler_excluded(scope)
or scope["type"] != "http"
or not self._is_compressible_file_type(scope)
):
return await self.app(scope, receive, send)
headers = Headers(scope=scope)
if "br" in headers.get("Accept-Encoding", ""):
br_responder = BrotliResponder(
self.app,
self.quality,
self.mode,
self.lgwin,
self.lgblock,
self.minimum_size,
)
await br_responder(scope, receive, send)
return
if self.gzip_fallback and "gzip" in headers.get("Accept-Encoding", ""):
gzip_responder = GZipResponder(self.app, self.minimum_size)
await gzip_responder(scope, receive, send)
return
await self.app(scope, receive, send)
def _is_handler_excluded(self, scope: Scope) -> bool:
handler = scope.get("path", "")
return any(pattern.search(handler) for pattern in self.excluded_handlers)
# explicitly handle html, js, css, json via a whitelist. woff2 files are already compressed.
# we don't want to compress binary files as they do not benefit much and it can cause bugs
def _is_compressible_file_type(self, scope: Scope) -> bool:
"""Check if the requested file has a compressible file extension."""
path = scope.get("path", "")
compressible_extensions = {
".html",
".htm",
".js",
".css",
".json",
".md",
".txt",
".csv",
".tsv",
".xml",
".svg",
}
if "." in path:
extension = "." + path.split(".")[-1].lower()
return extension in compressible_extensions
return False
class BrotliResponder:
"""Brotli Interface."""
def __init__(
self,
app: ASGIApp,
quality: int,
        mode: int,
lgwin: int,
lgblock: int,
minimum_size: int,
) -> None: # noqa
self.app = app
self.quality = quality
self.mode = mode
self.lgwin = lgwin
self.lgblock = lgblock
self.minimum_size = minimum_size
self.send: Send = unattached_send
self.initial_message: Message = {}
self.started = False
self.content_encoding_set = False
self.br_file = Compressor(
quality=self.quality, mode=self.mode, lgwin=self.lgwin, lgblock=self.lgblock
)
self.br_buffer = io.BytesIO()
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: # noqa
self.send = send
await self.app(scope, receive, self.send_with_brotli)
async def send_with_brotli(self, message: Message) -> None:
"""Apply compression using brotli."""
message_type = message["type"]
if message_type == "http.response.start":
# Don't send the initial message until we've determined how to
# modify the outgoing headers correctly.
self.initial_message = message
headers = Headers(raw=self.initial_message["headers"])
self.content_encoding_set = "content-encoding" in headers
elif message_type == "http.response.body" and self.content_encoding_set:
if not self.started:
self.started = True
await self.send(self.initial_message)
await self.send(message)
elif message_type == "http.response.body" and not self.started:
self.started = True
body = message.get("body", b"")
more_body = message.get("more_body", False)
if len(body) < self.minimum_size and not more_body:
# Don't apply Brotli to small outgoing responses.
await self.send(self.initial_message)
await self.send(message)
elif not more_body:
# Standard Brotli response.
body = self._process(body) + self.br_file.finish()
headers = MutableHeaders(raw=self.initial_message["headers"])
headers["Content-Encoding"] = "br"
headers["Content-Length"] = str(len(body))
headers.add_vary_header("Accept-Encoding")
message["body"] = body
await self.send(self.initial_message)
await self.send(message)
else:
# Initial body in streaming Brotli response.
headers = MutableHeaders(raw=self.initial_message["headers"])
headers["Content-Encoding"] = "br"
headers.add_vary_header("Accept-Encoding")
del headers["Content-Length"]
self.br_buffer.write(self._process(body) + self.br_file.flush())
message["body"] = self.br_buffer.getvalue()
self.br_buffer.seek(0)
self.br_buffer.truncate()
await self.send(self.initial_message)
await self.send(message)
elif message_type == "http.response.body":
# Remaining body in streaming Brotli response.
body = message.get("body", b"")
more_body = message.get("more_body", False)
self.br_buffer.write(self._process(body) + self.br_file.flush())
if not more_body:
self.br_buffer.write(self.br_file.finish())
message["body"] = self.br_buffer.getvalue()
self.br_buffer.close()
await self.send(message)
return
message["body"] = self.br_buffer.getvalue()
self.br_buffer.seek(0)
self.br_buffer.truncate()
await self.send(message)
def _process(self, body):
"""Workaround to support both brotli and brotlipy
Before the official Google brotli repository offered a Python version,
there was a separate package to connect to brotli. These APIs are nearly
identical except that the official Google API has Compressor.process
while the brotlipy API has Compress.compress
"""
if hasattr(self.br_file, "process"):
return self.br_file.process(body)
return self.br_file.compress(body)
async def unattached_send(_: Message) -> NoReturn:
raise RuntimeError("send awaitable not set") # pragma: no cover
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/brotli_middleware.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/reload_mode/functions.py | import gradio as gr
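# gr.NO_RELOAD is True on a normal run; when the app is served with the
# `gradio` CLI in reload mode, code guarded by `if gr.NO_RELOAD:` is skipped
# on re-execution, so the two branches below reveal which path ran.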
if gr.NO_RELOAD:
def get_status(): # type: ignore
return "full"
else:
def get_status():
return "reloaded"
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/reload_mode/functions.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/reload_mode/run.py | import gradio as gr
from functions import get_status # type: ignore
if gr.NO_RELOAD:
def eat(food): # type: ignore
if food > 0:
return {food_box: food - 1, status_box: "full"}
else:
return {status_box: "hungry"}
else:
def eat(food):
return {status_box: get_status()}
with gr.Blocks() as demo:
food_box = gr.Number(value=10, label="Food Count!!")
status_box = gr.Textbox(label="Status")
gr.Button("Eat").click(fn=eat,
inputs=food_box,
outputs=[food_box, status_box])
demo.launch()
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/reload_mode/run.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_tool_only/run.py | import gradio as gr
def slice_list(lst: list, start: int, end: int) -> list:
"""
A tool that slices a list given a start and end index.
Args:
lst: The list to slice.
start: The start index.
end: The end index.
Returns:
The sliced list.
"""
return lst[start:end]
with gr.Blocks() as demo:
gr.Markdown(
"""
        This is a demo of an MCP-only tool that slices a list.
        Because it is MCP-only, it has no UI.
"""
)
gr.api(
slice_list
)
_, url, _ = demo.launch(mcp_server=True) | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_tool_only/run.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:gradio/screen_recording_utils.py | import asyncio
import os
import shutil
import tempfile
import traceback
from pathlib import Path
DEFAULT_TEMP_DIR = os.environ.get("GRADIO_TEMP_DIR") or str(
Path(tempfile.gettempdir()) / "gradio"
)
async def process_video_with_ffmpeg(input_path, output_path, params):
from ffmpy import FFmpeg
current_input = input_path
temp_files = [input_path]
try:
if params.get("remove_segment_start") and params.get("remove_segment_end"):
start = float(params["remove_segment_start"])
end = float(params["remove_segment_end"])
if start < end:
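                # Remove the [start, end) segment by exporting the clip before
                # `start` and the clip after `end` as separate files, then
                # stitching them back together with ffmpeg's concat demuxer.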
segment_output = tempfile.mkstemp(
suffix="_trimmed.mp4", dir=DEFAULT_TEMP_DIR
)[1]
before_segment = tempfile.mkstemp(
suffix="_before.mp4", dir=DEFAULT_TEMP_DIR
)[1]
after_segment = tempfile.mkstemp(
suffix="_after.mp4", dir=DEFAULT_TEMP_DIR
)[1]
temp_files.extend([segment_output, before_segment, after_segment])
if start > 0:
ff = FFmpeg(
inputs={current_input: None},
outputs={
before_segment: f"-t {start} -c:v libx264 -preset fast -crf 22 -c:a aac -r 30 -y"
},
)
process = await asyncio.create_subprocess_exec(
*ff.cmd.split(),
                        stdout=asyncio.subprocess.PIPE,  # type: ignore
                        stderr=asyncio.subprocess.PIPE,  # type: ignore
)
stdout, stderr = await process.communicate()
ff = FFmpeg(
inputs={current_input: None},
outputs={
after_segment: f"-ss {end} -c:v libx264 -preset fast -crf 22 -c:a aac -r 30 -y"
},
)
process = await asyncio.create_subprocess_exec(
*ff.cmd.split(),
stdout=asyncio.subprocess.PIPE, # type: ignore
stderr=asyncio.subprocess.PIPE, # type: ignore
)
stdout, stderr = await process.communicate()
concat_file = tempfile.mkstemp(
suffix="_concat.txt", dir=DEFAULT_TEMP_DIR
)[1]
temp_files.append(concat_file)
with open(concat_file, "w") as f:
if (
start > 0
and os.path.exists(before_segment)
and os.path.getsize(before_segment) > 0
):
f.write(f"file '{before_segment}'\n")
if (
os.path.exists(after_segment)
and os.path.getsize(after_segment) > 0
):
f.write(f"file '{after_segment}'\n")
if os.path.exists(concat_file) and os.path.getsize(concat_file) > 0:
ff = FFmpeg(
inputs={concat_file: "-f concat -safe 0"},
outputs={segment_output: "-c copy -y"},
)
process = await asyncio.create_subprocess_exec(
*ff.cmd.split(),
                        stdout=asyncio.subprocess.PIPE,  # type: ignore
                        stderr=asyncio.subprocess.PIPE,  # type: ignore
)
stdout, stderr = await process.communicate()
current_input = segment_output
for file in [before_segment, after_segment, concat_file]:
try:
if os.path.exists(file):
os.unlink(file)
except OSError:
pass
if "zoom_effects" in params and params["zoom_effects"]:
zoom_effects = params["zoom_effects"]
for i, effect in enumerate(zoom_effects):
if (
effect.get("boundingBox")
and effect["boundingBox"].get("topLeft")
and effect["boundingBox"].get("bottomRight")
):
top_left = effect["boundingBox"]["topLeft"]
bottom_right = effect["boundingBox"]["bottomRight"]
start_frame = effect.get("start_frame")
duration = effect.get("duration", 2.0)
zoom_output, zoom_temp_files = await zoom_in(
current_input, top_left, bottom_right, duration, start_frame
)
temp_files.extend(zoom_temp_files)
if zoom_output and zoom_output != current_input:
                        if current_input != input_path:
temp_files.append(current_input)
current_input = zoom_output
ff = FFmpeg(
inputs={current_input: None},
outputs={
output_path: "-c:v libx264 -preset fast -crf 22 -c:a aac -r 30 -vsync cfr -y"
},
)
process = await asyncio.create_subprocess_exec(
*ff.cmd.split(),
stdout=asyncio.subprocess.PIPE, # type: ignore
stderr=asyncio.subprocess.PIPE, # type: ignore
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
shutil.copy(current_input, output_path)
current_input = output_path
final_trimmed_output = tempfile.mkstemp(
suffix="_final_trimmed.mp4", dir=DEFAULT_TEMP_DIR
)[1]
temp_files.append(final_trimmed_output)
ff = FFmpeg(
inputs={current_input: None},
outputs={
final_trimmed_output: "-ss 0.5 -c:v libx264 -preset fast -crf 22 -c:a aac -r 30 -y"
},
)
process = await asyncio.create_subprocess_exec(
*ff.cmd.split(),
stdout=asyncio.subprocess.PIPE, # type: ignore
stderr=asyncio.subprocess.PIPE, # type: ignore
)
stdout, stderr = await process.communicate()
if (
process.returncode == 0
and os.path.exists(final_trimmed_output)
and os.path.getsize(final_trimmed_output) > 0
):
            shutil.copy(final_trimmed_output, output_path)
return output_path, temp_files
except Exception:
traceback.print_exc()
return input_path, temp_files
async def zoom_in(
input_path,
top_left=None,
bottom_right=None,
zoom_duration=2.0,
zoom_start_frame=None,
):
from ffmpy import FFmpeg
temp_files = []
try:
if not input_path or not os.path.exists(input_path):
return input_path, temp_files
if zoom_start_frame is None:
zoom_start_frame = 60
else:
try:
zoom_start_frame = float(zoom_start_frame)
except (ValueError, TypeError):
zoom_start_frame = 60
if top_left is None:
top_left = [0.25, 0.25]
if bottom_right is None:
bottom_right = [0.75, 0.75]
try:
x1, y1 = float(top_left[0]), float(top_left[1])
x2, y2 = float(bottom_right[0]), float(bottom_right[1])
except (TypeError, ValueError, IndexError):
x1, y1 = 0.25, 0.25
x2, y2 = 0.75, 0.75
x1 = max(0.0, min(0.9, x1))
y1 = max(0.0, min(0.9, y1))
x2 = max(0.1, min(1.0, x2))
y2 = max(0.1, min(1.0, y2))
if x2 <= x1:
x1, x2 = 0.25, 0.75
if y2 <= y1:
y1, y2 = 0.25, 0.75
box_width = x2 - x1
box_height = y2 - y1
box_center_x = (x1 + x2) / 2
box_center_y = (y1 + y2) / 2
def calculate_proportional_offset(center, size):
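            # Offset the zoom center away from the frame midpoint in
            # proportion to how far the box center already sits from the
            # middle, so the zoomed crop leans toward the selected region.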
if center < 0.5:
distance_from_center = 0.5 - center
return center - (size * (distance_from_center / 0.5))
elif center > 0.5:
distance_from_center = center - 0.5
return center + (size * (distance_from_center / 0.5))
return center
zoom_center_x = calculate_proportional_offset(box_center_x, box_width)
zoom_center_y = calculate_proportional_offset(box_center_y, box_height)
target_zoom = 3.0
max_zoom_by_size = min(1.0 / box_width, 1.0 / box_height)
safety_margin = 0.9
max_zoom_by_size = max_zoom_by_size * safety_margin
dynamic_max_zoom = min(max_zoom_by_size, target_zoom)
dynamic_max_zoom = max(dynamic_max_zoom, 1.3)
duration_cmd = f'ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "{input_path}"'
process = await asyncio.create_subprocess_shell(
duration_cmd,
stdout=asyncio.subprocess.PIPE, # type: ignore
stderr=asyncio.subprocess.PIPE, # type: ignore
)
stdout, stderr = await process.communicate()
try:
output = stdout.decode().strip()
video_duration = float(output)
except (ValueError, TypeError):
video_duration = 10.0
fps = 30.0
zoom_duration = min(float(zoom_duration), video_duration)
zoom_output = tempfile.mkstemp(suffix="_zoomed.mp4", dir=DEFAULT_TEMP_DIR)[1]
temp_files.append(zoom_output)
zoom_in_frames = int(fps / 2)
zoom_out_frames = int(fps / 2)
hold_frames = int(zoom_duration * fps)
width, height = 1920, 1080
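        # The zoompan expression below ramps the zoom factor from 1 up to
        # dynamic_max_zoom over zoom_in_frames, holds it for hold_frames,
        # ramps back down over zoom_out_frames, and stays at 1 (no zoom)
        # outside that window.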
complex_filter = (
f"[0:v]zoompan="
f"z='if(between(on,{zoom_start_frame},{zoom_start_frame + zoom_in_frames + hold_frames + zoom_out_frames}),"
f"if(lt(on-{zoom_start_frame},{zoom_in_frames}),"
f"1+(({dynamic_max_zoom}-1)*(on-{zoom_start_frame})/{zoom_in_frames}),"
f"if(lt(on-{zoom_start_frame},{zoom_in_frames + hold_frames}),"
f"{dynamic_max_zoom},"
f"{dynamic_max_zoom}-(({dynamic_max_zoom}-1)*((on-{zoom_start_frame}-{zoom_in_frames}-{hold_frames}))/{zoom_out_frames})"
f")),1)':"
f"x='iw*{zoom_center_x}-iw/zoom*{zoom_center_x}':"
f"y='ih*{zoom_center_y}-ih/zoom*{zoom_center_y}':"
f"d=1:"
f"fps={fps}:"
f"s={width}x{height}[outv]"
)
ff = FFmpeg(
inputs={input_path: None},
outputs={
zoom_output: (
f'-filter_complex "{complex_filter}" '
f'-map "[outv]" '
f"-map 0:a? "
f"-c:v libx264 "
f"-pix_fmt yuv420p "
f"-movflags +faststart "
f"-preset fast "
f"-r 30 "
f"-c:a aac "
f"-y"
)
},
)
cmd_parts = ff.cmd.split()
process = await asyncio.create_subprocess_exec(
*cmd_parts,
stdout=asyncio.subprocess.PIPE, # type: ignore
stderr=asyncio.subprocess.PIPE, # type: ignore
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
return input_path, temp_files
return zoom_output, temp_files
except Exception:
traceback.print_exc()
return input_path, temp_files
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/screen_recording_utils.py",
"license": "Apache License 2.0",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
gradio-app/gradio:demo/i18n/run.py | import gradio as gr
# create an i18n instance with translations for different languages
i18n = gr.I18n(
en={"greeting": "Hello, welcome to my app!", "name_label": "Your Name", "submit_button": "Greet", "john_doe": "John English", "result_label": "Result"},
es={"greeting": "¡Hola, bienvenido a mi aplicación!", "name_label": "Tu Nombre", "submit_button": "Saludar", "john_doe": "John Spanish", "result_label": "Resultado"},
fr={"greeting": "Bonjour, bienvenue dans mon application!", "name_label": "Votre Nom", "submit_button": "Saluer", "john_doe": "John French", "result_label": "Résultat"},
de={"greeting": "Hallo, willkommen in meiner App!", "name_label": "Dein Name", "submit_button": "Grüßen", "john_doe": "John German", "result_label": "Ergebnis"},
)
def add_hello_world(name):
return "hello " + name
with gr.Blocks() as demo:
gr.Markdown(value=i18n("greeting"))
with gr.Row():
# use i18n() for any string that should be translated
name_input = gr.Textbox(label=i18n("name_label"), value=i18n("john_doe"))
with gr.Row():
output_text = gr.Textbox(label=i18n("result_label"))
with gr.Row():
greet_btn = gr.Button(value=i18n("submit_button"))
with gr.Row():
reset_btn = gr.Button("Reset Name")
greet_btn.click(fn=add_hello_world, inputs=name_input, outputs=output_text)
reset_btn.click(fn=lambda: i18n("john_doe"), inputs=None, outputs=name_input)
gr.Markdown("""
This demo shows Gradio's internationalization (i18n) functionality.
The interface automatically displays text in the user's browser language
(if available in our translations), or falls back to English.
""")
if __name__ == "__main__":
# pass i18n to the launch function
demo.launch(i18n=i18n)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/i18n/run.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:gradio/i18n.py | from __future__ import annotations
import re
import warnings
from typing import Any
class I18nData:
"""
A class that wraps a translation key with metadata.
This object will be serialized and sent to the frontend, where the actual
translation will happen using the frontend's i18n system.
"""
def __init__(self, key: str):
"""
Initialize a I18nData object.
Args:
key: The translation key to be translated in the frontend.
"""
self.key = key
self._type = "translation_metadata"
def to_dict(self) -> dict[str, Any]:
"""
Convert the I18nData object to a dictionary for serialization.
This allows the frontend to recognize it as a translatable object.
"""
return {"__type__": self._type, "key": self.key}
def __str__(self) -> str:
"""
String representation of the I18nData object.
Used when the object is converted to a string.
This returns a special format that can be recognized by the frontend
as needing translation.
"""
import json
return f"__i18n__{json.dumps(self.to_dict())}"
def __repr__(self) -> str:
"""
Representation of the I18nData object for debugging.
"""
return self.__str__()
def __add__(self, other):
"""
Handle string concatenation (self + other).
"""
return str(self) + str(other)
def __radd__(self, other):
"""
Handle string concatenation (other + self).
"""
return str(other) + str(self)
def __getattr__(self, name):
"""
Handle attribute access for I18nData.
This makes it possible to use I18nData objects in contexts
that expect strings with methods.
"""
if name.startswith("__") and name.endswith("__"):
raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")
def method(*_args, **_kwargs):
return self
return method
def tojson(self) -> dict[str, Any]:
"""
Convert the I18nData object to a JSON-serializable dictionary.
This is used by the default Python JSON serializer.
"""
return self.to_dict()
class I18n:
"""
Handles internationalization (i18n) for Gradio applications.
Stores translation dictionaries and provides a method to retrieve translation keys.
The translation lookup happens on the frontend based on the browser's locale
and the provided translation dictionaries.
"""
# BCP 47 language tag regex pattern
_LOCALE_PATTERN = re.compile(r"^[a-z]{2,3}(-[A-Za-z0-9]{2,8})*$")
def __init__(self, **translations: dict[str, str]):
"""
Initializes the I18n class.
Args:
**translations: Each keyword argument should be a locale code (e.g., "en", "fr") with a
dictionary value, which maps translation keys to translated strings.
Example: gr.I18n(en={"greeting": "Hello"}, es={"greeting": "Hola"})
These translations can be passed to the frontend for use there.
"""
self.translations = {}
for locale, translation_dict in translations.items():
if not self._is_valid_locale(locale):
warnings.warn(
f"Invalid locale code: '{locale}'. Locale codes should follow BCP 47 format (e.g., 'en', 'en-US'). "
f"This locale will still be included, but may not work correctly.",
UserWarning,
)
self.translations[locale] = translation_dict
def _is_valid_locale(self, locale: str) -> bool:
return bool(self._LOCALE_PATTERN.match(locale))
def __call__(self, key: str) -> I18nData:
"""
Returns a I18nData object containing the translation key.
This metadata object will be serialized and sent to the frontend,
where it will be translated by the frontend's i18n system.
Args:
key: The key to identify the translation string (e.g., "submit_button").
Returns:
A I18nData object containing the translation key.
"""
return I18nData(key)
@property
def translations_dict(self) -> dict[str, dict[str, str]]:
"""
Returns the dictionary of translations provided during initialization.
These can be passed to the frontend for use in its translation system.
"""
return self.translations
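# Minimal usage sketch (illustrative, not part of this module); the metadata
# string shown is exactly what I18nData.__str__ produces for the given key:
#
#     i18n = I18n(en={"greeting": "Hello"}, fr={"greeting": "Bonjour"})
#     label = i18n("greeting")  # -> I18nData("greeting")
#     str(label)
#     # -> '__i18n__{"__type__": "translation_metadata", "key": "greeting"}'
#     i18n.translations_dict  # passed to the frontend for locale lookup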
| {
"repo_id": "gradio-app/gradio",
"file_path": "gradio/i18n.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
gradio-app/gradio:demo/render_preserve_key/run.py | import gradio as gr
import random
with gr.Blocks() as demo:
number_of_boxes = gr.Slider(1, 5, step=1, value=3, label="Number of Boxes")
@gr.render(inputs=[number_of_boxes])
def create_boxes(number_of_boxes):
for i in range(number_of_boxes):
with gr.Row(key=f'row-{i}'):
number_box = gr.Textbox(
label=f"Default Label",
info="Default Info",
key=f"box-{i}",
preserved_by_key=["label", "value"],
interactive=True
)
change_label_btn = gr.Button("Change Label", key=f"btn-{i}")
change_label_btn.click(
lambda: gr.Textbox(
label=random.choice("ABCDE"),
info=random.choice("ABCDE")),
outputs=number_box
)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "gradio-app/gradio",
"file_path": "demo/render_preserve_key/run.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/letter_counter/run.py | import gradio as gr
def letter_counter(word, letter):
"""
Count the number of occurrences of a letter in a word or text.
Args:
word (str): The input text to search through
letter (str): The letter to search for
Returns:
        int: The number of times the letter appears in the text
"""
word = word.lower()
letter = letter.lower()
count = word.count(letter)
return count
demo = gr.Interface(
fn=letter_counter,
inputs=[gr.Textbox("strawberry"), gr.Textbox("r")],
outputs=[gr.Number()],
title="Letter Counter",
description="Enter text and a letter to count how many times the letter appears in the text.",
api_name="predict"
)
if __name__ == "__main__":
demo.launch(mcp_server=True)
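# Client-side sketch (an assumption for illustration: the app is running
# locally on Gradio's default port):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     client.predict("strawberry", "r", api_name="/predict")  # -> 3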
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/letter_counter/run.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:demo/mcp_tools/run.py | import numpy as np
import gradio as gr
from pathlib import Path
import os
from PIL import Image
def prime_factors(n: str):
"""
Compute the prime factorization of a positive integer.
Args:
n (str): The integer to factorize. Must be greater than 1.
"""
n_int = int(n)
if n_int <= 1:
raise ValueError("Input must be an integer greater than 1.")
factors = []
while n_int % 2 == 0:
factors.append(2)
n_int //= 2
divisor = 3
while divisor * divisor <= n_int:
while n_int % divisor == 0:
factors.append(divisor)
n_int //= divisor
divisor += 2
if n_int > 1:
factors.append(n_int)
return factors
def generate_cheetah_image():
"""
Generate a cheetah image.
Returns:
The generated cheetah image.
"""
return Path(os.path.dirname(__file__)) / "cheetah.jpg"
def image_orientation(image: Image.Image) -> str:
"""
Returns whether image is portrait or landscape.
Args:
image (Image.Image): The image to check.
Returns:
str: "Portrait" if image is portrait, "Landscape" if image is landscape.
"""
return "Portrait" if image.height > image.width else "Landscape"
def sepia(input_img):
"""
Apply a sepia filter to the input image.
Args:
input_img (np.array): The input image to apply the sepia filter to.
Returns:
The sepia filtered image.
"""
sepia_filter = np.array([
[0.393, 0.769, 0.189],
[0.349, 0.686, 0.168],
[0.272, 0.534, 0.131]
])
sepia_img = input_img.dot(sepia_filter.T)
sepia_img /= sepia_img.max()
return sepia_img
demo = gr.TabbedInterface(
[
gr.Interface(prime_factors, gr.Textbox("1001"), gr.Textbox()),
gr.Interface(generate_cheetah_image, None, gr.Image(), api_description="Generates a cheetah image. No arguments are required."),
gr.Interface(image_orientation, gr.Image(type="pil"), gr.Textbox(), api_visibility="private"),
gr.Interface(sepia, gr.Image(), gr.Image(), api_description=False),
],
[
"Prime Factors",
"Cheetah Image",
"Image Orientation Checker",
"Sepia Filter",
]
)
if __name__ == "__main__":
demo.launch(mcp_server=True)
| {
"repo_id": "gradio-app/gradio",
"file_path": "demo/mcp_tools/run.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
gradio-app/gradio:test/test_mcp.py | import copy
import json
import os
import tempfile
import pytest
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
from PIL import Image
from starlette.requests import Request
import gradio as gr
from gradio.data_classes import FileData
from gradio.mcp import GradioMCPServer
def test_gradio_mcp_server_initialization(test_mcp_app):
server = GradioMCPServer(test_mcp_app)
assert server.blocks == test_mcp_app
assert server.mcp_server is not None
def test_get_block_fn_from_tool_name(test_mcp_app):
server = GradioMCPServer(test_mcp_app)
result = server.get_block_fn_from_endpoint_name("test_tool")
assert result == test_mcp_app.fns[0]
result = server.get_block_fn_from_endpoint_name("nonexistent_tool")
assert result is None
def test_generate_tool_names_correctly_for_interfaces():
def echo(x):
return x
class MyCallable:
def __call__(self, x):
return x
app = gr.TabbedInterface(
[
gr.Interface(echo, "text", "text"),
gr.Interface(echo, "image", "image"),
gr.Interface(lambda x: x, "audio", "audio"),
gr.Interface(MyCallable(), "text", "text"),
]
)
server = GradioMCPServer(app)
assert list(server.tool_to_endpoint.keys()) == [
"echo",
"echo_1",
"_lambda_",
"MyCallable",
]
def test_convert_strings_to_filedata(test_mcp_app):
server = GradioMCPServer(test_mcp_app)
test_data = {
"text": "test text",
"image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==",
}
filedata_positions: list[list[str | int]] = [["image"]]
result = server.convert_strings_to_filedata(test_data, filedata_positions)
assert isinstance(result["image"], dict)
result["image"] = FileData(**result["image"]) # type: ignore
assert os.path.exists(result["image"].path)
test_data = {"image": "invalid_data"}
with pytest.raises(ValueError):
server.convert_strings_to_filedata(test_data, filedata_positions)
def test_postprocess_output_data(test_mcp_app):
server = GradioMCPServer(test_mcp_app)
fake_root_url = "http://localhost:7860"
temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
temp_file.close()
try:
img = Image.new("RGB", (10, 10), color="red")
img.save(temp_file.name)
url = f"http://localhost:7860/gradio_api/file={temp_file.name}"
test_data = [
{"path": temp_file.name, "url": url, "meta": {"_type": "gradio.FileData"}}
]
result = server.postprocess_output_data(test_data, fake_root_url)
assert len(result) == 2
assert result[0].type == "image"
assert result[0].mimeType == "image/png" # type: ignore
assert result[1].type == "text"
assert url in result[1].text # type: ignore
finally:
os.unlink(temp_file.name)
svg_data_uri = "data:image/svg+xml,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%22100%22%20height%3D%22100%22%3E%3Ccircle%20cx%3D%2250%22%20cy%3D%2250%22%20r%3D%2240%22%20fill%3D%22blue%22%2F%3E%3C%2Fsvg%3E"
test_data = [
{
"path": None,
"url": svg_data_uri,
"meta": {"_type": "gradio.FileData"},
"orig_name": "test.svg",
}
]
result = server.postprocess_output_data(test_data, fake_root_url)
assert len(result) == 2
assert result[0].type == "image"
assert result[0].mimeType == "image/svg+xml" # type: ignore
assert result[1].type == "text"
assert "Image URL:" in result[1].text # type: ignore
assert "/gradio_api/file=" in result[1].text # type: ignore
test_data = ["test text"]
result = server.postprocess_output_data(test_data, fake_root_url)
assert len(result) == 1
assert result[0].type == "text"
assert result[0].text == "test text" # type: ignore
def test_simplify_filedata_schema(test_mcp_app):
server = GradioMCPServer(test_mcp_app)
test_schema = {
"type": "object",
"properties": {
"text": {"type": "string"},
"image": {
"type": "object",
"properties": {"meta": {"default": {"_type": "gradio.FileData"}}},
},
},
}
old_schema = copy.deepcopy(test_schema)
simplified_schema, filedata_positions = server.simplify_filedata_schema(test_schema)
assert simplified_schema["properties"]["image"]["type"] == "string"
assert filedata_positions == [["image"]]
# Verify that the original schema is not modified
assert test_schema == old_schema
def test_tool_prefix_character_replacement(test_mcp_app):
test_cases = [
("test-space", "test_space_test_tool"),
("flux.1_schnell", "flux_1_schnell_test_tool"),
("test\\backslash", "test_backslash_test_tool"),
("test:colon spaces ", "test_colon_spaces__test_tool"),
]
original_system = os.environ.get("SYSTEM")
original_space_id = os.environ.get("SPACE_ID")
try:
os.environ["SYSTEM"] = "spaces"
for space_id, tool_name in test_cases:
os.environ["SPACE_ID"] = space_id
server = GradioMCPServer(test_mcp_app)
assert tool_name in server.tool_to_endpoint
finally:
if original_system is not None:
os.environ["SYSTEM"] = original_system
else:
os.environ.pop("SYSTEM", None)
if original_space_id is not None:
os.environ["SPACE_ID"] = original_space_id
else:
os.environ.pop("SPACE_ID", None)
@pytest.mark.asyncio
async def test_associative_keyword_in_schema():
def test_tool(x):
return x
demo = gr.Interface(test_tool, "image", "image")
server = GradioMCPServer(demo)
scope = {
"type": "http",
"headers": [],
"server": ("localhost", 7860),
"path": "/gradio_api/mcp/schema",
"query_string": "",
}
request = Request(scope)
schema = (await server.get_complete_schema(request)).body.decode("utf-8") # type: ignore
schema = json.loads(schema)
assert (
schema[0]["inputSchema"]["properties"]["x"]["format"]
== "Gradio File Input - a http or https url to a file"
)
assert (
"to upload the file to the gradio app and create a Gradio File Input"
in schema[0]["description"]
)
assert schema[0]["meta"]["file_data_present"]
@pytest.mark.asyncio
async def test_tool_selection_via_query_params():
def tool_1(x):
return x
def tool_2(x):
return x
demo = gr.TabbedInterface(
[
gr.Interface(tool_1, "image", "image"),
gr.Interface(tool_2, "image", "image"),
]
)
server = GradioMCPServer(demo)
scope = {
"type": "http",
"headers": [],
"server": ("localhost", 7860),
"path": "/gradio_api/mcp/schema",
"query_string": "",
}
request = Request(scope)
schema = (await server.get_complete_schema(request)).body.decode("utf-8") # type: ignore
schema = json.loads(schema)
assert schema[0]["name"] == "tool_1"
assert schema[1]["name"] == "tool_2"
scope = {
"type": "http",
"headers": [],
"server": ("localhost", 7860),
"path": "/gradio_api/mcp/schema",
"query_string": "tools=tool_2",
}
request = Request(scope)
schema = (await server.get_complete_schema(request)).body.decode("utf-8") # type: ignore
schema = json.loads(schema)
assert len(schema) == 1
assert schema[0]["name"] == "tool_2"
@pytest.mark.asyncio
@pytest.mark.serial
async def test_mcp_streamable_http_client():
def double(word: str) -> str:
"""
Doubles the input word.
Parameters:
word: The word to double
Returns:
The doubled word
"""
return word * 2
with gr.Blocks() as demo:
input_box = gr.Textbox(label="Input")
output_box = gr.Textbox(label="Output")
input_box.change(double, input_box, output_box, api_name="double")
_, local_url, _ = demo.launch(prevent_thread_lock=True, mcp_server=True)
mcp_url = f"{local_url}gradio_api/mcp/"
try:
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools_response = await session.list_tools()
assert len(tools_response.tools) == 1
tool = tools_response.tools[0]
assert "double" in tool.name
assert "Doubles the input word" in tool.description # type: ignore
result = await session.call_tool(tool.name, arguments={"word": "Hello"}) # type: ignore
assert len(result.content) == 1 # type: ignore
assert result.content[0].text == "HelloHello" # type: ignore
finally:
demo.close()
@pytest.mark.serial
@pytest.mark.asyncio
async def test_mcp_streamable_http_client_with_progress_callback():
progress_updates = []
def slow_processor(text: str, progress=gr.Progress()) -> str:
"""
Processes text slowly with progress updates.
Parameters:
text: The text to process
Returns:
The processed text
"""
total = len(text)
for i in range(total):
progress((i + 1) / total, desc=f"Processing character {i + 1}/{total}")
return text.upper()
with gr.Blocks() as demo:
input_box = gr.Textbox(label="Input")
output_box = gr.Textbox(label="Output")
input_box.submit(slow_processor, input_box, output_box, api_name="process")
demo.queue()
_, local_url, _ = demo.launch(prevent_thread_lock=True, mcp_server=True)
mcp_url = f"{local_url}gradio_api/mcp/"
try:
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools_response = await session.list_tools()
assert len(tools_response.tools) == 1
tool = tools_response.tools[0]
assert "process" in tool.name
async def progress_callback(
progress: float, total: float | None, message: str | None
):
progress_updates.append(
{"progress": progress, "total": total, "message": message}
)
result = await session.call_tool(
tool.name,
arguments={"text": "test"},
progress_callback=progress_callback,
meta={"progressToken": "test-token-123"},
)
assert len(result.content) == 1 # type: ignore
assert result.content[0].text == "TEST" # type: ignore
assert len(progress_updates) > 0, "Expected to receive progress updates"
finally:
demo.close()
@pytest.mark.asyncio
@pytest.mark.serial
async def test_mcp_streamable_http_client_with_stateful_app(stateful_mcp_app):
_, local_url, _ = stateful_mcp_app.launch(prevent_thread_lock=True, mcp_server=True)
mcp_url = f"{local_url}gradio_api/mcp/"
try:
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools_response = await session.list_tools()
assert len(tools_response.tools) == 1
tool = tools_response.tools[0]
result = await session.call_tool(
tool.name,
arguments={"name": "test", "flag": True, "gallery_images": 42},
)
assert len(result.content) == 1 # type: ignore
assert (
result.content[0].text # type: ignore
== "name=test, hidden_state=hidden_value, flag=True, gallery=42"
)
finally:
stateful_mcp_app.close()
@pytest.mark.asyncio
@pytest.mark.serial
async def test_x_gradio_user_mcp_gets_set():
def fn(name: str, request: gr.Request) -> str:
return f"Hello, {name}! Your x-gradio-user is {request.headers.get('x-gradio-user', 'not provided')}"
app = gr.Interface(fn, "text", "text")
_, local_url, _ = app.launch(prevent_thread_lock=True, mcp_server=True)
mcp_url = f"{local_url}gradio_api/mcp/"
try:
async with streamablehttp_client(mcp_url) as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
tools_response = await session.list_tools()
tool = tools_response.tools[0]
result = await session.call_tool(
tool.name,
arguments={"name": "Gradio"},
)
assert len(result.content) == 1 # type: ignore
assert (
result.content[0].text # type: ignore
== "Hello, Gradio! Your x-gradio-user is mcp"
)
finally:
app.close()
| {
"repo_id": "gradio-app/gradio",
"file_path": "test/test_mcp.py",
"license": "Apache License 2.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:tests_v1/plugins/model_plugins/test_quantization_plugin.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from llamafactory.v1.config.model_args import ModelArguments
from llamafactory.v1.core.model_engine import ModelEngine
bitsandbytes = pytest.importorskip("bitsandbytes")
def check_quantization_status(model):
quantized_info = {"bnb": []}
for name, module in model.named_modules():
# check BitsAndBytes quantization
        if isinstance(
            module, (bitsandbytes.nn.modules.Linear8bitLt, bitsandbytes.nn.modules.Linear4bit)
        ):
quantized_info["bnb"].append(name)
return quantized_info
@pytest.mark.runs_on(["cuda"])
@pytest.mark.parametrize("name, quantization_bit", [("bnb", 4), ("auto", 4)])
def test_quantization_plugin(name, quantization_bit):
model_args = ModelArguments(
model="llamafactory/tiny-random-qwen3",
quant_config={
"name": name,
"quantization_bit": quantization_bit,
},
)
model_engine = ModelEngine(model_args=model_args)
quantized_info = check_quantization_status(model_engine.model)
print(f"Quantized weights for method {name} with {quantization_bit} bit: {quantized_info}")
assert any(v for v in quantized_info.values()), "model is not quantized properly."
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/plugins/model_plugins/test_quantization_plugin.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:tests_v1/plugins/model_plugins/test_peft.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from peft import LoraConfig, PeftModel, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer
from llamafactory.v1.plugins.model_plugins import peft as peft_module
from llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model
TINY_MODEL = "llamafactory/tiny-random-qwen3"
@pytest.fixture(scope="module")
def model_path():
return TINY_MODEL
@pytest.fixture(scope="function")
def model(model_path):
return AutoModelForCausalLM.from_pretrained(model_path)
@pytest.fixture(scope="function")
def tokenizer(model_path):
return AutoTokenizer.from_pretrained(model_path)
@pytest.fixture(scope="function")
def adapter_path(tmp_path):
# Create a dummy adapter
lora_config = LoraConfig(
r=8,
lora_alpha=16,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)
peft_model = get_peft_model(base_model, lora_config)
save_path = tmp_path / "test_adapter"
peft_model.save_pretrained(save_path)
return str(save_path)
def test_find_all_linear_modules(model):
"""Verify linear modules are discoverable and include q_proj / v_proj for tiny-random-qwen3."""
modules = peft_module._find_all_linear_modules(model)
expected_subset = {"q_proj", "v_proj"}
assert expected_subset.issubset(set(modules))
def test_get_lora_model(model):
"""Verify a PeftModel is returned and LoRA config takes effect."""
config = {"name": "lora", "r": 8, "target_modules": "all", "lora_alpha": 16}
model = peft_module.get_lora_model(model, config, is_train=True)
assert isinstance(model, PeftModel)
assert model.peft_config["default"].r == 8
assert "q_proj" in model.peft_config["default"].target_modules
def test_get_freeze_model_layers(model):
"""Verify layer-wise freezing: only the last layer stays trainable."""
# Freeze all but last layer
config = {"name": "freeze", "freeze_trainable_layers": 1, "freeze_trainable_modules": "all"}
# Ensure we start with something known
model = peft_module.get_freeze_model(model, config, is_train=True)
num_layers = model.config.num_hidden_layers
assert num_layers > 0
for name, param in model.named_parameters():
if f"layers.{num_layers - 1}" in name:
assert param.requires_grad, f"{name} should be trainable"
elif "layers.0" in name and num_layers > 1:
assert not param.requires_grad, f"{name} should be frozen"
def test_get_freeze_model_modules(model):
"""Verify module-wise freezing: only last-layer self_attn is trainable."""
# Freeze specific modules (e.g. only self_attn)
config = {"name": "freeze", "freeze_trainable_layers": 1, "freeze_trainable_modules": "self_attn"}
model = peft_module.get_freeze_model(model, config, is_train=True)
num_layers = model.config.num_hidden_layers
for name, param in model.named_parameters():
if f"layers.{num_layers - 1}" in name and "self_attn" in name:
assert param.requires_grad, f"{name} should be trainable"
else:
assert not param.requires_grad, f"{name} should be frozen"
def test_load_adapter_single_for_inference(model, adapter_path):
"""Verify single adapter is merged+unloaded in inference mode."""
# Test loading single adapter for inference (merge and unload)
model_result = peft_module.load_adapter(model, adapter_path, is_train=False)
assert not isinstance(model_result, PeftModel)
def test_load_adapter_resume_train(model, adapter_path):
"""Verify training mode returns a trainable PeftModel."""
# Test loading for training
model_result = peft_module.load_adapter(model, adapter_path, is_train=True)
assert isinstance(model_result, PeftModel)
def test_load_adapter_train_multiple_disallowed(model, adapter_path):
"""Verify multiple adapters are rejected in training mode."""
with pytest.raises(ValueError, match="only a single LoRA adapter"):
peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)
def test_load_adapter_infer_multiple_merges(model, adapter_path):
"""Verify multiple adapters are merged in inference mode."""
# Test merging multiple adapters
model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)
assert not isinstance(model_result, PeftModel)
def test_merge_and_export_model(tmp_path, adapter_path):
"""Verify merge_and_export_model produces export artifacts."""
export_dir = tmp_path / "export"
args_dict = {
"model": TINY_MODEL,
"peft_config": {
"name": "lora",
"adapter_name_or_path": adapter_path,
"export_dir": str(export_dir),
"export_size": 1,
"infer_dtype": "float16",
},
}
merge_and_export_model(args_dict)
assert export_dir.exists()
assert (export_dir / "config.json").exists()
assert (export_dir / "model.safetensors").exists()
assert (export_dir / "tokenizer_config.json").exists()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/plugins/model_plugins/test_peft.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:docs/conf.py | # Configuration file for the Sphinx documentation builder.
# Define common settings here
project = "LlamaFactory"
copyright = "2024, LlamaFactory Team"
author = "LlamaFactory Team"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"myst_parser",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_js_files = [
"js/switcher.js",
]
html_css_files = [
"css/lang-switcher.css",
]
myst_enable_extensions = [
"colon_fence",
"deflist",
]
myst_heading_anchors = 3
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "docs/conf.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
hiyouga/LlamaFactory:docs/en/conf.py | import os
import sys
# Add parent dir to path to allow importing conf.py
sys.path.insert(0, os.path.abspath(".."))
from conf import * # noqa: F403
# Language settings
language = "en"
html_search_language = "en"
# Static files
# Point to the root _static directory
html_static_path = ["../_static"]
# Add custom JS for language switcher
html_js_files = [
"js/switcher.js",
]
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "docs/en/conf.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
hiyouga/LlamaFactory:docs/zh/conf.py | import os
import sys
# Add parent dir to path to allow importing conf.py
sys.path.insert(0, os.path.abspath(".."))
from conf import * # noqa: F403
# Language settings
language = "zh_CN"
html_search_language = "zh"
# Static files
# Point to the root _static directory
html_static_path = ["../_static"]
# Add custom JS for language switcher
html_js_files = [
"js/switcher.js",
]
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "docs/zh/conf.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
hiyouga/LlamaFactory:scripts/hf2dcp.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a HuggingFace model to DCP checkpoint format.
Usage:
python scripts/hf2dcp.py convert --hf_path=/path/to/hf --dcp_path=/path/to/dcp
Arguments:
hf_path: Path to the HuggingFace model directory.
dcp_path: Output path (directory) for DCP checkpoint.
"""
import fire
import torch
import torch.distributed.checkpoint as dcp
from transformers import AutoModelForCausalLM
def convert(hf_path: str, dcp_path: str) -> None:
"""Convert HF model weights to DCP.
Args:
hf_path: HuggingFace model directory.
dcp_path: Output path (directory) for DCP checkpoint.
"""
if not hf_path or not dcp_path:
raise ValueError("Both 'hf_path' and 'dcp_path' are required.")
print(f"Loading HF model from {hf_path}...")
model = AutoModelForCausalLM.from_pretrained(hf_path, device_map="cpu", torch_dtype=torch.bfloat16)
print(f"Saving to DCP format at {dcp_path}...")
dcp.save(model.state_dict(), checkpoint_id=dcp_path)
print("Done!")
def help() -> None:
"""Show help message."""
print(__doc__)
if __name__ == "__main__":
fire.Fire({"convert": convert, "help": help, "--convert": convert})
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "scripts/hf2dcp.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/trainer_plugins/distributed/fsdp2.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
import torch
import torch.nn as nn
from peft.tuners.lora import LoraLayer
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict, set_model_state_dict
from torch.distributed.fsdp import (
CPUOffloadPolicy,
MixedPrecisionPolicy,
fully_shard,
)
from ....accelerator.helper import get_current_accelerator
from ....accelerator.interface import DistributedInterface
from ....utils.logging import get_logger
from ....utils.types import HFModel, Processor
logger = get_logger(__name__)
def get_transformer_layer_cls(model: HFModel) -> type[nn.Module] | None:
no_split_modules = getattr(model, "_no_split_modules", None)
if no_split_modules:
if isinstance(no_split_modules, (list, tuple)):
for name, module in model.named_modules():
for cls_name in no_split_modules:
if module.__class__.__name__ == cls_name:
return module.__class__
if hasattr(model, "model") and hasattr(model.model, "layers"):
return type(model.model.layers[0])
if hasattr(model, "layers"):
return type(model.layers[0])
return None
def save_model(model: HFModel, output_dir: str, processor: Processor) -> None:
if DistributedInterface().get_rank() == 0:
logger.info("Gathering state dict for saving...")
options = StateDictOptions(full_state_dict=True, cpu_offload=True)
state_dict = get_model_state_dict(model, options=options)
if DistributedInterface().get_rank() == 0:
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir, state_dict=state_dict, max_shard_size="4GB")
processor.save_pretrained(output_dir, max_shard_size="4GB")
logger.info(f"Model saved to {output_dir}")
class FSDP2Engine:
def __init__(self, dist_config: dict):
self.dist_interface = DistributedInterface()
self.rank = self.dist_interface.get_rank()
self.local_rank = self.dist_interface.get_local_rank()
self.world_size = self.dist_interface.get_world_size()
self.mixed_precision = dist_config.get("mixed_precision", "bf16")
self.reshard_after_forward = dist_config.get("reshard_after_forward", True)
self.offload_params = dist_config.get("offload_params", False)
self.pin_memory = dist_config.get("pin_memory", True)
self.dcp_path = dist_config.get("dcp_path", None)
self.device_mesh = self.dist_interface.data_device_mesh
if self.device_mesh is None:
logger.warning(
"Device Mesh not found in DistributedInterface. FSDP2 might fail if not running in distributed mode."
)
if self.device_mesh is not None:
try:
self.fsdp_mesh = self.device_mesh["dp"]
except Exception:
self.fsdp_mesh = self.device_mesh
logger.info(f"Using Device Mesh: {self.fsdp_mesh}")
else:
self.fsdp_mesh = None
def get_mp_policy(self) -> MixedPrecisionPolicy:
if self.mixed_precision == "bf16":
param_dtype = torch.bfloat16
reduce_dtype = torch.float32
elif self.mixed_precision == "fp16":
param_dtype = torch.float16
reduce_dtype = torch.float32
else:
param_dtype = torch.float32
reduce_dtype = torch.float32
return MixedPrecisionPolicy(
param_dtype=param_dtype,
reduce_dtype=reduce_dtype,
cast_forward_inputs=True,
)
def is_lora_module_wrap(self, model) -> bool:
return any(isinstance(module, LoraLayer) for module in model.modules())
def prepare_model(self, model: HFModel) -> HFModel:
if self.fsdp_mesh is None:
logger.warning("No FSDP Mesh available, skipping FSDP wrapping.")
return model
mp_policy = self.get_mp_policy()
layer_cls = get_transformer_layer_cls(model)
if layer_cls is None:
logger.warning(
"Could not identify Transformer Layer class, applying FSDP to the whole model structure only."
)
transformer_layer_cls_to_wrap = set()
else:
logger.info(f"Applying per-layer FSDP to {layer_cls.__name__}")
transformer_layer_cls_to_wrap = {layer_cls}
if self.is_lora_module_wrap(model):
lora_modules = []
for module in model.modules():
if len(list(module.children())) != 0:
continue
if any(param.requires_grad for param in module.parameters(recurse=False)):
lora_modules.append(module)
for module in lora_modules:
fully_shard(
module,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
logger.info("Applying FSDP wrap for LoRA layer separately.")
for name, module in model.named_modules():
should_wrap = False
if type(module) in transformer_layer_cls_to_wrap:
should_wrap = True
elif isinstance(module, nn.Embedding):
if not getattr(model.config, "tie_word_embeddings", True):
should_wrap = True
if should_wrap:
fully_shard(
module,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
# BaseTrainer is the single source of truth for gradient checkpointing.
# FSDP2 only applies the input-grad compatibility hook when checkpointing is already enabled.
if getattr(model, "is_gradient_checkpointing", False):
if self.rank == 0:
logger.info("Gradient checkpointing is enabled. Applying FSDP2 input grad preparation.")
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
fully_shard(
model,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
return model
@torch.no_grad()
def materialize_and_load(self, model: HFModel, hf_model_path: str, dcp_path: str = None):
if self.rank == 0:
logger.info("Materializing sharded model params...")
device = get_current_accelerator()
model.to_empty(device=device)
if dcp_path and os.path.exists(dcp_path):
if self.rank == 0:
logger.info(f"DCP path found at {dcp_path}. Using efficient Sharded Loading (DCP Load).")
self._load_from_dcp(model, dcp_path)
else:
if self.rank == 0:
if dcp_path:
logger.warning(f"DCP path {dcp_path} not found.")
logger.info("Using HF Meta Loading (Chunk Load).")
self._load_weights_from_hf_checkpoint(model, hf_model_path)
return model
def shard_model(self, model: HFModel) -> HFModel:
if model.device.type == "meta":
model = self.prepare_model(model)
model = self.materialize_and_load(model, hf_model_path=model.config.name_or_path, dcp_path=self.dcp_path)
else:
model = self.prepare_model(model)
return model
def _load_from_dcp(self, model: HFModel, dcp_path: str):
import torch.distributed.checkpoint as dcp
try:
if self.rank == 0:
logger.info(f"Loading distributed checkpoint from {dcp_path} ...")
options = StateDictOptions(full_state_dict=False, cpu_offload=True)
local_state_dict = get_model_state_dict(model, options=options)
dcp.load(state_dict=local_state_dict, checkpoint_id=dcp_path)
set_model_state_dict(model, local_state_dict, options=options)
if self.rank == 0:
logger.info("DCP weights loaded successfully.")
except Exception as e:
logger.error(f"Failed to load from DCP: {e}")
raise e
def _load_weights_from_hf_checkpoint(self, model: HFModel, hf_model_path: str):
import glob
import json
hf_model_path = self._resolve_hf_checkpoint_dir(hf_model_path)
if self.rank == 0:
logger.info(f"Loading weights from {hf_model_path} ...")
index_file = os.path.join(hf_model_path, "model.safetensors.index.json")
is_safetensors = True
checkpoint_files = []
if os.path.exists(index_file):
with open(index_file) as f:
index = json.load(f)
checkpoint_files = sorted(set(index["weight_map"].values()))
checkpoint_files = [os.path.join(hf_model_path, f) for f in checkpoint_files]
elif os.path.exists(os.path.join(hf_model_path, "model.safetensors")):
checkpoint_files = [os.path.join(hf_model_path, "model.safetensors")]
else:
is_safetensors = False
index_file = os.path.join(hf_model_path, "pytorch_model.bin.index.json")
if os.path.exists(index_file):
with open(index_file) as f:
index = json.load(f)
checkpoint_files = sorted(set(index["weight_map"].values()))
checkpoint_files = [os.path.join(hf_model_path, f) for f in checkpoint_files]
elif os.path.exists(os.path.join(hf_model_path, "pytorch_model.bin")):
checkpoint_files = [os.path.join(hf_model_path, "pytorch_model.bin")]
else:
checkpoint_files = sorted(glob.glob(os.path.join(hf_model_path, "*.safetensors")))
if checkpoint_files:
is_safetensors = True
else:
checkpoint_files = sorted(glob.glob(os.path.join(hf_model_path, "*.bin")))
if not checkpoint_files:
raise ValueError(f"No checkpoint files found in {hf_model_path}")
param_map = dict(model.named_parameters())
total_files = len(checkpoint_files)
for i, ckpt_file in enumerate(checkpoint_files):
if self.rank == 0:
logger.info(f"[{i + 1}/{total_files}] Loading {os.path.basename(ckpt_file)} ...")
if is_safetensors:
from safetensors import safe_open
with safe_open(ckpt_file, framework="pt", device="cpu") as f:
for key in f.keys():
if key in param_map:
tensor = f.get_tensor(key)
self._copy_weights(param_map[key], tensor)
else:
state_dict = torch.load(ckpt_file, map_location="cpu")
for key, tensor in state_dict.items():
if key in param_map:
self._copy_weights(param_map[key], tensor)
del state_dict
gc.collect()
def _resolve_hf_checkpoint_dir(self, hf_model_path: str) -> str:
"""Resolve a HF model identifier or local path to a local directory containing checkpoint files.
- If `hf_model_path` is an existing directory, return it.
- If it's a file path, return its parent directory.
- Otherwise treat it as a Hugging Face Hub repo id and download/resolve to the local cache dir.
"""
if not hf_model_path:
return hf_model_path
# Local directory or file path.
if os.path.isdir(hf_model_path):
return hf_model_path
if os.path.isfile(hf_model_path):
return os.path.dirname(hf_model_path)
# HuggingFace Hub repo id: snapshot to local cache so we can glob/index files.
try:
from huggingface_hub import snapshot_download
except ImportError as e:
raise ValueError(
f"hf_model_path='{hf_model_path}' does not exist locally and huggingface_hub is not available "
f"to download it. Please provide a local model directory or install huggingface_hub. Error: {e}"
) from e
revision = os.getenv("HF_REVISION")
offline = os.getenv("HF_HUB_OFFLINE") == "1" or os.getenv("TRANSFORMERS_OFFLINE") == "1"
# In distributed runs, let rank0 download first to avoid N-way concurrent downloads.
if torch.distributed.is_available() and torch.distributed.is_initialized():
if self.rank == 0:
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=offline,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
logger.info(f"Resolved HF repo id '{hf_model_path}' to local dir: {local_dir}")
torch.distributed.barrier()
if self.rank != 0:
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=True,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
return local_dir
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=offline,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
if self.rank == 0:
logger.info(f"Resolved HF repo id '{hf_model_path}' to local dir: {local_dir}")
return local_dir
def _copy_weights(self, param, loaded_tensor):
from torch.distributed._tensor import DTensor, Shard
if loaded_tensor.dtype != param.dtype:
loaded_tensor = loaded_tensor.to(param.dtype)
if isinstance(param, DTensor):
shard_placement = None
mesh_dim = -1
for i, placement in enumerate(param.placements):
if isinstance(placement, Shard):
shard_placement = placement
mesh_dim = i
break
local_tensor = param.to_local()
if shard_placement is None:
local_tensor.copy_(loaded_tensor)
else:
dim = shard_placement.dim
mesh = param.device_mesh
my_coordinate = mesh.get_coordinate()
if my_coordinate is None:
return
rank_in_dim = my_coordinate[mesh_dim]
world_size_in_dim = mesh.size(mesh_dim)
full_size = param.shape[dim]
chunk_size = (full_size + world_size_in_dim - 1) // world_size_in_dim
start = rank_in_dim * chunk_size
end = min(start + chunk_size, full_size)
if start >= full_size:
return
sliced_tensor = loaded_tensor.narrow(dim, start, end - start)
slices = [slice(None)] * local_tensor.ndim
slices[dim] = slice(0, sliced_tensor.shape[dim])
local_tensor[tuple(slices)].copy_(sliced_tensor)
else:
param.data.copy_(loaded_tensor)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/trainer_plugins/distributed/fsdp2.py",
"license": "Apache License 2.0",
"lines": 361,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/trainer_plugins/distributed/hub.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from ....config.arg_utils import PluginConfig
from ....utils.plugin import BasePlugin
if TYPE_CHECKING:
from ....utils.types import HFModel, Processor
class DistributedPlugin(BasePlugin):
def __call__(self, model: HFModel, dist_config: PluginConfig, **kwargs) -> HFModel:
return super().__call__(model, dist_config, **kwargs)
@DistributedPlugin("fsdp2").register()
def shard_model_fsdp2(model: HFModel, dist_config: PluginConfig, **kwargs) -> HFModel:
from .fsdp2 import FSDP2Engine
return FSDP2Engine(dist_config).shard_model(model)
@DistributedPlugin("fsdp2").register("save_model")
def save_model_fsdp2(model: HFModel, output_dir: str, processor: Processor) -> None:
from .fsdp2 import save_model
return save_model(model, output_dir, processor)
@DistributedPlugin("deepspeed").register()
def shard_model_deepspeed(model: HFModel, dist_config: PluginConfig, **kwargs) -> HFModel:
from .deepspeed import DeepSpeedEngine
return DeepSpeedEngine(
dist_config,
num_micro_batch=kwargs.get("num_micro_batch"),
micro_batch_size=kwargs.get("micro_batch_size"),
).shard_model(model)
@DistributedPlugin("deepspeed").register("save_model")
def save_model_deepspeed(model: HFModel, output_dir: str, processor: Processor) -> None:
from .deepspeed import save_model
return save_model(model, output_dir, processor)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/trainer_plugins/distributed/hub.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/trainers/test_fsdp2_sft_trainer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
from pathlib import Path
import pytest
@pytest.mark.xfail(reason="CI machines may OOM when heavily loaded.")
@pytest.mark.runs_on(["cuda", "npu"])
def test_fsdp2_sft_trainer(tmp_path: Path):
"""Test FSDP2 SFT trainer by simulating `llamafactory-cli sft config.yaml` behavior."""
config_yaml = """\
model: Qwen/Qwen3-0.6B
trust_remote_code: true
model_class: llm
template: qwen3_nothink
kernel_config:
name: auto
include_kernels: auto
quant_config: null
dist_config:
name: fsdp2
dcp_path: null
init_config:
name: init_on_meta
### data
train_dataset: data/v1_sft_demo.yaml
### training
output_dir: {output_dir}
micro_batch_size: 1
global_batch_size: 1
cutoff_len: 2048
learning_rate: 1.0e-4
bf16: false
max_steps: 1
### sample
sample_backend: hf
max_new_tokens: 128
"""
# Create output directory
output_dir = tmp_path / "outputs"
output_dir.mkdir(parents=True, exist_ok=True)
config_file = tmp_path / "config.yaml"
config_file.write_text(config_yaml.format(output_dir=str(output_dir)))
# Set up environment variables
env = os.environ.copy()
env["USE_V1"] = "1" # Use v1 launcher
env["FORCE_TORCHRUN"] = "1" # Force distributed training via torchrun
# Run the CLI command via subprocess
# This simulates: llamafactory-cli sft config.yaml
result = subprocess.run(
[sys.executable, "-m", "llamafactory.cli", "sft", str(config_file)],
env=env,
capture_output=True,
cwd=str(Path(__file__).parent.parent.parent), # LLaMA-Factory root
)
# Decode output with error handling (progress bars may contain non-UTF-8 bytes)
stderr = result.stderr.decode("utf-8", errors="replace")
# Check the result
assert result.returncode == 0, f"Training failed with return code {result.returncode}\nSTDERR: {stderr}"
# Verify output files exist (optional - adjust based on what run_sft produces)
# assert (output_dir / "some_expected_file").exists()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/trainers/test_fsdp2_sft_trainer.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/core/utils/inference_engine.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from abc import ABC, abstractmethod
from collections.abc import AsyncGenerator
from threading import Thread
import torch
from transformers import AsyncTextIteratorStreamer
from ...accelerator.interface import DistributedInterface
from ...config import ModelArguments, SampleArguments
from ...utils.helper import get_tokenizer
from ...utils.types import HFModel, Message, Sample, TorchDataset
from .rendering import Renderer
class BaseEngine(ABC):
@abstractmethod
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
"""Initialize the engine.
Args:
args: Sample arguments.
model_args: Model arguments.
model: Model.
renderer: Renderer.
"""
...
@abstractmethod
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
"""Generate tokens asynchronously.
Args:
messages: List of messages.
tools: Tools string.
Yields:
Generated tokens.
"""
...
@abstractmethod
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
"""Batch infer samples.
Args:
dataset: Torch dataset.
Returns:
List of samples.
"""
...
class HuggingFaceEngine(BaseEngine):
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
self.args = args
self.model_args = model_args
self.model = model
self.renderer = renderer
self.semaphore = asyncio.Semaphore(int(os.getenv("MAX_CONCURRENT", "1")))
@torch.inference_mode()
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
async with self.semaphore:
model_inputs = self.renderer.render_messages(messages, tools, is_generate=True)
streamer = AsyncTextIteratorStreamer(
tokenizer=get_tokenizer(self.renderer.processor),
skip_prompt=True,
skip_special_tokens=True, # TODO: configurable
)
device = DistributedInterface().current_device
kwargs = {
"input_ids": torch.tensor([model_inputs["input_ids"]]).to(device),
"attention_mask": torch.tensor([model_inputs["attention_mask"]]).to(device),
"max_new_tokens": self.args.max_new_tokens,
"streamer": streamer,
}
thread = Thread(target=self.model.generate, kwargs=kwargs, daemon=True)
thread.start()
async for token in streamer:
yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
"""Batch infer samples.
Args:
dataset: Torch dataset.
Returns:
List of samples.
"""
raise NotImplementedError("Batch infer is not implemented.")
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/utils/inference_engine.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/trainer_plugins/lr_scheduler.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.plugin import BasePlugin
class LRSchedulerPlugin(BasePlugin):
pass
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/trainer_plugins/lr_scheduler.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/trainer_plugins/optimizer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.plugin import BasePlugin
class OptimizerPlugin(BasePlugin):
pass
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/trainer_plugins/optimizer.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:scripts/convert_ckpt/tiny_qwen3.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import AutoTokenizer, Qwen3Config, Qwen3ForCausalLM
if __name__ == "__main__":
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
config = Qwen3Config(
hidden_size=1408,
image_size=336,
intermediate_size=5632,
num_attention_heads=16,
num_hidden_layers=4,
vision_output_dim=4096,
)
model = Qwen3ForCausalLM.from_config(config)
model.save_pretrained("tiny-qwen3")
tokenizer.save_pretrained("tiny-qwen3")
model.push_to_hub("llamafactory/tiny-random-qwen3")
tokenizer.push_to_hub("llamafactory/tiny-random-qwen3")
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "scripts/convert_ckpt/tiny_qwen3.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/objects.py | # Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/utils/logging.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .types import ModelInput
class StatefulBuffer:
"""A buffer that stores model inputs."""
def __init__(self, max_buffer_size: int = 1_000_000_000) -> None:
self._buffer: list[ModelInput] = []
self._buffer_size: int = 0
self._max_buffer_size: int = max_buffer_size
def __len__(self) -> int:
return len(self._buffer)
@property
def size(self) -> int:
return self._buffer_size
def put(self, samples: list[ModelInput]) -> None:
"""Add samples to the buffer."""
num_tokens = sum(len(sample["input_ids"]) for sample in samples)
if self._buffer_size + num_tokens > self._max_buffer_size:
raise ValueError(f"Buffer size exceeds max buffer size {self._max_buffer_size}.")
self._buffer.extend(samples)
self._buffer_size += num_tokens
def get(self, value: int) -> list[ModelInput]:
"""Get samples from the buffer and remove them."""
samples = self._buffer[:value]
self._buffer_size -= sum(len(sample["input_ids"]) for sample in samples)
del self._buffer[:value]
return samples
def clear(self) -> None:
"""Clear the buffer."""
self._buffer = []
self._buffer_size = 0
def state_dict(self) -> dict:
"""Returns the state of the buffer."""
return {
"buffer": self._buffer,
"buffer_size": self._buffer_size,
}
def load_state_dict(self, state_dict: dict) -> None:
"""Loads the state into the buffer."""
self._buffer = state_dict["buffer"]
self._buffer_size = state_dict["buffer_size"]
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/objects.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/core/utils/batching.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batching utils supports stateful dataloader.
1. Init stateful dataloader (tokenize)
2. Add to buffer
3. Yield batch indexes (micro batch * grad acc)
a) non pack + non dynamic
b) non pack + dynamic
c) pack + non dynamic
d) pack + dynamic
"""
from collections.abc import Iterator
from typing import Any
import torch
from torch.utils.data import default_collate
from torchdata.stateful_dataloader import StatefulDataLoader
from torchdata.stateful_dataloader.sampler import StatefulDistributedSampler
from ...accelerator.interface import Dim, DistributedInterface
from ...config import BatchingStrategy
from ...utils import logging
from ...utils.helper import pad_and_truncate
from ...utils.objects import StatefulBuffer
from ...utils.types import BatchInfo, BatchInput, ModelInput, TorchDataset
from .rendering import Renderer
logger = logging.get_logger(__name__)
def default_collate_fn(buffer: StatefulBuffer, batch_info: BatchInfo) -> list[BatchInput] | None:
micro_batch_size = batch_info["micro_batch_size"]
num_micro_batch = batch_info["num_micro_batch"]
cutoff_len = batch_info["cutoff_len"]
batch_size = micro_batch_size * num_micro_batch
if len(buffer) < batch_size:
return None
samples = buffer.get(batch_size)
batch = []
for i in range(num_micro_batch):
micro_batch = samples[i * micro_batch_size : (i + 1) * micro_batch_size]
batch.append(default_collate(pad_and_truncate(micro_batch, cutoff_len)))
return batch
class BatchGenerator(Iterator):
def __init__(
self,
dataset: TorchDataset,
renderer: Renderer,
micro_batch_size: int = 1,
global_batch_size: int | None = None,
cutoff_len: int = 2048,
batching_workers: int = 0,
batching_strategy: BatchingStrategy = BatchingStrategy.NORMAL,
pin_memory: bool = True,
drop_last: bool = True,
seed: int = 42,
) -> None:
self.dataset = dataset
self.renderer = renderer
self.micro_batch_size = micro_batch_size
self.global_batch_size = global_batch_size
self.cutoff_len = cutoff_len
self.batching_workers = batching_workers
self.batching_strategy = batching_strategy
self.pin_memory = pin_memory
self.drop_last = drop_last
self.seed = seed
# TODO: support length and infinity
dp_size = DistributedInterface().get_world_size(Dim.DP)
if self.global_batch_size is None:
self.global_batch_size = dp_size * micro_batch_size
self.num_micro_batch = 1
elif self.global_batch_size % (dp_size * micro_batch_size) == 0:
self.num_micro_batch = global_batch_size // dp_size // micro_batch_size
else:
raise ValueError(
"Global batch size must be divisible by DP size and micro batch size. "
f"Got {global_batch_size} % ({dp_size} * {micro_batch_size}) != 0."
)
if not self.drop_last:
raise ValueError("Drop last must be True.")
self._init_data_provider()
self._is_resuming: bool = False
self._data_iter = iter(self._data_provider)
self._buffer = StatefulBuffer()
self._batch_info: BatchInfo = {
"micro_batch_size": self.micro_batch_size,
"num_micro_batch": self.num_micro_batch,
"cutoff_len": self.cutoff_len,
"data_iter": self._data_iter,
}
logger.info_rank0(
f"Init unified data loader with global batch size {self.global_batch_size}, "
f"micro batch size {self.micro_batch_size}, "
f"num micro batch {self.num_micro_batch}, "
f"cutoff len {self.cutoff_len}, "
f"batching workers {self.batching_workers}, "
f"batching strategy {self.batching_strategy}."
)
def _init_data_provider(self) -> None:
if len(self.dataset) != -1:
sampler = StatefulDistributedSampler(
self.dataset,
num_replicas=DistributedInterface().get_world_size(Dim.DP),
rank=DistributedInterface().get_rank(Dim.DP),
shuffle=True,
seed=self.seed,
drop_last=self.drop_last,
)
else:
raise NotImplementedError("Iterable dataset is not supported yet.")
generato_seed = torch.Generator()
generato_seed.manual_seed(self.seed)
self._data_provider = StatefulDataLoader(
self.dataset,
batch_size=self.micro_batch_size * self.num_micro_batch,
sampler=sampler,
num_workers=self.batching_workers,
collate_fn=self.renderer.process_samples,
pin_memory=self.pin_memory,
pin_memory_device=DistributedInterface().current_device.type,
drop_last=self.drop_last,
generator=generato_seed,
)
if self.batching_strategy == BatchingStrategy.NORMAL:
self._length = len(self._data_provider)
else:
from ...plugins.trainer_plugins.batching import BatchingPlugin
self._length = BatchingPlugin(self.batching_strategy).compute_length(self._data_provider)
raise NotImplementedError("Batching strategy other than NORMAL is not supported yet.")
def __len__(self) -> int:
return self._length
def __iter__(self):
if not self._is_resuming:
self._buffer.clear()
self._buffer_tokens = 0
self._data_iter = iter(self._data_provider)
self._is_resuming = False
return self
def __next__(self):
self._fill_buffer()
batch = self._generate_batch()
if batch is None:
raise StopIteration
return batch
def _fill_buffer(self) -> None:
if self.batching_strategy == BatchingStrategy.NORMAL:
while len(self._buffer) < self.micro_batch_size * self.num_micro_batch:
try:
samples: list[ModelInput] = next(self._data_iter)
except StopIteration:
break
self._buffer.put(samples)
else:
from ...plugins.trainer_plugins.batching import BatchingPlugin
BatchingPlugin(self.batching_strategy).fill_buffer(self._buffer, self._batch_info)
def _generate_batch(self) -> list[BatchInput] | None:
if self.batching_strategy == BatchingStrategy.NORMAL:
return default_collate_fn(self._buffer, self._batch_info)
else:
from ...plugins.trainer_plugins.batching import BatchingPlugin
return BatchingPlugin(self.batching_strategy).generate_batch(self._buffer, self._batch_info)
def state_dict(self) -> dict[str, Any]:
return {
"buffer": self._buffer,
"buffer_tokens": self._buffer_tokens,
"data_provider": self._data_provider.state_dict(),
}
def load_state_dict(self, state: dict[str, Any]) -> None:
self._buffer = state["buffer"]
self._buffer_tokens = state["buffer_tokens"]
self._data_provider.load_state_dict(state["data_provider"])
self._is_resuming = True
def set_epoch(self, epoch: int) -> None:
if hasattr(self._data_provider.sampler, "set_epoch"):
self._data_provider.sampler.set_epoch(epoch)
if __name__ == "__main__":
"""
python -m llamafactory.v1.core.utils.batching \
--model llamafactory/tiny-random-qwen2.5 \
--train_dataset data/v1_sft_demo.yaml \
--micro_batch_size 2 \
--global_batch_size 4 \
--batching_workers 0
"""
from ...config.arg_parser import get_args
from ..data_engine import DataEngine
from ..model_engine import ModelEngine
model_args, data_args, training_args, _ = get_args()
data_engine = DataEngine(data_args.train_dataset)
model_engine = ModelEngine(model_args=model_args)
batch_generator = BatchGenerator(
data_engine,
model_engine.renderer,
micro_batch_size=training_args.micro_batch_size,
global_batch_size=training_args.global_batch_size,
cutoff_len=training_args.cutoff_len,
batching_workers=training_args.batching_workers,
batching_strategy=training_args.batching_strategy,
)
for batch in batch_generator:
print(batch)
print(len(batch))
print(batch[0]["input_ids"].shape)
break
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/utils/batching.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/trainer_plugins/batching.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.objects import StatefulBuffer
from ...utils.plugin import BasePlugin
from ...utils.types import BatchInfo, BatchInput, DataLoader
class BatchingPlugin(BasePlugin):
def compute_length(self, data_provider: DataLoader) -> int:
"""Compute the length of the batch generator.
The approximate length is used to calculate the lr schedule.
"""
raise NotImplementedError()
def fill_buffer(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> None:
"""Fill the buffer with data."""
raise NotImplementedError()
def generate_batch(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> list[BatchInput] | None:
"""Generate a batch from the buffer."""
raise NotImplementedError()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/trainer_plugins/batching.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/core/utils/test_batching.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llamafactory.v1.config import DataArguments, ModelArguments, TrainingArguments
from llamafactory.v1.core.data_engine import DataEngine
from llamafactory.v1.core.model_engine import ModelEngine
from llamafactory.v1.core.utils.batching import BatchGenerator
def test_normal_batching():
data_args = DataArguments(train_dataset="llamafactory/v1-sft-demo")
data_engine = DataEngine(data_args.train_dataset)
model_args = ModelArguments(model="llamafactory/tiny-random-qwen3")
model_engine = ModelEngine(model_args=model_args)
training_args = TrainingArguments(
micro_batch_size=4,
global_batch_size=8,
cutoff_len=10,
batching_workers=0,
batching_strategy="normal",
)
batch_generator = BatchGenerator(
data_engine,
model_engine.renderer,
micro_batch_size=training_args.micro_batch_size,
global_batch_size=training_args.global_batch_size,
cutoff_len=training_args.cutoff_len,
batching_workers=training_args.batching_workers,
batching_strategy=training_args.batching_strategy,
)
assert len(batch_generator) == len(data_engine) // training_args.global_batch_size
batch = next(iter(batch_generator))
assert len(batch) == 2
assert batch[0]["input_ids"].shape == (4, 10)
if __name__ == "__main__":
"""
python -m tests_v1.core.utils.test_batching
"""
test_normal_batching()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/core/utils/test_batching.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/rendering.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from ...utils.constants import IGNORE_INDEX
from ...utils.helper import get_tokenizer
from ...utils.plugin import BasePlugin
from ...utils.types import Message, ModelInput, Processor, ToolCall
class RenderingPlugin(BasePlugin):
def render_messages(
self,
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
) -> ModelInput:
"""Render messages in the template format."""
return self["render_messages"](processor, messages, tools, is_generate)
def parse_messages(self, generated_text: str) -> Message:
"""Parse messages in the template format."""
return self["parse_messages"](generated_text)
def _update_model_input(
processor: Processor,
input_ids: list[int],
labels: list[int],
loss_weights: list[int],
temp_str: str,
temp_weight: float,
) -> str:
"""Update model input with temporary string."""
if not temp_str:
return ""
tokenizer = get_tokenizer(processor)
temp_ids = tokenizer.encode(temp_str, add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([temp_weight] * len(temp_ids))
if temp_weight > 1e-6:
labels.extend(temp_ids)
else:
labels.extend([IGNORE_INDEX] * len(temp_ids))
return ""
@RenderingPlugin("qwen3_nothink").register("render_messages")
def render_qwen3_nothink_messages(
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
) -> ModelInput:
"""Render messages in the Qwen3 nothink template format.
See https://huggingface.co/spaces/huggingfacejs/chat-template-playground?modelId=Qwen/Qwen3-4B-Instruct-2507
"""
input_ids, labels, loss_weights = [], [], []
temp_str, temp_weight = "", 0.0
if tools:
temp_str += "<|im_start|>system\n"
if messages[0]["role"] == "system":
for content in messages[0]["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "\n\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str += (
"# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>"
)
try:
tools = json.loads(tools)
except json.JSONDecodeError:
raise ValueError(f"Invalid tools format: {str(tools)}.")
if not isinstance(tools, list):
tools = [tools]
for tool in tools:
temp_str += "\n" + json.dumps(tool, ensure_ascii=False)
temp_str += (
"\n</tools>\n\nFor each function call, return a json object with function name "
'and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{"name": '
'<function-name>, "arguments": <args-json-object>}\n</tool_call><|im_end|>\n'
)
elif messages[0]["role"] == "system":
temp_str += "<|im_start|>system\n"
for content in messages[0]["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
for turn_idx, message in enumerate(messages):
if message["role"] == "user" or (message["role"] == "system" and turn_idx != 0):
temp_str += "<|im_start|>" + message["role"] + "\n"
for content in message["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
elif message["role"] == "assistant":
temp_str += "<|im_start|>" + message["role"] + "\n"
for val_idx, content in enumerate(message["content"]):
if content["type"] == "text":
temp_str += content["value"]
elif content["type"] == "reasoning":
temp_str += "<thinking>\n" + content["value"] + "\n</thinking>\n\n" # avoid using special tokens
elif content["type"] == "tool_call":
if val_idx != 0 and message["content"][val_idx - 1]["type"] in ["text", "tool_call"]:
temp_str += "\n"
try:
tool_call: ToolCall = json.loads(content["value"])
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {content['value']}.")
temp_str += (
'<tool_call>\n{"name": "'
+ tool_call["name"]
+ '", "arguments": '
+ json.dumps(tool_call["arguments"], ensure_ascii=False)
+ "}\n</tool_call>"
)
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 1.0)
elif message["role"] == "tool":
if turn_idx == 0 or messages[turn_idx - 1]["role"] != "tool":
temp_str += "<|im_start|>user"
temp_str += "\n<tool_response>\n"
for content in message["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "\n</tool_response>"
if turn_idx == len(messages) - 1 or messages[turn_idx + 1]["role"] != "tool":
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
if is_generate:
temp_str += "<|im_start|>assistant\n"
temp_weight = 0.0
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
attention_mask = [1] * len(input_ids)
return ModelInput(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
loss_weights=loss_weights,
)
@RenderingPlugin("qwen3_nothink").register("parse_message")
def parse_qwen3_nothink_message(generated_text: str) -> Message:
"""Parse a message in the Qwen3 nothink template format. Supports interleaved reasoning and tool calls.
Args:
generated_text (str): The generated text in the Qwen3 nothink template format.
Returns:
Message: The parsed message.
"""
pattern = re.compile(r"<(thinking|tool_call)>\s*(.*?)\s*</\1>\s*", re.DOTALL)
content = []
last_end = 0
for match in pattern.finditer(generated_text):
start, end = match.span()
if start > last_end:
text = generated_text[last_end:start].strip()
if text:
content.append({"type": "text", "value": text})
tag_type = match.group(1)
tag_value = match.group(2).strip()
if tag_type == "thinking":
content.append({"type": "reasoning", "value": tag_value.strip()})
elif tag_type == "tool_call":
try:
json.loads(tag_value.strip())
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {tag_value.strip()}.")
content.append({"type": "tool_call", "value": tag_value.strip()})
last_end = end
if last_end < len(generated_text):
text = generated_text[last_end:].strip()
if text:
content.append({"type": "text", "value": text})
return Message(role="assistant", content=content)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/rendering.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/helper.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import PreTrainedTokenizer
from transformers import set_seed as hf_set_seed
from ..accelerator.interface import DistributedInterface
from .constants import IGNORE_INDEX
from .types import BatchInput, ModelInput, Processor, Tensor
def set_seed(seed: int) -> None:
"""Set seed for reproducibility.
Args:
seed: Random seed.
"""
hf_set_seed(seed)
def is_tokenizer(processor: Processor) -> bool:
"""Check if processor is tokenizer.
Args:
processor: Processor.
Returns:
Whether processor is tokenizer.
"""
return not hasattr(processor, "tokenizer")
def get_tokenizer(processor: Processor) -> PreTrainedTokenizer:
"""Get tokenizer from processor.
Args:
processor: Processor.
Returns:
Tokenizer.
"""
return processor.tokenizer if hasattr(processor, "tokenizer") else processor
def _pad_and_truncate(tensor: Tensor, max_seqlen: int, pad_value: int = 0) -> Tensor:
if tensor.shape[-1] >= max_seqlen:
return tensor[..., :max_seqlen]
pad_shape = list(tensor.shape)
pad_shape[-1] = max_seqlen - tensor.shape[-1]
pad_tensor = torch.full(pad_shape, pad_value, dtype=tensor.dtype, device=tensor.device)
return torch.cat([tensor, pad_tensor], dim=-1)
def pad_and_truncate(samples: list[ModelInput], max_seqlen: int) -> list[BatchInput]:
max_length = min(max(len(sample["input_ids"]) for sample in samples), max_seqlen)
padded_samples = []
for sample in samples:
padded_sample = {}
for key, value in sample.items():
if "label" in key:
pad_value = IGNORE_INDEX
else:
pad_value = 0
if not isinstance(value, str):
padded_sample[key] = _pad_and_truncate(torch.tensor(value), max_length, pad_value)
else:
padded_sample[key] = value
padded_samples.append(padded_sample)
return padded_samples
def compute_valid_tokens(batches: list[BatchInput]) -> int:
"""Compute valid tokens in batches.
Args:
batches: Batches.
Returns:
Number of valid tokens.
"""
device = DistributedInterface().current_device
return sum(
(batch["labels"].to(device, non_blocking=True) != IGNORE_INDEX).sum().item()
for batch in batches
if "labels" in batch
)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/helper.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/core/utils/test_rendering.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
from transformers import AutoTokenizer
from llamafactory.v1.config import DataArguments
from llamafactory.v1.core.data_engine import DataEngine
from llamafactory.v1.core.utils.rendering import Renderer
from llamafactory.v1.utils.types import Processor
def _get_input_ids(inputs: list | dict) -> list:
if not isinstance(inputs, list):
return inputs["input_ids"]
else:
return inputs
HF_MESSAGES = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is LLM?"},
{"role": "assistant", "content": "LLM stands for Large Language Model."},
]
V1_MESSAGES = [
{"role": "system", "content": [{"type": "text", "value": "You are a helpful assistant."}]},
{"role": "user", "content": [{"type": "text", "value": "What is LLM?"}]},
{"role": "assistant", "content": [{"type": "text", "value": "LLM stands for Large Language Model."}]},
]
HF_MESSAGES_WITH_TOOLS = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is 6*8?"},
{
"role": "assistant",
"tool_calls": [{"type": "function", "function": {"name": "multiply", "arguments": {"a": 6, "b": 8}}}],
},
{"role": "tool", "content": "48."},
{"role": "assistant", "content": "The result of 6*8 is 48."},
]
V1_MESSAGES_WITH_TOOLS = [
{"role": "system", "content": [{"type": "text", "value": "You are a helpful assistant."}]},
{"role": "user", "content": [{"type": "text", "value": "What is 6*8?"}]},
{
"role": "assistant",
"content": [{"type": "tool_call", "value": json.dumps({"name": "multiply", "arguments": {"a": 6, "b": 8}})}],
"loss_weight": 0.0,
},
{"role": "tool", "content": [{"type": "text", "value": "48."}]},
{"role": "assistant", "content": [{"type": "text", "value": "The result of 6*8 is 48."}]},
]
V1_TOOLS = [
{
"type": "function",
"function": {
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"a": {"type": "number", "description": "The first number to multiply"},
"b": {"type": "number", "description": "The second number to multiply"},
},
"required": ["a", "b"],
},
},
}
]
def test_chatml_rendering():
tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3")
renderer = Renderer(template="chatml", processor=tokenizer)
hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=True))
v1_inputs = renderer.render_messages(V1_MESSAGES[:-1], is_generate=True)
assert v1_inputs["input_ids"] == hf_inputs
assert v1_inputs["attention_mask"] == [1] * len(hf_inputs)
assert v1_inputs["labels"] == [-100] * len(hf_inputs)
assert v1_inputs["loss_weights"] == [0.0] * len(hf_inputs)
hf_inputs_part = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=False))
hf_inputs_full = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES, add_generation_prompt=False))
v1_inputs_full = renderer.render_messages(V1_MESSAGES, is_generate=False)
assert v1_inputs_full["input_ids"] == hf_inputs_full
assert v1_inputs_full["attention_mask"] == [1] * len(hf_inputs_full)
assert v1_inputs_full["labels"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]
assert v1_inputs_full["loss_weights"] == [0.0] * len(hf_inputs_part) + [1.0] * (
len(hf_inputs_full) - len(hf_inputs_part)
)
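

# Plain generated text should parse back into a v1 assistant message.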
def test_chatml_parse():
tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3")
renderer = Renderer(template="chatml", processor=tokenizer)
generated_text = "LLM stands for Large Language Model."
parsed_message = renderer.parse_message(generated_text)
assert parsed_message == V1_MESSAGES[-1]
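

# Render conversations from a remote dataset and check that each prompt starts
# with the expected chatml user header.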
@pytest.mark.parametrize("num_samples", [16])
def test_chatml_rendering_remote(num_samples: int):
tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3")
renderer = Renderer(template="chatml", processor=tokenizer)
data_args = DataArguments(train_dataset="llamafactory/v1-sft-demo")
data_engine = DataEngine(data_args.train_dataset)
for index in range(num_samples):
v1_inputs = renderer.render_messages(data_engine[index]["messages"], is_generate=True)
prefix = tokenizer.encode("<|im_start|>user\n", add_special_tokens=False)
print(tokenizer.decode(v1_inputs["input_ids"][: len(prefix)]))
assert v1_inputs["input_ids"][: len(prefix)] == prefix
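

# Same parity check with the qwen3_nothink template, including tool definitions.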
def test_qwen3_nothink_rendering():
tokenizer: Processor = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
renderer = Renderer(template="qwen3_nothink", processor=tokenizer)
hf_inputs = _get_input_ids(
tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=True)
)
v1_inputs = renderer.render_messages(V1_MESSAGES_WITH_TOOLS[:-1], tools=json.dumps(V1_TOOLS), is_generate=True)
assert v1_inputs["input_ids"] == hf_inputs
assert v1_inputs["attention_mask"] == [1] * len(hf_inputs)
assert v1_inputs["labels"] == [-100] * len(hf_inputs)
assert v1_inputs["loss_weights"] == [0.0] * len(hf_inputs)
hf_inputs_part = _get_input_ids(
tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=False)
)
hf_inputs_full = _get_input_ids(
tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS, tools=V1_TOOLS, add_generation_prompt=False)
)
v1_inputs_full = renderer.render_messages(V1_MESSAGES_WITH_TOOLS, tools=json.dumps(V1_TOOLS), is_generate=False)
assert v1_inputs_full["input_ids"] == hf_inputs_full
assert v1_inputs_full["attention_mask"] == [1] * len(hf_inputs_full)
assert v1_inputs_full["labels"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]
assert v1_inputs_full["loss_weights"] == [0.0] * len(hf_inputs_part) + [1.0] * (
len(hf_inputs_full) - len(hf_inputs_part)
)
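

# Reasoning, plain text, and tool-call segments should be parsed into separate
# content parts.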
def test_qwen3_nothink_parse():
tokenizer: Processor = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
renderer = Renderer(template="qwen3_nothink", processor=tokenizer)
generated_text = (
"<thinking>I need to use the multiply function to calculate 6*8.</thinking>"
"Let me call the multiply function."
'<tool_call>{"name": "multiply", "arguments": {"a": 6, "b": 8}}</tool_call>'
)
parsed_message = renderer.parse_message(generated_text)
assert parsed_message == {
"role": "assistant",
"content": [
{"type": "reasoning", "value": "I need to use the multiply function to calculate 6*8."},
{"type": "text", "value": "Let me call the multiply function."},
{"type": "tool_call", "value": json.dumps({"name": "multiply", "arguments": {"a": 6, "b": 8}})},
],
}
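

# Render tool-use conversations from a remote dataset and check that the rendered
# system prompt starts with the expected prefix.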
@pytest.mark.parametrize("num_samples", [8])
def test_qwen3_nothink_rendering_remote(num_samples: int):
tokenizer: Processor = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
renderer = Renderer(template="qwen3_nothink", processor=tokenizer)
data_args = DataArguments(train_dataset="llamafactory/reason-tool-use-demo-1500")
data_engine = DataEngine(data_args.train_dataset)
for index in range(num_samples):
v1_inputs = renderer.render_messages(data_engine[index]["messages"], tools=data_engine[index]["tools"])
prefix_text = (
"<|im_start|>system\nYou are a methodical and expert assistant. "
"Your primary goal is to solve user requests by leveraging a set of available tools. "
"You must reason for the best course of action in a structured manner before responding.\n\n"
"# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>\n"
'{"type": "function", "function": {"name":'
)
prefix = tokenizer.encode(prefix_text, add_special_tokens=False)
print(tokenizer.decode(v1_inputs["input_ids"][: len(prefix)]))
assert v1_inputs["input_ids"][: len(prefix)] == prefix
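

# process_samples should tokenize SFT samples and pass extra fields through.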
def test_process_sft_samples():
tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3")
renderer = Renderer(template="chatml", processor=tokenizer)
hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))
samples = [{"messages": V1_MESSAGES, "extra_info": "test", "_dataset_name": "default"}]
model_inputs = renderer.process_samples(samples)
assert len(model_inputs) == 1
assert model_inputs[0]["input_ids"] == hf_inputs
assert model_inputs[0]["extra_info"] == "test"
assert model_inputs[0]["_dataset_name"] == "default"
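

# For DPO samples, the chosen and rejected renderings are concatenated and
# distinguished by token_type_ids.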
def test_process_dpo_samples():
tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3")
renderer = Renderer(template="chatml", processor=tokenizer)
hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))
samples = [
{
"chosen_messages": V1_MESSAGES,
"rejected_messages": V1_MESSAGES,
"extra_info": "test",
"_dataset_name": "default",
}
]
model_inputs = renderer.process_samples(samples)
assert len(model_inputs) == 1
assert model_inputs[0]["input_ids"] == hf_inputs * 2
assert model_inputs[0]["token_type_ids"] == [1] * len(hf_inputs) + [2] * len(hf_inputs)
assert model_inputs[0]["extra_info"] == "test"
assert model_inputs[0]["_dataset_name"] == "default"


if __name__ == "__main__":
"""
python -m tests_v1.core.utils.test_rendering
"""
test_chatml_rendering()
test_chatml_parse()
test_chatml_rendering_remote(16)
test_qwen3_nothink_rendering()
test_qwen3_nothink_parse()
test_qwen3_nothink_rendering_remote(16)
test_process_sft_samples()
test_process_dpo_samples()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/core/utils/test_rendering.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:tests_v1/sampler/test_cli_sampler.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest

from llamafactory.v1.config import ModelArguments, SampleArguments
from llamafactory.v1.core.model_engine import ModelEngine
from llamafactory.v1.samplers.cli_sampler import SyncSampler
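

# Generate a short response with the synchronous sampler and check that it parses
# back into a v1 assistant message. Requires a CUDA or NPU device.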
@pytest.mark.runs_on(["cuda", "npu"])
def test_sync_sampler():
model_args = ModelArguments(model="Qwen/Qwen3-4B-Instruct-2507", template="qwen3_nothink")
sample_args = SampleArguments()
model_engine = ModelEngine(model_args)
sampler = SyncSampler(sample_args, model_args, model_engine.model, model_engine.renderer)
messages = [{"role": "user", "content": [{"type": "text", "value": "Say 'This is a test.'"}]}]
response = ""
for new_text in sampler.generate(messages):
response += new_text
print(response)
assert model_engine.renderer.parse_message(response) == {
"role": "assistant",
"content": [{"type": "text", "value": "This is a test."}],
}


if __name__ == "__main__":
"""
python tests_v1/sampler/test_cli_sampler.py
"""
test_sync_sampler()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/sampler/test_cli_sampler.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |